From 6188b987660711fc3e6ff762789c51d6fb8eff3d Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 15 Oct 2025 12:51:42 -0700 Subject: [PATCH 01/79] break circular dependencies --- README.md | 18 ++++- bubus/models.py | 146 +++++++++++++++++++++++++++++++++++ bubus/service.py | 195 +++++++++++------------------------------------ 3 files changed, 205 insertions(+), 154 deletions(-) diff --git a/README.md b/README.md index afd7ed8..d90b45d 100644 --- a/README.md +++ b/README.md @@ -762,6 +762,17 @@ long_lists = await event.event_results_flat_list(include=lambda r: isinstance(r. all_items = await event.event_results_flat_list(raise_if_any=False, raise_if_none=False) ``` +##### `event_create_pending_results(handlers: dict[str, EventHandler], eventbus: EventBus | None = None, timeout: float | None = None) -> dict[str, EventResult]` + +Create (or reset) the `EventResult` placeholders for the provided handlers. The `EventBus` uses this internally before it begins executing handlers so that the event's state is immediately visible. Advanced users can call it when coordinating handler execution manually. + +```python +applicable_handlers = bus._get_applicable_handlers(event) # internal helper shown for illustration +pending_results = event.event_create_pending_results(applicable_handlers, eventbus=bus) + +assert all(result.status == 'pending' for result in pending_results.values()) +``` + ##### `event_bus` (property) Shortcut to get the `EventBus` that is currently processing this event. Can be used to avoid having to pass an `EventBus` instance to your handlers. @@ -785,7 +796,7 @@ async def some_handler(event: MyEvent): The placeholder object that represents the pending result from a single handler executing an event. `Event.event_results` contains a `dict[PythonIdStr, EventResult]` in the shape of `{handler_id: EventResult()}`. -You shouldn't need to ever directly use this class, it's an internal wrapper to track pending and completed results from each handler within `BaseEvent.event_results`. +You generally won't interact with this class directlyβ€”the bus instantiates and updates it for youβ€”but its API is documented here for advanced integrations and custom dispatch loops. #### `EventResult` Fields @@ -804,7 +815,7 @@ class EventResult(BaseModel): started_at: datetime # When handler started completed_at: datetime # When handler completed timeout: float # Handler timeout in seconds - child_events: list[BaseEvent] # list of child events emitted during handler execution + event_children: list[BaseEvent] # child events emitted during handler execution ``` #### `EventResult` Methods @@ -818,6 +829,9 @@ handler_result = event.event_results['handler_id'] value = await handler_result # Returns result or raises an exception if handler hits an error ``` +- `execute(event, handler, *, eventbus, timeout, enter_context, exit_context, log_filtered_traceback)` + Low-level helper that runs the handler, updates timing/status fields, captures errors, and notifies its completion signal. `EventBus.execute_handler()` delegates to this; you generally only need it when building a custom bus or integrating the event system into another dispatcher. + --- ## 🧡 Advanced Concurrency Control diff --git a/bubus/models.py b/bubus/models.py index 4079e49..1ed2de4 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -457,6 +457,35 @@ def event_completed_at(self) -> datetime | None: completed_times = [result.completed_at for result in self.event_results.values() if result.completed_at is not None] return max(completed_times) if completed_times else self.event_processed_at + def event_create_pending_results( + self, + handlers: dict[PythonIdStr, EventHandler], + *, + eventbus: 'EventBus | None' = None, + timeout: float | None = None, + ) -> 'dict[PythonIdStr, EventResult[T_EventResultType]]': + """Ensure EventResult placeholders exist for provided handlers before execution.""" + pending_results: dict[PythonIdStr, 'EventResult[T_EventResultType]'] = {} + for handler_id, handler in handlers.items(): + event_result = self.event_result_update( + handler=handler, + eventbus=eventbus, + status='pending', + ) + # Reset runtime fields so we never reuse stale data + event_result.result = None + event_result.error = None + event_result.started_at = None + event_result.completed_at = None + event_result.status = 'pending' + event_result.timeout = timeout if timeout is not None else self.event_timeout + event_result.result_type = self.event_result_type + pending_results[handler_id] = event_result + + if self.event_completed_signal and not self.event_completed_signal.is_set(): + self.event_processed_at = self.event_processed_at or datetime.now(UTC) + return pending_results + @staticmethod def _event_result_is_truthy(event_result: 'EventResult[T_EventResultType]') -> bool: if event_result.status != 'completed': @@ -682,6 +711,10 @@ def event_result_update( # Update the EventResult with provided kwargs self.event_results[handler_id].update(**kwargs) + if 'timeout' in kwargs: + self.event_results[handler_id].timeout = kwargs['timeout'] + if kwargs.get('status') == 'started' and hasattr(self, 'event_processed_at'): + self.event_processed_at = self.event_processed_at or datetime.now(UTC) # logger.debug( # f'Updated EventResult for handler {handler_id}: status={self.event_results[handler_id].status}, total_results={len(self.event_results)}' # ) @@ -958,6 +991,119 @@ def update(self, **kwargs: Any) -> Self: self.handler_completed_signal.set() return self + async def execute( + self, + event: 'BaseEvent[T_EventResultType]', + handler: EventHandler, + *, + eventbus: 'EventBus', + timeout: float | None, + enter_context: Callable[[BaseEvent[Any], str], tuple[Any, Any, Any]], + exit_context: Callable[[tuple[Any, Any, Any]], None], + log_filtered_traceback: Callable[[BaseException], str], + ) -> T_EventResultType | BaseEvent[Any] | None: + """Execute the handler and update internal state automatically.""" + + self.timeout = timeout if timeout is not None else self.timeout or event.event_timeout + self.result_type = event.event_result_type + self.update(status='started') + if hasattr(event, 'event_processed_at'): + event.event_processed_at = event.event_processed_at or datetime.now(UTC) + + monitor_task: asyncio.Task[None] | None = None + handler_task: asyncio.Task[Any] | None = None + + tokens = enter_context(event, self.handler_id) + + async def deadlock_monitor() -> None: + await asyncio.sleep(15.0) + logger.warning( + f'⚠️ {eventbus} handler {self.handler_name}() has been running for >15s on event. Possible slow processing or deadlock.\n' + '(handler could be trying to await its own result or could be blocked by another async task).\n' + f'{self.handler_name}({event})' + ) + + monitor_task = asyncio.create_task( + deadlock_monitor(), name=f'{eventbus}.deadlock_monitor({event}, {self.handler_name}#{self.handler_id[-4:]})' + ) + + try: + if inspect.iscoroutinefunction(handler): + handler_task = asyncio.create_task(handler(event)) # type: ignore + result_value: Any = await asyncio.wait_for(handler_task, timeout=self.timeout) + elif inspect.isfunction(handler) or inspect.ismethod(handler): + result_value = handler(event) + if isinstance(result_value, BaseEvent): + logger.debug( + f'Handler {self.handler_name} returned BaseEvent, not awaiting to avoid circular dependency' + ) + else: + raise ValueError(f'Handler {get_handler_name(handler)} must be a sync or async function, got: {type(handler)}') + + monitor_task.cancel() + self.update(result=result_value) + return cast(T_EventResultType | BaseEvent[Any] | None, self.result) + + except asyncio.CancelledError as exc: + if monitor_task: + monitor_task.cancel() + handler_interrupted_error = asyncio.CancelledError( + f'Event handler {self.handler_name}#{self.handler_id[-4:]}({event}) was interrupted because of a parent timeout' + ) + self.update(error=handler_interrupted_error) + raise handler_interrupted_error from exc + + except TimeoutError as exc: + if monitor_task: + monitor_task.cancel() + children = ( + f' and interrupted any processing of {len(event.event_children)} child events' + if event.event_children + else '' + ) + timeout_error = TimeoutError( + f'Event handler {self.handler_name}#{self.handler_id[-4:]}({event}) timed out after {self.timeout}s{children}' + ) + self.update(error=timeout_error) + event.event_cancel_pending_child_processing(timeout_error) + + from bubus.logging import log_timeout_tree + + log_timeout_tree(event, self) + raise timeout_error from exc + + except Exception as exc: + if monitor_task: + monitor_task.cancel() + self.update(error=exc) + + red = '\033[91m' + reset = '\033[0m' + logger.error( + f'❌ {eventbus} Error in event handler {self.handler_name}({event}) -> \n{red}{type(exc).__name__}({exc}){reset}\n{log_filtered_traceback(exc)}', + ) + raise + + finally: + if handler_task and not handler_task.done(): + handler_task.cancel() + try: + await asyncio.wait_for(handler_task, timeout=0.1) + except (asyncio.CancelledError, TimeoutError): + pass + + if monitor_task: + try: + if not monitor_task.done(): + monitor_task.cancel() + await monitor_task + except asyncio.CancelledError: + pass + except Exception: + pass + + exit_context(tokens) + def log_tree( self, indent: str = '', is_last: bool = True, child_events_by_parent: dict[str | None, list[BaseEvent[Any]]] | None = None ) -> None: diff --git a/bubus/service.py b/bubus/service.py index 72f652e..6f2a6e8 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -964,13 +964,10 @@ async def process_event(self, event: 'BaseEvent[Any]', timeout: float | None = N # Get applicable handlers applicable_handlers = self._get_applicable_handlers(event) - # Create pending EventResults for all applicable handlers before execution - # This ensures the event knows it has handlers and won't mark itself complete prematurely - for handler_id, handler in applicable_handlers.items(): - if handler_id not in event.event_results: - event.event_result_update( - handler=handler, eventbus=self, status='pending', timeout=timeout or event.event_timeout - ) + # Prepare EventResult placeholders ahead of execution + event.event_create_pending_results( + applicable_handlers, eventbus=self, timeout=timeout or event.event_timeout + ) # Execute handlers await self._execute_handlers(event, handlers=applicable_handlers, timeout=timeout) @@ -1034,8 +1031,26 @@ def _get_applicable_handlers(self, event: 'BaseEvent[Any]') -> dict[str, EventHa return filtered_handlers + def _enter_handler_context(self, event: 'BaseEvent[Any]', handler_id: str) -> tuple[contextvars.Token[Any], contextvars.Token[bool], contextvars.Token[str | None]]: + token = _current_event_context.set(event) + handler_token = inside_handler_context.set(True) + handler_id_token = _current_handler_id_context.set(handler_id) + return token, handler_token, handler_id_token + + def _exit_handler_context( + self, + tokens: tuple[contextvars.Token[Any], contextvars.Token[bool], contextvars.Token[str | None]], + ) -> None: + token, handler_token, handler_id_token = tokens + _current_event_context.reset(token) + inside_handler_context.reset(handler_token) + _current_handler_id_context.reset(handler_id_token) + async def _execute_handlers( - self, event: 'BaseEvent[Any]', handlers: dict[PythonIdStr, EventHandler] | None = None, timeout: float | None = None + self, + event: 'BaseEvent[Any]', + handlers: dict[PythonIdStr, EventHandler] | None = None, + timeout: float | None = None, ) -> None: """Execute all handlers for an event in parallel""" applicable_handlers = handlers if (handlers is not None) else self._get_applicable_handlers(event) @@ -1043,6 +1058,10 @@ async def _execute_handlers( event.event_mark_complete_if_all_handlers_completed() # mark event completed immediately if it has no handlers return + event.event_create_pending_results( + applicable_handlers, eventbus=self, timeout=timeout or event.event_timeout + ) + # Execute all handlers in parallel if self.parallel_handlers: handler_tasks: dict[PythonIdStr, tuple[asyncio.Task[Any], EventHandler]] = {} @@ -1080,155 +1099,27 @@ async def _execute_handlers( async def execute_handler( self, event: 'BaseEvent[T_EventResultType]', handler: EventHandler, timeout: float | None = None ) -> Any: - """Safely execute a single handler with deadlock detection""" - - # Check if this handler has already been executed for this event + """Safely execute a single handler via its EventResult wrapper.""" handler_id = get_handler_id(handler, self) logger.debug(f' ↳ {self}.execute_handler({event}, handler={get_handler_name(handler)}#{handler_id[-4:]})') - if handler_id in event.event_results: - existing_result = event.event_results[handler_id] - if existing_result.started_at is not None: - raise RuntimeError( - f'Handler {get_handler_name(handler)}#{handler_id[-4:]} has already been executed for event {event.event_id}. ' - f'Previous execution started at {existing_result.started_at}' - ) - - # Mark handler as started - event_result = event.event_result_update( - handler=handler, eventbus=self, status='started', timeout=timeout or event.event_timeout + if handler_id not in event.event_results: + event.event_create_pending_results({handler_id: handler}, eventbus=self, timeout=timeout or event.event_timeout) + + event_result = event.event_results[handler_id] + result_value = await event_result.execute( + event, + handler, + eventbus=self, + timeout=timeout or event.event_timeout, + enter_context=self._enter_handler_context, + exit_context=self._exit_handler_context, + log_filtered_traceback=_log_filtered_traceback, ) - - # Set the current event in context so child events can reference it - token = _current_event_context.set(event) - # Mark that we're inside a handler - handler_token = inside_handler_context.set(True) - # Set the current handler ID so child events can be tracked - handler_id_token = _current_handler_id_context.set(handler_id) - - # Create a task to monitor for potential deadlock / slow handlers - async def deadlock_monitor(): - await asyncio.sleep(15.0) - logger.warning( - f'⚠️ {self} handler {get_handler_name(handler)}() has been running for >15s on event. Possible slow processing or deadlock.\n' - '(handler could be trying to await its own result or could be blocked by another async task).\n' - f'{get_handler_name(handler)}({event})' - ) - - monitor_task = asyncio.create_task( - deadlock_monitor(), name=f'{self}.deadlock_monitor({event}, {get_handler_name(handler)}#{handler_id[-4:]})' + logger.debug( + f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {type(result_value).__name__ if result_value is not None else "None"}' ) - - handler_task = None - try: - if inspect.iscoroutinefunction(handler): - # Create a task for the handler so we can properly cancel it on timeout - handler_task = asyncio.create_task(handler(event)) # type: ignore - # This allows us to process child events when the handler awaits them - result_value: Any = await asyncio.wait_for(handler_task, timeout=event_result.timeout) - elif inspect.isfunction(handler) or inspect.ismethod(handler): - # If handler function is sync function, run it directly in the main thread - # This blocks but ensures we have access to the event loop, dont run it in a subthread! - result_value: Any = handler(event) - - # If the sync handler returned a BaseEvent (from dispatch), DON'T await it - # For forwarding handlers like bus.on('*', other_bus.dispatch), the handler - # has already queued the event on the target bus. The event will be tracked - # as a child event automatically. - if isinstance(result_value, BaseEvent): - logger.debug( - f'Handler {get_handler_name(handler)} returned BaseEvent, not awaiting to avoid circular dependency' - ) - else: - raise ValueError(f'Handler {get_handler_name(handler)} must be a sync or async function, got: {type(handler)}') - - logger.debug( - f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {type(result_value).__name__} {str(result_value)[:26]}...' # pyright: ignore - ) - # Cancel the monitor task since handler completed successfully - monitor_task.cancel() - - # Record successful result - event.event_result_update(handler=handler, eventbus=self, result=result_value) - if handler_id in event.event_results: - # logger.debug( - # f' ↳ Updated result for {get_handler_name(handler)}#{handler_id[-4:]}: {event.event_results[handler_id].status}' - # ) - pass - else: - logger.error(f' ↳ ERROR: Result not found for {get_handler_name(handler)}#{handler_id[-4:]} after update!') - return cast(T_EventResultType, result_value) - - except asyncio.CancelledError as e: - # Cancel the monitor task on timeout too - monitor_task.cancel() - - # Create a RuntimeError for timeout - # TODO: figure out why it breaks when we try to switch to InterruptedError instead of asyncio.CancelledError - handler_interrupted_error = asyncio.CancelledError( - f'Event handler {get_handler_name(handler)}#{handler_id[-4:]}({event}) was interrupted because of a parent timeout' - ) - event.event_result_update(handler=handler, eventbus=self, error=handler_interrupted_error) - - # import ipdb; ipdb.set_trace() - raise handler_interrupted_error from e - - except TimeoutError as e: - # Cancel the monitor task on timeout too - monitor_task.cancel() - - # Create a RuntimeError for timeout - children = ( - f' and interrupted any processing of {len(event.event_children)} child events' if event.event_children else '' - ) - handler_timeout_error = TimeoutError( - f'Event handler {get_handler_name(handler)}#{handler_id[-4:]}({event}) timed out after {event_result.timeout}s{children}' - ) - event.event_result_update(handler=handler, eventbus=self, error=handler_timeout_error) - event.event_cancel_pending_child_processing(handler_timeout_error) - - from bubus.logging import log_timeout_tree - - log_timeout_tree(event, event_result) - # import ipdb; ipdb.set_trace() - raise handler_timeout_error from e - except Exception as e: - # Cancel the monitor task on error too - monitor_task.cancel() - - # Record error - event.event_result_update(handler=handler, eventbus=self, error=e) - - red = '\033[91m' - reset = '\033[0m' - logger.error( - f'❌ {self} Error in event handler {get_handler_name(handler)}({event}) -> \n{red}{type(e).__name__}({e}){reset}\n{_log_filtered_traceback(e)}', - ) - raise - finally: - # Reset context - _current_event_context.reset(token) - inside_handler_context.reset(handler_token) - _current_handler_id_context.reset(handler_id_token) - - # Ensure handler task is cancelled if it's still running - if handler_task and not handler_task.done(): - handler_task.cancel() - try: - await asyncio.wait_for(handler_task, timeout=0.1) - except (asyncio.CancelledError, TimeoutError): - pass # Expected when we cancel the task - - # Ensure monitor task is cancelled - try: - if not monitor_task.done(): - monitor_task.cancel() - await monitor_task - except asyncio.CancelledError: - pass # Expected when we cancel the monitor - except Exception as e: - # logger.debug(f"❌ {self} Handler monitor task cleanup error for {get_handler_name(handler)}#{str(id(handler))[-4:]}({event}): {type(e).__name__}: {e}") - pass + return cast(T_EventResultType, result_value) def _would_create_loop(self, event: 'BaseEvent[Any]', handler: EventHandler) -> bool: """Check if calling this handler would create a loop""" From e05bf24d6bf86fdaf9e2c4cd1177890922845403 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 15 Oct 2025 14:26:35 -0700 Subject: [PATCH 02/79] cleaner variable names --- bubus/models.py | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/bubus/models.py b/bubus/models.py index 1ed2de4..536e6d5 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -464,7 +464,10 @@ def event_create_pending_results( eventbus: 'EventBus | None' = None, timeout: float | None = None, ) -> 'dict[PythonIdStr, EventResult[T_EventResultType]]': - """Ensure EventResult placeholders exist for provided handlers before execution.""" + """Ensure EventResult placeholders exist for provided handlers before execution. + + Any stale timing/error data from prior runs is cleared so consumers immediately see a fresh pending state. + """ pending_results: dict[PythonIdStr, 'EventResult[T_EventResultType]'] = {} for handler_id, handler in handlers.items(): event_result = self.event_result_update( @@ -998,12 +1001,29 @@ async def execute( *, eventbus: 'EventBus', timeout: float | None, - enter_context: Callable[[BaseEvent[Any], str], tuple[Any, Any, Any]], - exit_context: Callable[[tuple[Any, Any, Any]], None], - log_filtered_traceback: Callable[[BaseException], str], + enter_context: Callable[[BaseEvent[Any], str], tuple[Any, Any, Any]] | None = None, + exit_context: Callable[[tuple[Any, Any, Any]], None] | None = None, + log_filtered_traceback: Callable[[BaseException], str] | None = None, ) -> T_EventResultType | BaseEvent[Any] | None: """Execute the handler and update internal state automatically.""" + def _default_enter(_: BaseEvent[Any], __: str) -> tuple[None, None, None]: + return (None, None, None) + + def _default_exit(_: tuple[Any, Any, Any]) -> None: + return None + + def _default_log(exc: BaseException) -> str: + from traceback import TracebackException + + return ''.join( + TracebackException.from_exception(exc, capture_locals=False).format() + ) + + _enter = enter_context or _default_enter + _exit = exit_context or _default_exit + _log_exc = log_filtered_traceback or _default_log + self.timeout = timeout if timeout is not None else self.timeout or event.event_timeout self.result_type = event.event_result_type self.update(status='started') @@ -1013,7 +1033,7 @@ async def execute( monitor_task: asyncio.Task[None] | None = None handler_task: asyncio.Task[Any] | None = None - tokens = enter_context(event, self.handler_id) + tokens = _enter(event, self.handler_id) async def deadlock_monitor() -> None: await asyncio.sleep(15.0) @@ -1080,7 +1100,7 @@ async def deadlock_monitor() -> None: red = '\033[91m' reset = '\033[0m' logger.error( - f'❌ {eventbus} Error in event handler {self.handler_name}({event}) -> \n{red}{type(exc).__name__}({exc}){reset}\n{log_filtered_traceback(exc)}', + f'❌ {eventbus} Error in event handler {self.handler_name}({event}) -> \n{red}{type(exc).__name__}({exc}){reset}\n{_log_exc(exc)}', ) raise @@ -1102,7 +1122,7 @@ async def deadlock_monitor() -> None: except Exception: pass - exit_context(tokens) + _exit(tokens) def log_tree( self, indent: str = '', is_last: bool = True, child_events_by_parent: dict[str | None, list[BaseEvent[Any]]] | None = None From 9bc7b4b18facf8d5678fef62154228c8b14e5099 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 15 Oct 2025 14:37:06 -0700 Subject: [PATCH 03/79] consistent naming of event_children --- bubus/logging.py | 18 +++++++-------- bubus/models.py | 11 +++++---- tests/test_comprehensive_patterns.py | 4 ++-- tests/test_parent_event_tracking.py | 34 ++++++++++++++-------------- 4 files changed, 35 insertions(+), 32 deletions(-) diff --git a/bubus/logging.py b/bubus/logging.py index b1b3814..7311db0 100644 --- a/bubus/logging.py +++ b/bubus/logging.py @@ -37,7 +37,7 @@ def log_event_tree( event: 'BaseEvent[Any]', indent: str = '', is_last: bool = True, - child_events_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, + event_children_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, ) -> str: from bubus.models import logger @@ -74,8 +74,8 @@ def log_event_tree( # Calculate which is the last item considering both results and unmapped children unmapped_children: list['BaseEvent[Any]'] = [] - if child_events_by_parent: - all_children = child_events_by_parent.get(event.event_id, []) + if event_children_by_parent: + all_children = event_children_by_parent.get(event.event_id, []) for child in all_children: # Will be printed later if not already printed by a handler if child.event_id not in [c.event_id for r in event.event_results.values() for c in r.event_children]: @@ -85,18 +85,18 @@ def log_event_tree( for i, (_handler_id, result) in enumerate(results_sorted): is_last_item = i == total_items - 1 - lines.append(log_eventresult_tree(result, new_indent, is_last_item, child_events_by_parent)) + lines.append(log_eventresult_tree(result, new_indent, is_last_item, event_children_by_parent)) # Track child events printed by this result for child in result.event_children: printed_child_ids.add(child.event_id) # Print unmapped children (those not printed by any handler) - if child_events_by_parent: - children = child_events_by_parent.get(event.event_id, []) + if event_children_by_parent: + children = event_children_by_parent.get(event.event_id, []) for i, child in enumerate(children): if child.event_id not in printed_child_ids: is_last_child = i == len(children) - 1 - lines.append(log_event_tree(child, new_indent, is_last_child, child_events_by_parent)) + lines.append(log_event_tree(child, new_indent, is_last_child, event_children_by_parent)) return '\n'.join(lines) @@ -105,7 +105,7 @@ def log_eventresult_tree( result: 'EventResult[Any]', indent: str = '', is_last: bool = True, - child_events_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, + event_children_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, ) -> str: """Print this result and its child events with proper tree formatting""" @@ -158,7 +158,7 @@ def log_eventresult_tree( if result.event_children: for i, child in enumerate(result.event_children): is_last_child = i == len(result.event_children) - 1 - lines.append(log_event_tree(child, new_indent, is_last_child, child_events_by_parent)) + lines.append(log_event_tree(child, new_indent, is_last_child, event_children_by_parent)) return '\n'.join(lines) diff --git a/bubus/models.py b/bubus/models.py index 536e6d5..c3a676f 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -796,12 +796,12 @@ def event_log_tree( self, indent: str = '', is_last: bool = True, - child_events_by_parent: 'dict[str | None, list[BaseEvent[Any]]] | None' = None, + event_children_by_parent: 'dict[str | None, list[BaseEvent[Any]]] | None' = None, ) -> None: """Print this event and its results with proper tree formatting""" from bubus.logging import log_event_tree - log_event_tree(self, indent, is_last, child_events_by_parent) + log_event_tree(self, indent, is_last, event_children_by_parent) @property def event_bus(self) -> 'EventBus': @@ -1125,12 +1125,15 @@ async def deadlock_monitor() -> None: _exit(tokens) def log_tree( - self, indent: str = '', is_last: bool = True, child_events_by_parent: dict[str | None, list[BaseEvent[Any]]] | None = None + self, + indent: str = '', + is_last: bool = True, + event_children_by_parent: dict[str | None, list[BaseEvent[Any]]] | None = None, ) -> None: """Print this result and its child events with proper tree formatting""" from bubus.logging import log_eventresult_tree - log_eventresult_tree(self, indent, is_last, child_events_by_parent) + log_eventresult_tree(self, indent, is_last, event_children_by_parent) # Resolve forward references diff --git a/tests/test_comprehensive_patterns.py b/tests/test_comprehensive_patterns.py index b98b211..8b63a86 100644 --- a/tests/test_comprehensive_patterns.py +++ b/tests/test_comprehensive_patterns.py @@ -120,8 +120,8 @@ async def parent_bus1_handler(event: ParentEvent) -> str: ) # Child events should have parent's ID - child_events = [e for e in all_events if isinstance(e, (ImmediateChildEvent, QueuedChildEvent))] - assert all(event.event_parent_id == parent_event.event_id for event in child_events) + event_children = [e for e in all_events if isinstance(e, (ImmediateChildEvent, QueuedChildEvent))] + assert all(event.event_parent_id == parent_event.event_id for event in event_children) # Sort results by sequence number to see actual execution order sorted_results = sorted(results, key=lambda x: x[0]) diff --git a/tests/test_parent_event_tracking.py b/tests/test_parent_event_tracking.py index c11090c..5fbe659 100644 --- a/tests/test_parent_event_tracking.py +++ b/tests/test_parent_event_tracking.py @@ -41,13 +41,13 @@ class TestParentEventTracking: async def test_basic_parent_tracking(self, eventbus: EventBus): """Test that child events automatically get event_parent_id""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] async def parent_handler(event: ParentEvent) -> str: # Handler that dispatches a child event child = ChildEvent(data=f'child_of_{event.message}') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) return 'parent_handled' eventbus.on('ParentEvent', parent_handler) # type: ignore[reportUnknownArgumentType] @@ -67,8 +67,8 @@ async def parent_handler(event: ParentEvent) -> str: assert parent_handler_result is not None and parent_handler_result.result == 'parent_handled' # Verify child has event_parent_id set - assert len(child_events) == 1 - child = child_events[0] + assert len(event_children) == 1 + child = event_children[0] assert child.event_parent_id == parent.event_id async def test_multi_level_parent_tracking(self, eventbus: EventBus): @@ -115,14 +115,14 @@ async def grandchild_handler(event: BaseEvent[str]) -> str: async def test_multiple_children_same_parent(self, eventbus: EventBus): """Test multiple child events from same parent""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] async def parent_handler(event: BaseEvent[str]) -> str: # Dispatch multiple children for i in range(3): child = ChildEvent(data=f'child_{i}') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) return 'spawned_children' eventbus.on('ParentEvent', parent_handler) @@ -134,8 +134,8 @@ async def parent_handler(event: BaseEvent[str]) -> str: await eventbus.wait_until_idle() # All children should have same parent - assert len(child_events) == 3 - for child in child_events: + assert len(event_children) == 3 + for child in event_children: assert child.event_parent_id == parent.event_id async def test_parallel_handlers_parent_tracking(self, eventbus: EventBus): @@ -240,13 +240,13 @@ async def bus2_handler(event: BaseEvent[str]) -> str: async def test_sync_handler_parent_tracking(self, eventbus: EventBus): """Test parent tracking works with sync handlers""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] def sync_parent_handler(event: BaseEvent[str]) -> str: # Sync handler that dispatches child child = ChildEvent(data='from_sync') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) return 'sync_handled' eventbus.on('ParentEvent', sync_parent_handler) @@ -257,18 +257,18 @@ def sync_parent_handler(event: BaseEvent[str]) -> str: await eventbus.wait_until_idle() # Parent tracking should work even with sync handlers - assert len(child_events) == 1 - assert child_events[0].event_parent_id == parent.event_id + assert len(event_children) == 1 + assert event_children[0].event_parent_id == parent.event_id async def test_error_handler_parent_tracking(self, eventbus: EventBus): """Test parent tracking when handler errors occur""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] async def failing_handler(event: BaseEvent[str]) -> str: # Dispatch child before failing child = ChildEvent(data='before_error') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) raise ValueError( 'Handler error - expected to fail - testing that parent event tracking works even when handlers error' ) @@ -277,7 +277,7 @@ async def success_handler(event: BaseEvent[str]) -> str: # This should still run child = ChildEvent(data='after_error') eventbus.dispatch(child) - child_events.append(child) + event_children.append(child) return 'success' eventbus.on('ParentEvent', failing_handler) @@ -289,8 +289,8 @@ async def success_handler(event: BaseEvent[str]) -> str: await eventbus.wait_until_idle() # Both children should have event_parent_id despite error - assert len(child_events) == 2 - for child in child_events: + assert len(event_children) == 2 + for child in event_children: assert child.event_parent_id == parent.event_id async def test_event_children_tracking(self, eventbus: EventBus): From aa7361376ebdff2883953901b9bc923599881f24 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 15 Oct 2025 14:47:22 -0700 Subject: [PATCH 04/79] add tests for independent usage --- tests/test_event_result_standalone.py | 86 +++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 tests/test_event_result_standalone.py diff --git a/tests/test_event_result_standalone.py b/tests/test_event_result_standalone.py new file mode 100644 index 0000000..e1da0d5 --- /dev/null +++ b/tests/test_event_result_standalone.py @@ -0,0 +1,86 @@ +import asyncio +from uuid import uuid4 + +import pytest + +from bubus.models import BaseEvent, EventResult, get_handler_id + + +class _StubEvent: + """Minimal event-like object used to verify EventResult independence.""" + + def __init__(self): + self.event_id = 'stub-event' + self.event_children: list[BaseEvent | _StubEvent] = [] + self.event_result_type = str + self.event_timeout = 0.5 + self.event_processed_at = None + self.event_results: dict[str, EventResult] = {} + self._cancelled_with: BaseException | None = None + + def event_cancel_pending_child_processing(self, error: BaseException) -> None: + self._cancelled_with = error + + +@pytest.mark.asyncio +async def test_event_result_execute_without_base_event() -> None: + """EventResult should execute without requiring a real BaseEvent or EventBus.""" + + stub_event = _StubEvent() + + event_result = EventResult( + event_id=str(uuid4()), + handler_id=str(id(lambda: None)), + handler_name='handler', + eventbus_id=str(id(object())), + eventbus_name='Standalone', + timeout=stub_event.event_timeout, + result_type=str, + ) + + async def handler(event: _StubEvent) -> str: + return 'ok' + + result_value = await event_result.execute( + stub_event, + handler, + eventbus='StandaloneBus', + timeout=stub_event.event_timeout, + ) + + assert result_value == 'ok' + assert event_result.status == 'completed' + assert event_result.result == 'ok' + assert stub_event._cancelled_with is None + + +class StandaloneEvent(BaseEvent[str]): + data: str + + +@pytest.mark.asyncio +async def test_event_and_result_without_eventbus() -> None: + """Verify BaseEvent + EventResult work without instantiating an EventBus.""" + + event = StandaloneEvent(data='message') + + def handler(evt: StandaloneEvent) -> str: + return evt.data.upper() + + handler_id = get_handler_id(handler, None) + pending_results = event.event_create_pending_results({handler_id: handler}) + event_result = pending_results[handler_id] + + value = await event_result.execute( + event, + handler, + eventbus='StandaloneBus', + timeout=event.event_timeout, + ) + + assert value == 'MESSAGE' + assert event_result.status == 'completed' + assert event.event_results[handler_id] is event_result + + event.event_mark_complete_if_all_handlers_completed() + assert event.event_completed_at is not None From 582aefc0eba991cd33d4f5b77d571500c3643af4 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 15 Oct 2025 17:36:18 -0700 Subject: [PATCH 05/79] add support for middlewares to hook into event bus handler lifecycle --- README.md | 54 ++++++++- bubus/__init__.py | 4 + bubus/middlewares.py | 257 +++++++++++++++++++++++++++++++++++++++++ bubus/service.py | 237 ++++++++++++++++++++++--------------- tests/test_eventbus.py | 179 +++++++++++++++++++++++++++- 5 files changed, 629 insertions(+), 102 deletions(-) create mode 100644 bubus/middlewares.py diff --git a/README.md b/README.md index afd7ed8..df2c09b 100644 --- a/README.md +++ b/README.md @@ -477,11 +477,29 @@ await bus.dispatch(DataEvent()) Persist events automatically to a `jsonl` file for future replay and debugging: ```python +from pathlib import Path + +from bubus import EventBus +from bubus.middlewares import ( + LoggerEventBusMiddleware, + SQLiteEventBusMiddleware, + WALEventBusMiddleware, +) + # Enable WAL event log persistence (optional) -bus = EventBus(name='MyBus', wal_path='./events.jsonl') +bus = EventBus( + name='MyBus', + middlewares=[ + WALEventBusMiddleware('./events.jsonl'), + LoggerEventBusMiddleware('./events.log'), + SQLiteEventBusMiddleware('./events.sqlite'), + ], +) + +# LoggerEventBusMiddleware defaults to stdout-only logging if no file path is provided # All completed events are automatically appended as JSON lines to the end -bus.dispatch(SecondEventAbc(some_key="banana")) +await bus.dispatch(SecondEventAbc(some_key="banana")) ``` `./events.jsonl`: @@ -507,17 +525,43 @@ The main event bus class that manages event processing and handler execution. ```python EventBus( name: str | None = None, - wal_path: Path | str | None = None, parallel_handlers: bool = False, - max_history_size: int | None = 50 + max_history_size: int | None = 50, + middlewares: Sequence[EventBusMiddleware | type[EventBusMiddleware]] | None = None, ) ``` **Parameters:** - `name`: Optional unique name for the bus (auto-generated if not provided) -- `wal_path`: Path for write-ahead logging of events to a `jsonl` file (optional) - `parallel_handlers`: If `True`, handlers run concurrently for each event, otherwise serially if `False` (the default) +- `middlewares`: Optional list of `EventBusMiddleware` subclasses or instances that hook into handler execution for analytics, logging, retries, etc. + +Handler middlewares subclass `EventBusMiddleware` and override whichever lifecycle hooks they need: + +```python +from bubus.middlewares import EventBusMiddleware + +class AnalyticsMiddleware(EventBusMiddleware): + async def before_handler(self, eventbus, event, event_result): + await analytics_bus.dispatch(HandlerStartedAnalyticsEvent(event_id=event_result.event_id)) + + async def after_handler(self, eventbus, event, event_result): + await analytics_bus.dispatch(HandlerCompletedAnalyticsEvent(event_id=event_result.event_id)) + + async def on_handler_error(self, eventbus, event, event_result, error): + await analytics_bus.dispatch(HandlerCompletedAnalyticsEvent(event_id=event_result.event_id, error=error)) +``` + +Middlewares can observe or mutate the `EventResult` at each step, dispatch additional events, or trigger other side effects (metrics, retries, auth checks, etc.). + +The built-in `SQLiteEventBusMiddleware` mirrors every event and handler transition into append-only `events_log` and `event_results_log` tables, making it easy to inspect or audit the bus state: + +```python +from bubus.middlewares import SQLiteEventBusMiddleware + +bus = EventBus(middlewares=[SQLiteEventBusMiddleware('./events.sqlite')]) +``` - `max_history_size`: Maximum number of events to keep in history (default: 50, None = unlimited) #### `EventBus` Properties diff --git a/bubus/__init__.py b/bubus/__init__.py index df6e6e2..871b740 100644 --- a/bubus/__init__.py +++ b/bubus/__init__.py @@ -1,10 +1,14 @@ """Event bus for the browser-use agent.""" +from bubus.middlewares import EventBusMiddleware, LoggerEventBusMiddleware, SQLiteEventBusMiddleware from bubus.models import BaseEvent, EventHandler, EventResult, PythonIdentifierStr, PythonIdStr, UUIDStr from bubus.service import EventBus __all__ = [ 'EventBus', + 'EventBusMiddleware', + 'LoggerEventBusMiddleware', + 'SQLiteEventBusMiddleware', 'BaseEvent', 'EventResult', 'EventHandler', diff --git a/bubus/middlewares.py b/bubus/middlewares.py new file mode 100644 index 0000000..39efff9 --- /dev/null +++ b/bubus/middlewares.py @@ -0,0 +1,257 @@ +"""Reusable EventBus middleware helpers.""" + +from __future__ import annotations + +import asyncio +import logging +import sqlite3 +import threading +from pathlib import Path +from typing import Any + +from bubus.logging import log_eventbus_tree +from bubus.models import BaseEvent +from bubus.service import EventBus, EventBusMiddleware as _EventBusMiddleware + +__all__ = ['EventBusMiddleware', 'WALEventBusMiddleware', 'LoggerEventBusMiddleware', 'SQLiteEventBusMiddleware'] + +logger = logging.getLogger('bubus.middleware') + +EventBusMiddleware = _EventBusMiddleware + + +class WALEventBusMiddleware(EventBusMiddleware): + """Persist completed events to a JSONL write-ahead log.""" + + def __init__(self, wal_path: Path | str): + self.wal_path = Path(wal_path) + self.wal_path.parent.mkdir(parents=True, exist_ok=True) + self._lock = threading.Lock() + + async def after_event(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: + if getattr(event, '_wal_written', False): + return + + if not self._event_is_complete(event): + return + + try: + await asyncio.to_thread(self._write_event, event) + setattr(event, '_wal_written', True) + except Exception as exc: # pragma: no cover - logging branch + logger.error( + '❌ %s Failed to save event %s to WAL file %s: %s %s', + eventbus, + event.event_id, + self.wal_path, + type(exc).__name__, + exc, + ) + + def _event_is_complete(self, event: BaseEvent[Any]) -> bool: + signal = event.event_completed_signal + if signal is not None and not signal.is_set(): + return False + if any(result.status not in ('completed', 'error') for result in event.event_results.values()): + return False + return event.event_are_all_children_complete() + + def _write_event(self, event: BaseEvent[Any]) -> None: + event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] + with self._lock: + with self.wal_path.open('a', encoding='utf-8') as fp: + fp.write(event_json + '\n') + + +class LoggerEventBusMiddleware(EventBusMiddleware): + """Log completed events using the existing logging helpers and optionally mirror to a text file.""" + + def __init__(self, log_path: Path | str | None = None): + self.log_path = Path(log_path) if log_path is not None else None + if self.log_path is not None: + self.log_path.parent.mkdir(parents=True, exist_ok=True) + + async def after_event(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: + if getattr(event, '_logger_middleware_logged', False): + return + + if not self._event_is_complete(event): + return + + setattr(event, '_logger_middleware_logged', True) + + summary = event.event_log_safe_summary() + logger.info('βœ… %s completed event %s', eventbus, summary) + + line = f'[{eventbus.name}] {summary}\n' + await asyncio.to_thread(self._append_line, line) + + if logger.isEnabledFor(logging.DEBUG): + log_eventbus_tree(eventbus) + + def _event_is_complete(self, event: BaseEvent[Any]) -> bool: + signal = event.event_completed_signal + if signal is not None and not signal.is_set(): + return False + if any(result.status not in ('completed', 'error') for result in event.event_results.values()): + return False + return event.event_are_all_children_complete() + + def _append_line(self, line: str) -> None: + if self.log_path is not None: + with self.log_path.open('a', encoding='utf-8') as fp: + fp.write(line) + print(line.rstrip('\n'), flush=True) + + +class SQLiteEventBusMiddleware(EventBusMiddleware): + """Mirror events and handler results into append-only SQLite tables.""" + + def __init__(self, db_path: str | Path): + self.db_path = Path(db_path) + self.db_path.parent.mkdir(parents=True, exist_ok=True) + self._conn = sqlite3.connect(self.db_path, check_same_thread=False) + self._conn.execute('PRAGMA journal_mode=WAL') + self._conn.execute('PRAGMA synchronous=NORMAL') + self._setup_schema() + self._lock = asyncio.Lock() + + def __del__(self): + try: + self._conn.close() + except Exception: + pass + + def _setup_schema(self) -> None: + self._conn.execute( + ''' + CREATE TABLE IF NOT EXISTS events_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_id TEXT NOT NULL, + event_type TEXT NOT NULL, + event_status TEXT NOT NULL, + eventbus_name TEXT, + event_json TEXT NOT NULL, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + ''' + ) + self._conn.execute( + ''' + CREATE TABLE IF NOT EXISTS event_results_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_id TEXT NOT NULL, + handler_id TEXT NOT NULL, + handler_name TEXT NOT NULL, + eventbus_id TEXT NOT NULL, + eventbus_name TEXT NOT NULL, + status TEXT NOT NULL, + result_repr TEXT, + error_repr TEXT, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + ''' + ) + self._conn.commit() + + async def before_handler(self, eventbus: EventBus, event: BaseEvent[Any], event_result) -> None: + await self._insert_event_result(event_result) + + async def after_handler(self, eventbus: EventBus, event: BaseEvent[Any], event_result) -> None: + await self._insert_event_result(event_result) + + async def on_handler_error( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result, + error: BaseException, + ) -> None: + await self._insert_event_result(event_result, error_override=error) + + async def after_event(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: + if getattr(event, '_sqlite_logged', False): + return + + if not self._event_is_complete(event): + return + + await self._insert_event(eventbus, event) + setattr(event, '_sqlite_logged', True) + + async def _insert_event_result(self, event_result, error_override: BaseException | None = None) -> None: + error = error_override or event_result.error + error_repr = repr(error) if error is not None else None + result_repr = None + if event_result.result is not None and error is None: + try: + result_repr = repr(event_result.result) + except Exception: + result_repr = '' + + await self._execute( + ''' + INSERT INTO event_results_log ( + event_id, + handler_id, + handler_name, + eventbus_id, + eventbus_name, + status, + result_repr, + error_repr + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + ''', + ( + event_result.event_id, + event_result.handler_id, + event_result.handler_name, + event_result.eventbus_id, + event_result.eventbus_name, + event_result.status, + result_repr, + error_repr, + ), + ) + + async def _insert_event(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: + event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] + has_error = any(result.status == 'error' for result in event.event_results.values()) + event_status = 'error' if has_error else event.event_status + + await self._execute( + ''' + INSERT INTO events_log ( + event_id, + event_type, + event_status, + eventbus_name, + event_json + ) + VALUES (?, ?, ?, ?, ?) + ''', + ( + event.event_id, + event.event_type, + event_status, + eventbus.name, + event_json, + ), + ) + + async def _execute(self, sql: str, params: tuple[Any, ...]) -> None: + async with self._lock: + await asyncio.to_thread(self._run_execute, sql, params) + + def _run_execute(self, sql: str, params: tuple[Any, ...]) -> None: + self._conn.execute(sql, params) + self._conn.commit() + + def _event_is_complete(self, event: BaseEvent[Any]) -> bool: + signal = event.event_completed_signal + if signal is not None and not signal.is_set(): + return False + if any(result.status not in ('completed', 'error') for result in event.event_results.values()): + return False + return event.event_are_all_children_complete() diff --git a/bubus/service.py b/bubus/service.py index 72f652e..df19715 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -6,12 +6,11 @@ import warnings import weakref from collections import defaultdict, deque -from collections.abc import Callable +from collections.abc import Callable, Sequence from contextvars import ContextVar from pathlib import Path from typing import Any, Literal, TypeVar, cast, overload -import anyio # pyright: ignore[reportMissingImports] from uuid_extensions import uuid7str # pyright: ignore[reportMissingImports, reportUnknownVariableType] uuid7str: Callable[[], str] = uuid7str # pyright: ignore @@ -34,6 +33,7 @@ UUIDStr, get_handler_id, get_handler_name, + EventResult, ) logger = logging.getLogger('bubus') @@ -52,6 +52,31 @@ class QueueShutDown(Exception): EventPatternType = PythonIdentifierStr | Literal['*'] | type['BaseEvent[Any]'] +class EventBusMiddleware: + """Base class for EventBus middlewares.""" + + async def before_handler( + self, eventbus: 'EventBus', event: 'BaseEvent[Any]', event_result: EventResult[Any] + ) -> None: + return None + + async def after_handler( + self, eventbus: 'EventBus', event: 'BaseEvent[Any]', event_result: EventResult[Any] + ) -> None: + return None + + async def on_handler_error( + self, + eventbus: 'EventBus', + event: 'BaseEvent[Any]', + event_result: EventResult[Any], + error: BaseException, + ) -> None: + return None + + async def after_event(self, eventbus: 'EventBus', event: 'BaseEvent[Any]') -> None: + return None + class CleanShutdownQueue(asyncio.Queue[QueueEntryType]): """asyncio.Queue subclass that handles shutdown cleanly without warnings.""" @@ -263,7 +288,6 @@ class EventBus: # Class Attributes name: PythonIdentifierStr = 'EventBus' parallel_handlers: bool = False - wal_path: Path | None = None # Runtime State id: UUIDStr = '00000000-0000-0000-0000-000000000000' @@ -278,9 +302,9 @@ class EventBus: def __init__( self, name: PythonIdentifierStr | None = None, - wal_path: Path | str | None = None, parallel_handlers: bool = False, max_history_size: int | None = 50, # Keep only 50 events in history + middlewares: Sequence[EventBusMiddleware | type[EventBusMiddleware]] | None = None, ): self.id = uuid7str() self.name = name or f'{self.__class__.__name__}_{self.id[-8:]}' @@ -332,8 +356,9 @@ def __init__( self.event_history = {} self.handlers = defaultdict(list) self.parallel_handlers = parallel_handlers - self.wal_path = Path(wal_path) if wal_path else None self._on_idle = None + self._middlewares: list[EventBusMiddleware] = [] + self.middlewares = list(middlewares or []) # Memory leak prevention settings self.max_history_size = max_history_size @@ -341,11 +366,6 @@ def __init__( # Register this instance EventBus.all_instances.add(self) - # Instead of registering as normal event handlers, - # these special handlers are just called manually at the end of step - # self.on('*', self._default_log_handler) - # self.on('*', self._default_wal_handler) - def __del__(self): """Auto-cleanup on garbage collection""" # Most cleanup should have been done by the event loop close hook @@ -371,6 +391,71 @@ def __str__(self) -> str: def __repr__(self) -> str: return str(self) + @property + def middlewares(self) -> list[EventBusMiddleware]: + return getattr(self, '_middlewares', []) + + @middlewares.setter + def middlewares(self, value: Sequence[EventBusMiddleware | type[EventBusMiddleware]]) -> None: + instances: list[EventBusMiddleware] = [] + for middleware in value: + if isinstance(middleware, EventBusMiddleware): + instances.append(middleware) + elif inspect.isclass(middleware) and issubclass(middleware, EventBusMiddleware): + instances.append(middleware()) + else: + raise TypeError( + f'Invalid middleware {middleware!r}. Expected EventBusMiddleware instance or subclass.' + ) + self._middlewares = instances + + async def _call_middleware_hook( + self, + middleware: EventBusMiddleware, + method_name: str, + *args: Any, + ) -> None: + method = getattr(middleware, method_name, None) + if method is None: + return + result = method(*args) + if inspect.isawaitable(result): + await result + + async def _middlewares_before_handler(self, event: 'BaseEvent[Any]', event_result: EventResult[Any]) -> None: + for middleware in self._middlewares: + await self._call_middleware_hook(middleware, 'before_handler', self, event, event_result) + + async def _middlewares_after_handler(self, event: 'BaseEvent[Any]', event_result: EventResult[Any]) -> None: + for middleware in self._middlewares: + await self._call_middleware_hook(middleware, 'after_handler', self, event, event_result) + + async def _middlewares_on_error( + self, event: 'BaseEvent[Any]', event_result: EventResult[Any], error: BaseException + ) -> None: + for middleware in self._middlewares: + await self._call_middleware_hook(middleware, 'on_handler_error', self, event, event_result, error) + + async def _middleware_after_event(self, event: 'BaseEvent[Any]') -> None: + for middleware in self._middlewares: + await self._call_middleware_hook(middleware, 'after_event', self, event) + + async def _dispatch_after_event_hooks(self, event: 'BaseEvent[Any]') -> None: + if getattr(event, '_after_event_hooks_run', False): + return + + event_completed = False + if event.event_completed_signal is not None and event.event_completed_signal.is_set(): + event_completed = True + elif event.event_results and all(result.status in ('completed', 'error') for result in event.event_results.values()): + event_completed = True + + if not event_completed: + return + + setattr(event, '_after_event_hooks_run', True) + await self._middleware_after_event(event) + @property def events_pending(self) -> list['BaseEvent[Any]']: """Get events that haven't started processing yet (does not include events that have not even finished dispatching yet in self.event_queue)""" @@ -975,12 +1060,11 @@ async def process_event(self, event: 'BaseEvent[Any]', timeout: float | None = N # Execute handlers await self._execute_handlers(event, handlers=applicable_handlers, timeout=timeout) - await self._default_log_handler(event) - await self._default_wal_handler(event) - # Mark event as complete if all handlers are done event.event_mark_complete_if_all_handlers_completed() + await self._dispatch_after_event_hooks(event) + # After processing this event, check if any parent events can now be marked complete # We do this by walking up the parent chain current = event @@ -991,10 +1075,12 @@ async def process_event(self, event: 'BaseEvent[Any]', timeout: float | None = N # Find parent event in any bus's history parent_event = None + parent_bus: EventBus | None = None # Create a list copy to avoid "Set changed size during iteration" error for bus in list(EventBus.all_instances): if bus and current.event_parent_id in bus.event_history: parent_event = bus.event_history[current.event_parent_id] + parent_bus = bus break if not parent_event: @@ -1004,6 +1090,9 @@ async def process_event(self, event: 'BaseEvent[Any]', timeout: float | None = N if parent_event.event_completed_signal and not parent_event.event_completed_signal.is_set(): parent_event.event_mark_complete_if_all_handlers_completed() + if parent_bus: + await parent_bus._dispatch_after_event_hooks(parent_event) + # Move up the chain current = parent_event @@ -1078,35 +1167,39 @@ async def _execute_handlers( # print('FINSIHED EXECUTING ALL HANDLERS') async def execute_handler( - self, event: 'BaseEvent[T_EventResultType]', handler: EventHandler, timeout: float | None = None + self, + event: 'BaseEvent[T_EventResultType]', + handler: EventHandler, + timeout: float | None = None, ) -> Any: - """Safely execute a single handler with deadlock detection""" + """Safely execute a single handler with middleware support.""" - # Check if this handler has already been executed for this event handler_id = get_handler_id(handler, self) - logger.debug(f' ↳ {self}.execute_handler({event}, handler={get_handler_name(handler)}#{handler_id[-4:]})') - if handler_id in event.event_results: - existing_result = event.event_results[handler_id] - if existing_result.started_at is not None: - raise RuntimeError( - f'Handler {get_handler_name(handler)}#{handler_id[-4:]} has already been executed for event {event.event_id}. ' - f'Previous execution started at {existing_result.started_at}' - ) - # Mark handler as started + event_result = event.event_results.get(handler_id) + if event_result is None: + event_result = event.event_result_update( + handler=handler, eventbus=self, status='pending', timeout=timeout or event.event_timeout + ) + elif event_result.started_at is not None: + raise RuntimeError( + f'Handler {get_handler_name(handler)}#{handler_id[-4:]} has already been executed for event {event.event_id}. ' + f'Previous execution started at {event_result.started_at}' + ) + + handler_id = get_handler_id(handler, self) + event_result = event.event_result_update( handler=handler, eventbus=self, status='started', timeout=timeout or event.event_timeout ) - # Set the current event in context so child events can reference it + await self._middlewares_before_handler(event, event_result) + token = _current_event_context.set(event) - # Mark that we're inside a handler handler_token = inside_handler_context.set(True) - # Set the current handler ID so child events can be tracked handler_id_token = _current_handler_id_context.set(handler_id) - # Create a task to monitor for potential deadlock / slow handlers async def deadlock_monitor(): await asyncio.sleep(15.0) logger.warning( @@ -1120,21 +1213,13 @@ async def deadlock_monitor(): ) handler_task = None + final_result: EventResult[Any] | None = None try: if inspect.iscoroutinefunction(handler): - # Create a task for the handler so we can properly cancel it on timeout handler_task = asyncio.create_task(handler(event)) # type: ignore - # This allows us to process child events when the handler awaits them result_value: Any = await asyncio.wait_for(handler_task, timeout=event_result.timeout) elif inspect.isfunction(handler) or inspect.ismethod(handler): - # If handler function is sync function, run it directly in the main thread - # This blocks but ensures we have access to the event loop, dont run it in a subthread! - result_value: Any = handler(event) - - # If the sync handler returned a BaseEvent (from dispatch), DON'T await it - # For forwarding handlers like bus.on('*', other_bus.dispatch), the handler - # has already queued the event on the target bus. The event will be tracked - # as a child event automatically. + result_value = handler(event) if isinstance(result_value, BaseEvent): logger.debug( f'Handler {get_handler_name(handler)} returned BaseEvent, not awaiting to avoid circular dependency' @@ -1145,59 +1230,45 @@ async def deadlock_monitor(): logger.debug( f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {type(result_value).__name__} {str(result_value)[:26]}...' # pyright: ignore ) - # Cancel the monitor task since handler completed successfully monitor_task.cancel() - # Record successful result - event.event_result_update(handler=handler, eventbus=self, result=result_value) - if handler_id in event.event_results: - # logger.debug( - # f' ↳ Updated result for {get_handler_name(handler)}#{handler_id[-4:]}: {event.event_results[handler_id].status}' - # ) - pass - else: - logger.error(f' ↳ ERROR: Result not found for {get_handler_name(handler)}#{handler_id[-4:]} after update!') - return cast(T_EventResultType, result_value) + final_result = event.event_result_update(handler=handler, eventbus=self, result=result_value) + + await self._middlewares_after_handler(event, final_result) + return cast(T_EventResultType, final_result.result) except asyncio.CancelledError as e: - # Cancel the monitor task on timeout too monitor_task.cancel() - - # Create a RuntimeError for timeout - # TODO: figure out why it breaks when we try to switch to InterruptedError instead of asyncio.CancelledError handler_interrupted_error = asyncio.CancelledError( f'Event handler {get_handler_name(handler)}#{handler_id[-4:]}({event}) was interrupted because of a parent timeout' ) - event.event_result_update(handler=handler, eventbus=self, error=handler_interrupted_error) - - # import ipdb; ipdb.set_trace() + final_result = event.event_result_update(handler=handler, eventbus=self, error=handler_interrupted_error) + await self._middlewares_on_error(event, final_result, handler_interrupted_error) raise handler_interrupted_error from e except TimeoutError as e: - # Cancel the monitor task on timeout too monitor_task.cancel() - - # Create a RuntimeError for timeout children = ( f' and interrupted any processing of {len(event.event_children)} child events' if event.event_children else '' ) handler_timeout_error = TimeoutError( f'Event handler {get_handler_name(handler)}#{handler_id[-4:]}({event}) timed out after {event_result.timeout}s{children}' ) - event.event_result_update(handler=handler, eventbus=self, error=handler_timeout_error) + final_result = event.event_result_update(handler=handler, eventbus=self, error=handler_timeout_error) event.event_cancel_pending_child_processing(handler_timeout_error) from bubus.logging import log_timeout_tree - log_timeout_tree(event, event_result) - # import ipdb; ipdb.set_trace() + if final_result is not None: + log_timeout_tree(event, final_result) + await self._middlewares_on_error(event, final_result, handler_timeout_error) raise handler_timeout_error from e except Exception as e: - # Cancel the monitor task on error too monitor_task.cancel() - # Record error - event.event_result_update(handler=handler, eventbus=self, error=e) + final_result = event.event_result_update(handler=handler, eventbus=self, error=e) + + await self._middlewares_on_error(event, final_result, e) red = '\033[91m' reset = '\033[0m' @@ -1206,29 +1277,28 @@ async def deadlock_monitor(): ) raise finally: - # Reset context _current_event_context.reset(token) inside_handler_context.reset(handler_token) _current_handler_id_context.reset(handler_id_token) - # Ensure handler task is cancelled if it's still running if handler_task and not handler_task.done(): handler_task.cancel() try: await asyncio.wait_for(handler_task, timeout=0.1) except (asyncio.CancelledError, TimeoutError): - pass # Expected when we cancel the task + pass - # Ensure monitor task is cancelled try: if not monitor_task.done(): monitor_task.cancel() await monitor_task except asyncio.CancelledError: - pass # Expected when we cancel the monitor - except Exception as e: - # logger.debug(f"❌ {self} Handler monitor task cleanup error for {get_handler_name(handler)}#{str(id(handler))[-4:]}({event}): {type(e).__name__}: {e}") pass + except Exception: + pass + + assert final_result is not None, 'Handler execution did not produce an EventResult' + return final_result.result def _would_create_loop(self, event: 'BaseEvent[Any]', handler: EventHandler) -> bool: """Check if calling this handler would create a loop""" @@ -1322,27 +1392,6 @@ def _handler_dispatched_ancestor( # Recursively check the parent's ancestry return self._handler_dispatched_ancestor(parent_event, handler_id, visited, depth) - async def _default_log_handler(self, event: 'BaseEvent[Any]') -> None: - """Default handler that logs all events""" - # logger.debug( - # f'βœ… {self} completed: {event} -> {list(event.event_results.values()) or ''}' - # ) - pass - - async def _default_wal_handler(self, event: 'BaseEvent[Any]') -> None: - """Persist completed event to WAL file as JSONL""" - - if not self.wal_path: - return None - - try: - event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] - self.wal_path.parent.mkdir(parents=True, exist_ok=True) - async with await anyio.open_file(self.wal_path, 'a', encoding='utf-8') as f: # pyright: ignore[reportUnknownMemberType] - await f.write(event_json + '\n') # pyright: ignore[reportUnknownMemberType] - except Exception as e: - logger.error(f'❌ {self} Failed to save event {event.event_id} to WAL file: {type(e).__name__} {e}\n{event}') - def cleanup_excess_events(self) -> int: """ Clean up excess events from event_history based on max_history_size. diff --git a/tests/test_eventbus.py b/tests/test_eventbus.py index b4cb977..5e86890 100644 --- a/tests/test_eventbus.py +++ b/tests/test_eventbus.py @@ -17,6 +17,7 @@ import asyncio import json import os +import sqlite3 import time from datetime import datetime, timezone from typing import Any @@ -25,6 +26,12 @@ from pydantic import Field from bubus import BaseEvent, EventBus +from bubus.middlewares import ( + EventBusMiddleware, + LoggerEventBusMiddleware, + SQLiteEventBusMiddleware, + WALEventBusMiddleware, +) class CreateAgentTaskEvent(BaseEvent): @@ -694,7 +701,7 @@ async def test_wal_persistence_handler(self, tmp_path): """Test that events are automatically persisted to WAL file""" # Create event bus with WAL path wal_path = tmp_path / 'test_events.jsonl' - bus = EventBus(name='TestBus', wal_path=wal_path) + bus = EventBus(name='TestBus', middlewares=[WALEventBusMiddleware(wal_path)]) try: # Emit some events @@ -734,7 +741,7 @@ async def test_wal_persistence_creates_parent_dir(self, tmp_path): assert not wal_path.parent.exists() # Create event bus - bus = EventBus(name='TestBus', wal_path=wal_path) + bus = EventBus(name='TestBus', middlewares=[WALEventBusMiddleware(wal_path)]) try: # Emit an event @@ -755,7 +762,7 @@ async def test_wal_persistence_creates_parent_dir(self, tmp_path): async def test_wal_persistence_skips_incomplete_events(self, tmp_path): """Test that WAL persistence only writes completed events""" wal_path = tmp_path / 'incomplete_events.jsonl' - bus = EventBus(name='TestBus', wal_path=wal_path) + bus = EventBus(name='TestBus', middlewares=[WALEventBusMiddleware(wal_path)]) try: # Add a slow handler that will delay completion @@ -789,6 +796,172 @@ async def slow_handler(event: BaseEvent) -> str: await bus.stop() +class TestHandlerMiddleware: + """Tests for the handler middleware pipeline.""" + + async def test_middleware_wraps_successful_handler(self): + calls: list[tuple[str, str]] = [] + + class TrackingMiddleware(EventBusMiddleware): + def __init__(self, call_log: list[tuple[str, str]]): + self.call_log = call_log + + async def before_handler(self, eventbus: EventBus, event: BaseEvent, event_result): + self.call_log.append(('before', event_result.status)) + + async def after_handler(self, eventbus: EventBus, event: BaseEvent, event_result): + self.call_log.append(('after', event_result.status)) + + bus = EventBus(middlewares=[TrackingMiddleware(calls)]) + bus.on('UserActionEvent', lambda event: 'ok') + + try: + completed = await bus.dispatch(UserActionEvent(action='test', user_id='user1')) + await bus.wait_until_idle() + + assert completed.event_results + result = next(iter(completed.event_results.values())) + assert result.status == 'completed' + assert result.result == 'ok' + assert calls == [('before', 'started'), ('after', 'completed')] + finally: + await bus.stop() + + async def test_middleware_observes_handler_errors(self): + observations: list[tuple[str, str]] = [] + + class ErrorMiddleware(EventBusMiddleware): + def __init__(self, log: list[tuple[str, str]]): + self.log = log + + async def before_handler(self, eventbus: EventBus, event: BaseEvent, event_result): + self.log.append(('before', event_result.status)) + + async def on_handler_error( + self, + eventbus: EventBus, + event: BaseEvent, + event_result, + error: BaseException, + ): + self.log.append(('error', type(error).__name__)) + + async def failing_handler(event: BaseEvent) -> None: + raise ValueError('boom') + + bus = EventBus(middlewares=[ErrorMiddleware(observations)]) + bus.on('UserActionEvent', failing_handler) + + try: + event = await bus.dispatch(UserActionEvent(action='fail', user_id='user2')) + await bus.wait_until_idle() + + result = next(iter(event.event_results.values())) + assert result.status == 'error' + assert isinstance(result.error, ValueError) + assert observations == [('before', 'started'), ('error', 'ValueError')] + finally: + await bus.stop() + + +class TestSQLiteMiddleware: + async def test_sqlite_middleware_persists_events_and_results(self, tmp_path): + db_path = tmp_path / 'events.sqlite' + middleware = SQLiteEventBusMiddleware(db_path) + bus = EventBus(middlewares=[middleware]) + + async def handler(event: BaseEvent) -> str: + return 'ok' + + bus.on('UserActionEvent', handler) + + try: + await bus.dispatch(UserActionEvent(action='ping', user_id='u-1')) + await bus.wait_until_idle() + + conn = sqlite3.connect(db_path) + events = conn.execute('SELECT event_id, event_type, event_status, event_json FROM events_log').fetchall() + assert len(events) == 1 + assert events[0][1] == 'UserActionEvent' + assert events[0][2] == 'completed' + + result_rows = conn.execute( + 'SELECT status, result_repr, error_repr FROM event_results_log ORDER BY id' + ).fetchall() + conn.close() + + assert [status for status, *_ in result_rows] == ['started', 'completed'] + assert result_rows[-1][1] == "'ok'" + assert result_rows[-1][2] is None + finally: + await bus.stop() + + +class TestLoggerMiddleware: + async def test_logger_middleware_writes_file(self, tmp_path): + log_path = tmp_path / 'events.log' + bus = EventBus(middlewares=[LoggerEventBusMiddleware(log_path)]) + + async def handler(event: BaseEvent) -> str: + return 'logged' + + bus.on('UserActionEvent', handler) + + try: + await bus.dispatch(UserActionEvent(action='log', user_id='user')) + await bus.wait_until_idle() + + assert log_path.exists() + contents = log_path.read_text().strip().splitlines() + assert contents + assert 'UserActionEvent' in contents[-1] + finally: + await bus.stop() + + async def test_logger_middleware_stdout_only(self, capsys): + bus = EventBus(middlewares=[LoggerEventBusMiddleware()]) + + async def handler(event: BaseEvent) -> str: + return 'stdout' + + bus.on('UserActionEvent', handler) + + try: + await bus.dispatch(UserActionEvent(action='log', user_id='user')) + await bus.wait_until_idle() + + captured = capsys.readouterr() + assert 'UserActionEvent' in captured.out + assert 'stdout' not in captured.err + finally: + await bus.stop() + async def test_sqlite_middleware_records_errors(self, tmp_path): + db_path = tmp_path / 'events.sqlite' + middleware = SQLiteEventBusMiddleware(db_path) + bus = EventBus(middlewares=[middleware]) + + async def failing_handler(event: BaseEvent) -> None: + raise RuntimeError('handler boom') + + bus.on('UserActionEvent', failing_handler) + + try: + await bus.dispatch(UserActionEvent(action='boom', user_id='u-2')) + await bus.wait_until_idle() + + conn = sqlite3.connect(db_path) + result_rows = conn.execute( + 'SELECT status, error_repr FROM event_results_log ORDER BY id' + ).fetchall() + events = conn.execute('SELECT event_status FROM events_log').fetchall() + conn.close() + + assert [status for status, _ in result_rows] == ['started', 'error'] + assert 'RuntimeError' in result_rows[-1][1] + assert events[0][0] == 'error' + finally: + await bus.stop() + class TestEventBusHierarchy: """Test hierarchical EventBus subscription patterns""" From 518ee27dba5441b98d537fdd6a6d1351be42a746 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 15 Oct 2025 18:40:26 -0700 Subject: [PATCH 06/79] better variable naming --- README.md | 2 +- bubus/models.py | 18 +++++++++--------- bubus/service.py | 32 +++++++++++++++++--------------- 3 files changed, 27 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 9131925..c2a107b 100644 --- a/README.md +++ b/README.md @@ -873,7 +873,7 @@ handler_result = event.event_results['handler_id'] value = await handler_result # Returns result or raises an exception if handler hits an error ``` -- `execute(event, handler, *, eventbus, timeout, enter_context, exit_context, log_filtered_traceback)` +- `execute(event, handler, *, eventbus, timeout, enter_handler_context, exit_handler_context, format_exception_for_log)` Low-level helper that runs the handler, updates timing/status fields, captures errors, and notifies its completion signal. `EventBus.execute_handler()` delegates to this; you generally only need it when building a custom bus or integrating the event system into another dispatcher. --- diff --git a/bubus/models.py b/bubus/models.py index c3a676f..97c5b46 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -1001,9 +1001,9 @@ async def execute( *, eventbus: 'EventBus', timeout: float | None, - enter_context: Callable[[BaseEvent[Any], str], tuple[Any, Any, Any]] | None = None, - exit_context: Callable[[tuple[Any, Any, Any]], None] | None = None, - log_filtered_traceback: Callable[[BaseException], str] | None = None, + enter_handler_context: Callable[[BaseEvent[Any], str], tuple[Any, Any, Any]] | None = None, + exit_handler_context: Callable[[tuple[Any, Any, Any]], None] | None = None, + format_exception_for_log: Callable[[BaseException], str] | None = None, ) -> T_EventResultType | BaseEvent[Any] | None: """Execute the handler and update internal state automatically.""" @@ -1020,9 +1020,9 @@ def _default_log(exc: BaseException) -> str: TracebackException.from_exception(exc, capture_locals=False).format() ) - _enter = enter_context or _default_enter - _exit = exit_context or _default_exit - _log_exc = log_filtered_traceback or _default_log + _enter_handler_context_callable = enter_handler_context or _default_enter + _exit_handler_context_callable = exit_handler_context or _default_exit + _format_exception_for_log_callable = format_exception_for_log or _default_log self.timeout = timeout if timeout is not None else self.timeout or event.event_timeout self.result_type = event.event_result_type @@ -1033,7 +1033,7 @@ def _default_log(exc: BaseException) -> str: monitor_task: asyncio.Task[None] | None = None handler_task: asyncio.Task[Any] | None = None - tokens = _enter(event, self.handler_id) + handler_context_tokens = _enter_handler_context_callable(event, self.handler_id) async def deadlock_monitor() -> None: await asyncio.sleep(15.0) @@ -1100,7 +1100,7 @@ async def deadlock_monitor() -> None: red = '\033[91m' reset = '\033[0m' logger.error( - f'❌ {eventbus} Error in event handler {self.handler_name}({event}) -> \n{red}{type(exc).__name__}({exc}){reset}\n{_log_exc(exc)}', + f'❌ {eventbus} Error in event handler {self.handler_name}({event}) -> \n{red}{type(exc).__name__}({exc}){reset}\n{_format_exception_for_log_callable(exc)}', ) raise @@ -1122,7 +1122,7 @@ async def deadlock_monitor() -> None: except Exception: pass - _exit(tokens) + _exit_handler_context_callable(handler_context_tokens) def log_tree( self, diff --git a/bubus/service.py b/bubus/service.py index f24a7bb..d743393 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -1120,20 +1120,22 @@ def _get_applicable_handlers(self, event: 'BaseEvent[Any]') -> dict[str, EventHa return filtered_handlers - def _enter_handler_context(self, event: 'BaseEvent[Any]', handler_id: str) -> tuple[contextvars.Token[Any], contextvars.Token[bool], contextvars.Token[str | None]]: - token = _current_event_context.set(event) - handler_token = inside_handler_context.set(True) - handler_id_token = _current_handler_id_context.set(handler_id) - return token, handler_token, handler_id_token - - def _exit_handler_context( + def _enter_handler_execution_context( + self, event: 'BaseEvent[Any]', handler_id: str + ) -> tuple[contextvars.Token[Any], contextvars.Token[bool], contextvars.Token[str | None]]: + event_token = _current_event_context.set(event) + inside_handler_token = inside_handler_context.set(True) + current_handler_token = _current_handler_id_context.set(handler_id) + return event_token, inside_handler_token, current_handler_token + + def _exit_handler_execution_context( self, - tokens: tuple[contextvars.Token[Any], contextvars.Token[bool], contextvars.Token[str | None]], + handler_context_tokens: tuple[contextvars.Token[Any], contextvars.Token[bool], contextvars.Token[str | None]], ) -> None: - token, handler_token, handler_id_token = tokens - _current_event_context.reset(token) - inside_handler_context.reset(handler_token) - _current_handler_id_context.reset(handler_id_token) + event_token, inside_handler_token, current_handler_token = handler_context_tokens + _current_event_context.reset(event_token) + inside_handler_context.reset(inside_handler_token) + _current_handler_id_context.reset(current_handler_token) async def _execute_handlers( self, @@ -1211,9 +1213,9 @@ async def execute_handler( handler, eventbus=self, timeout=timeout or event.event_timeout, - enter_context=self._enter_handler_context, - exit_context=self._exit_handler_context, - log_filtered_traceback=_log_filtered_traceback, + enter_handler_context=self._enter_handler_execution_context, + exit_handler_context=self._exit_handler_execution_context, + format_exception_for_log=_log_filtered_traceback, ) result_type_name = type(result_value).__name__ if result_value is not None else 'None' From 3963aa74702102c1a55b2fd02c3414c9bfb6594c Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 15 Oct 2025 19:08:44 -0700 Subject: [PATCH 07/79] implement event history backend --- bubus/event_history.py | 64 +++++++++++++++++++++++++++ bubus/models.py | 20 ++++----- bubus/service.py | 24 +++++----- tests/test_event_result_standalone.py | 6 +-- 4 files changed, 91 insertions(+), 23 deletions(-) create mode 100644 bubus/event_history.py diff --git a/bubus/event_history.py b/bubus/event_history.py new file mode 100644 index 0000000..a6722f6 --- /dev/null +++ b/bubus/event_history.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from collections.abc import MutableMapping, Iterator, Iterable +from typing import Any, Callable, Generic, TypeVar + +from .models import BaseEvent, UUIDStr + +BaseEventT = TypeVar('BaseEventT', bound=BaseEvent[Any]) + + +class EventHistory(MutableMapping[UUIDStr, BaseEventT], Generic[BaseEventT]): + """Base class for storing EventBus history with filter support.""" + + def add(self, event: BaseEventT) -> None: + self[event.event_id] = event + + def get(self, event_id: UUIDStr, default: BaseEventT | None = None) -> BaseEventT | None: + try: + return self[event_id] + except KeyError: + return default + + def contains(self, event_id: UUIDStr) -> bool: + return event_id in self + + def count(self) -> int: + return len(self) + + def iter_events(self) -> Iterable[BaseEventT]: + return self.values() + + def iter_items(self) -> Iterable[tuple[UUIDStr, BaseEventT]]: + return self.items() + + def filter(self, predicate: Callable[[BaseEventT], bool]) -> list[BaseEventT]: + return [event for event in self.values() if predicate(event)] + + def copy(self) -> dict[UUIDStr, BaseEventT]: + return dict(self.items()) + + +class InMemoryEventHistory(EventHistory[BaseEvent[Any]]): + """Simple in-memory event history implementation.""" + + def __init__(self) -> None: + self._events: dict[UUIDStr, BaseEvent[Any]] = {} + + def __getitem__(self, key: UUIDStr) -> BaseEvent[Any]: + return self._events[key] + + def __setitem__(self, key: UUIDStr, value: BaseEvent[Any]) -> None: + self._events[key] = value + + def __delitem__(self, key: UUIDStr) -> None: + del self._events[key] + + def __iter__(self) -> Iterator[UUIDStr]: + return iter(self._events) + + def __len__(self) -> int: + return len(self._events) + + def clear(self) -> None: + self._events.clear() diff --git a/bubus/models.py b/bubus/models.py index 97c5b46..6a237bc 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -1007,22 +1007,22 @@ async def execute( ) -> T_EventResultType | BaseEvent[Any] | None: """Execute the handler and update internal state automatically.""" - def _default_enter(_: BaseEvent[Any], __: str) -> tuple[None, None, None]: + def _default_enter_handler_context(_: BaseEvent[Any], __: str) -> tuple[None, None, None]: return (None, None, None) - def _default_exit(_: tuple[Any, Any, Any]) -> None: + def _default_exit_handler_context(_: tuple[Any, Any, Any]) -> None: return None - def _default_log(exc: BaseException) -> str: + def _default_format_exception_for_log(exc: BaseException) -> str: from traceback import TracebackException return ''.join( TracebackException.from_exception(exc, capture_locals=False).format() ) - _enter_handler_context_callable = enter_handler_context or _default_enter - _exit_handler_context_callable = exit_handler_context or _default_exit - _format_exception_for_log_callable = format_exception_for_log or _default_log + _enter_handler_context_callable = enter_handler_context or _default_enter_handler_context + _exit_handler_context_callable = exit_handler_context or _default_exit_handler_context + _format_exception_for_log_callable = format_exception_for_log or _default_format_exception_for_log self.timeout = timeout if timeout is not None else self.timeout or event.event_timeout self.result_type = event.event_result_type @@ -1050,10 +1050,10 @@ async def deadlock_monitor() -> None: try: if inspect.iscoroutinefunction(handler): handler_task = asyncio.create_task(handler(event)) # type: ignore - result_value: Any = await asyncio.wait_for(handler_task, timeout=self.timeout) + handler_return_value: Any = await asyncio.wait_for(handler_task, timeout=self.timeout) elif inspect.isfunction(handler) or inspect.ismethod(handler): - result_value = handler(event) - if isinstance(result_value, BaseEvent): + handler_return_value = handler(event) + if isinstance(handler_return_value, BaseEvent): logger.debug( f'Handler {self.handler_name} returned BaseEvent, not awaiting to avoid circular dependency' ) @@ -1061,7 +1061,7 @@ async def deadlock_monitor() -> None: raise ValueError(f'Handler {get_handler_name(handler)} must be a sync or async function, got: {type(handler)}') monitor_task.cancel() - self.update(result=result_value) + self.update(result=handler_return_value) return cast(T_EventResultType | BaseEvent[Any] | None, self.result) except asyncio.CancelledError as exc: diff --git a/bubus/service.py b/bubus/service.py index d743393..25d9351 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -15,6 +15,7 @@ uuid7str: Callable[[], str] = uuid7str # pyright: ignore +from bubus.event_history import EventHistory, InMemoryEventHistory from bubus.models import ( BUBUS_LOGGING_LEVEL, AsyncEventHandlerClassMethod, @@ -293,7 +294,7 @@ class EventBus: id: UUIDStr = '00000000-0000-0000-0000-000000000000' handlers: dict[PythonIdStr, list[ContravariantEventHandler['BaseEvent[Any]']]] # collected by .on(, ) event_queue: CleanShutdownQueue['BaseEvent[Any]'] | None - event_history: dict[UUIDStr, 'BaseEvent[Any]'] # collected by .dispatch() + event_history: 'EventHistory[BaseEvent[Any]]' _is_running: bool = False _runloop_task: asyncio.Task[None] | None = None @@ -304,6 +305,7 @@ def __init__( name: PythonIdentifierStr | None = None, parallel_handlers: bool = False, max_history_size: int | None = 50, # Keep only 50 events in history + event_history: EventHistory['BaseEvent[Any]'] | None = None, middlewares: Sequence[EventBusMiddleware | type[EventBusMiddleware]] | None = None, ): self.id = uuid7str() @@ -353,7 +355,7 @@ def __init__( ) self.event_queue = None - self.event_history = {} + self.event_history = event_history or InMemoryEventHistory() self.handlers = defaultdict(list) self.parallel_handlers = parallel_handlers self._on_idle = None @@ -459,19 +461,19 @@ async def _dispatch_after_event_hooks(self, event: 'BaseEvent[Any]') -> None: @property def events_pending(self) -> list['BaseEvent[Any]']: """Get events that haven't started processing yet (does not include events that have not even finished dispatching yet in self.event_queue)""" - return [ - event for event in self.event_history.values() if event.event_started_at is None and event.event_completed_at is None - ] + return self.event_history.filter(lambda event: event.event_started_at is None and event.event_completed_at is None) @property def events_started(self) -> list['BaseEvent[Any]']: """Get events currently being processed""" - return [event for event in self.event_history.values() if event.event_started_at and not event.event_completed_at] + return [ + event for event in self.event_history.filter(lambda e: e.event_started_at and not e.event_completed_at) + ] @property def events_completed(self) -> list['BaseEvent[Any]']: """Get events that have completed processing""" - return [event for event in self.event_history.values() if event.event_completed_at is not None] + return self.event_history.filter(lambda e: e.event_completed_at is not None) # Overloads for typed event patterns with specific handler signatures # Order matters - more specific types must come before general ones @@ -631,7 +633,9 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: # Only enforce if we have memory limits set if self.max_history_size is not None: queue_size = self.event_queue.qsize() if self.event_queue else 0 - pending_in_history = sum(1 for e in self.event_history.values() if e.event_status in ('pending', 'started')) + pending_in_history = len( + self.event_history.filter(lambda event: event.event_status in ('pending', 'started')) + ) total_pending = queue_size + pending_in_history if total_pending >= 100: @@ -649,7 +653,7 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: try: self.event_queue.put_nowait(event) # Only add to history after successfully queuing - self.event_history[event.event_id] = event + self.event_history.add(event) logger.info( f'πŸ—£οΈ {self}.dispatch({event.event_type}) ➑️ {event.event_type}#{event.event_id[-4:]} (#{self.event_queue.qsize()} {event.event_status})' ) @@ -667,7 +671,7 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: # This avoids "orphaned" pending results for handlers that get filtered out later. # Clean up if over the limit - if self.max_history_size and len(self.event_history) > self.max_history_size: + if self.max_history_size and self.event_history.count() > self.max_history_size: self.cleanup_event_history() return event diff --git a/tests/test_event_result_standalone.py b/tests/test_event_result_standalone.py index e1da0d5..1f5fbc7 100644 --- a/tests/test_event_result_standalone.py +++ b/tests/test_event_result_standalone.py @@ -16,10 +16,10 @@ def __init__(self): self.event_timeout = 0.5 self.event_processed_at = None self.event_results: dict[str, EventResult] = {} - self._cancelled_with: BaseException | None = None + self._cancelled_due_to_error: BaseException | None = None def event_cancel_pending_child_processing(self, error: BaseException) -> None: - self._cancelled_with = error + self._cancelled_due_to_error = error @pytest.mark.asyncio @@ -51,7 +51,7 @@ async def handler(event: _StubEvent) -> str: assert result_value == 'ok' assert event_result.status == 'completed' assert event_result.result == 'ok' - assert stub_event._cancelled_with is None + assert stub_event._cancelled_due_to_error is None class StandaloneEvent(BaseEvent[str]): From 8cd3335876b19f59aec0696e7755a408d80d5276 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Thu, 16 Oct 2025 18:34:05 -0700 Subject: [PATCH 08/79] add monitor dash mini app --- README.md | 15 +- bubus/__init__.py | 12 +- bubus/event_history.py | 11 +- monitor_app/README.md | 34 +++ monitor_app/__init__.py | 5 + monitor_app/config.py | 21 ++ monitor_app/db.py | 108 ++++++++ monitor_app/main.py | 549 +++++++++++++++++++++++++++++++++++++ monitor_app/test_events.py | 94 +++++++ 9 files changed, 834 insertions(+), 15 deletions(-) create mode 100644 monitor_app/README.md create mode 100644 monitor_app/__init__.py create mode 100644 monitor_app/config.py create mode 100644 monitor_app/db.py create mode 100644 monitor_app/main.py create mode 100644 monitor_app/test_events.py diff --git a/README.md b/README.md index c2a107b..3bc1f2e 100644 --- a/README.md +++ b/README.md @@ -480,19 +480,16 @@ Persist events automatically to a `jsonl` file for future replay and debugging: from pathlib import Path from bubus import EventBus -from bubus.middlewares import ( - LoggerEventBusMiddleware, - SQLiteEventBusMiddleware, - WALEventBusMiddleware, -) +from bubus.event_history import SQLiteEventHistory +from bubus.middlewares import LoggerEventBusMiddleware, WALEventBusMiddleware # Enable WAL event log persistence (optional) bus = EventBus( name='MyBus', + event_history=SQLiteEventHistory('./events.sqlite'), middlewares=[ WALEventBusMiddleware('./events.jsonl'), LoggerEventBusMiddleware('./events.log'), - SQLiteEventBusMiddleware('./events.sqlite'), ], ) @@ -555,12 +552,12 @@ class AnalyticsMiddleware(EventBusMiddleware): Middlewares can observe or mutate the `EventResult` at each step, dispatch additional events, or trigger other side effects (metrics, retries, auth checks, etc.). -The built-in `SQLiteEventBusMiddleware` mirrors every event and handler transition into append-only `events_log` and `event_results_log` tables, making it easy to inspect or audit the bus state: +Pair that with the built-in `SQLiteEventHistory` to mirror every event and handler transition into append-only `events_log` and `event_results_log` tables, making it easy to inspect or audit the bus state: ```python -from bubus.middlewares import SQLiteEventBusMiddleware +from bubus.event_history import SQLiteEventHistory -bus = EventBus(middlewares=[SQLiteEventBusMiddleware('./events.sqlite')]) +bus = EventBus(event_history=SQLiteEventHistory('./events.sqlite')) ``` - `max_history_size`: Maximum number of events to keep in history (default: 50, None = unlimited) diff --git a/bubus/__init__.py b/bubus/__init__.py index 871b740..858db76 100644 --- a/bubus/__init__.py +++ b/bubus/__init__.py @@ -1,14 +1,18 @@ """Event bus for the browser-use agent.""" -from bubus.middlewares import EventBusMiddleware, LoggerEventBusMiddleware, SQLiteEventBusMiddleware -from bubus.models import BaseEvent, EventHandler, EventResult, PythonIdentifierStr, PythonIdStr, UUIDStr -from bubus.service import EventBus +from .event_history import EventHistory, InMemoryEventHistory, SQLiteEventHistory +from .middlewares import EventBusMiddleware, LoggerEventBusMiddleware, WALEventBusMiddleware +from .models import BaseEvent, EventHandler, EventResult, PythonIdentifierStr, PythonIdStr, UUIDStr +from .service import EventBus __all__ = [ 'EventBus', 'EventBusMiddleware', 'LoggerEventBusMiddleware', - 'SQLiteEventBusMiddleware', + 'WALEventBusMiddleware', + 'EventHistory', + 'InMemoryEventHistory', + 'SQLiteEventHistory', 'BaseEvent', 'EventResult', 'EventHandler', diff --git a/bubus/event_history.py b/bubus/event_history.py index a6722f6..3553f46 100644 --- a/bubus/event_history.py +++ b/bubus/event_history.py @@ -1,10 +1,17 @@ from __future__ import annotations -from collections.abc import MutableMapping, Iterator, Iterable -from typing import Any, Callable, Generic, TypeVar +from collections.abc import Iterable, Iterator, MutableMapping +import sqlite3 +import threading +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar from .models import BaseEvent, UUIDStr +if TYPE_CHECKING: + from .models import EventResult + from .service import EventBus + BaseEventT = TypeVar('BaseEventT', bound=BaseEvent[Any]) diff --git a/monitor_app/README.md b/monitor_app/README.md new file mode 100644 index 0000000..6e05e75 --- /dev/null +++ b/monitor_app/README.md @@ -0,0 +1,34 @@ +# bubus Monitor App + +Minimal FastAPI application that reads the `events_log` and `event_results_log` tables produced by `SQLiteEventHistory` and exposes them over HTTP/WebSocket for live monitoring. + +Install dependencies (once): + +```bash +pip install fastapi uvicorn +``` + +## Quick start + +```bash +cd monitor_app +uvicorn monitor_app.main:app --reload +``` + +The app assumes the history database lives at `../events.sqlite`. Override via: + +```bash +EVENT_HISTORY_DB=/path/to/history.sqlite uvicorn monitor_app.main:app --reload +``` + +Then visit [http://localhost:8000](http://localhost:8000) for a simple dashboard that shows recent events and handler results updating in near real-time through a WebSocket stream. + +## Endpoints + +- `GET /events?limit=20` – latest events (JSON) +- `GET /results?limit=20` – latest handler results (JSON) +- `GET /meta` – database path + existence flag +- `GET /` – minimal HTML dashboard +- `WS /ws/events` – pushes new rows as they arrive (`{"events": [...], "results": [...]}`) + +This app is intentionally small so you can extend it with additional metrics, authentication, or richer UI as needed. diff --git a/monitor_app/__init__.py b/monitor_app/__init__.py new file mode 100644 index 0000000..9bf2e16 --- /dev/null +++ b/monitor_app/__init__.py @@ -0,0 +1,5 @@ +"""Minimal FastAPI app for monitoring bubus SQLite event history.""" + +from .main import app + +__all__ = ['app'] diff --git a/monitor_app/config.py b/monitor_app/config.py new file mode 100644 index 0000000..73da000 --- /dev/null +++ b/monitor_app/config.py @@ -0,0 +1,21 @@ +"""Configuration helpers for the monitoring app.""" + +from __future__ import annotations + +import os +from pathlib import Path + +DEFAULT_DB_PATH = Path(os.getenv('EVENT_HISTORY_DB', 'events.sqlite')) + + +def resolve_db_path() -> Path: + """ + Resolve the path to the SQLite history database. + + The path can be overridden via the EVENT_HISTORY_DB environment variable. + """ + db_path = Path(os.getenv('EVENT_HISTORY_DB', DEFAULT_DB_PATH)) + if not db_path.is_absolute(): + # Resolve relative to repository root (parent directory of monitor_app) + db_path = Path(__file__).resolve().parent.parent / db_path + return db_path diff --git a/monitor_app/db.py b/monitor_app/db.py new file mode 100644 index 0000000..ecbd84c --- /dev/null +++ b/monitor_app/db.py @@ -0,0 +1,108 @@ +"""Async helpers for reading the SQLite event history.""" + +from __future__ import annotations + +import asyncio +import sqlite3 +from dataclasses import dataclass +from typing import Any, List + +from .config import resolve_db_path + + +def _connect() -> sqlite3.Connection: + conn = sqlite3.connect(resolve_db_path(), check_same_thread=False) + conn.row_factory = sqlite3.Row + return conn + + +async def fetch_events(limit: int = 50) -> list[dict[str, Any]]: + return await asyncio.to_thread(_fetch_events_sync, limit) + + +def _fetch_events_sync(limit: int) -> list[dict[str, Any]]: + conn = _connect() + try: + rows = conn.execute( + """ + SELECT id, event_id, event_type, event_status, eventbus_name, phase, event_json, inserted_at + FROM events_log + ORDER BY inserted_at DESC + LIMIT ? + """, + (limit,), + ).fetchall() + return [dict(row) for row in rows] + finally: + conn.close() + + +async def fetch_results(limit: int = 50) -> list[dict[str, Any]]: + return await asyncio.to_thread(_fetch_results_sync, limit) + + +def _fetch_results_sync(limit: int) -> list[dict[str, Any]]: + conn = _connect() + try: + rows = conn.execute( + """ + SELECT id, event_id, event_result_id, handler_name, status, phase, result_repr, error_repr, + eventbus_name, event_result_json, inserted_at + FROM event_results_log + ORDER BY inserted_at DESC + LIMIT ? + """, + (limit,), + ).fetchall() + return [dict(row) for row in rows] + finally: + conn.close() + + +@dataclass +class HistoryStreamState: + last_event_id: int = 0 + last_result_id: int = 0 + + +async def stream_new_rows(state: HistoryStreamState) -> dict[str, List[dict[str, Any]]]: + """Return new rows added since the last call.""" + updates = await asyncio.to_thread(_stream_new_rows_sync, state) + return updates + + +def _stream_new_rows_sync(state: HistoryStreamState) -> dict[str, List[dict[str, Any]]]: + conn = _connect() + try: + events = conn.execute( + """ + SELECT id, event_id, event_type, event_status, eventbus_name, phase, event_json, inserted_at + FROM events_log + WHERE id > ? + ORDER BY id ASC + """, + (state.last_event_id,), + ).fetchall() + + results = conn.execute( + """ + SELECT id, event_id, event_result_id, handler_name, status, phase, result_repr, error_repr, + eventbus_name, event_result_json, inserted_at + FROM event_results_log + WHERE id > ? + ORDER BY id ASC + """, + (state.last_result_id,), + ).fetchall() + + if events: + state.last_event_id = events[-1]['id'] + if results: + state.last_result_id = results[-1]['id'] + + return { + 'events': [dict(row) for row in events], + 'results': [dict(row) for row in results], + } + finally: + conn.close() diff --git a/monitor_app/main.py b/monitor_app/main.py new file mode 100644 index 0000000..6a4d995 --- /dev/null +++ b/monitor_app/main.py @@ -0,0 +1,549 @@ +from __future__ import annotations + +import asyncio +import json +from datetime import datetime +from typing import Annotated, Any + +from fastapi import FastAPI, Query, WebSocket, WebSocketDisconnect +from fastapi.responses import HTMLResponse, JSONResponse + +from . import db +from .config import resolve_db_path + +app = FastAPI(title='bubus event monitor', version='0.1.0') + + +def _format_timestamp(value: str | None) -> str | None: + if not value: + return None + # SQLite timestamp string -> ISO 8601 + try: + return datetime.fromisoformat(value.replace('Z', '+00:00')).isoformat() + except ValueError: + return value + + +async def _fetch_events(limit: int) -> list[dict[str, Any]]: + rows = await db.fetch_events(limit) + for row in rows: + row['inserted_at'] = _format_timestamp(row.get('inserted_at')) + return rows + + +async def _fetch_results(limit: int) -> list[dict[str, Any]]: + rows = await db.fetch_results(limit) + for row in rows: + row['inserted_at'] = _format_timestamp(row.get('inserted_at')) + return rows + + +@app.get('/', response_class=HTMLResponse) +async def index() -> str: + return """ + + + + + bubus Event Monitor + + + +
+

bubus Event Monitor

+
+ Database: + connecting… + +
+
+
+ +
+ + + +
+
+
+ + + + """ + + +@app.get('/events') +async def list_events(limit: Annotated[int, Query(ge=1, le=200)] = 20) -> JSONResponse: + rows = await _fetch_events(limit) + return JSONResponse(rows) + + +@app.get('/results') +async def list_results(limit: Annotated[int, Query(ge=1, le=200)] = 20) -> JSONResponse: + rows = await _fetch_results(limit) + return JSONResponse(rows) + + +@app.get('/meta') +async def meta() -> dict[str, Any]: + db_path = resolve_db_path() + exists = db_path.exists() + return { + 'db_path': str(db_path), + 'db_exists': exists, + } + + +@app.websocket('/ws/events') +async def websocket_events(socket: WebSocket) -> None: + await socket.accept() + state = db.HistoryStreamState() + try: + # Prime with latest IDs so we only broadcast new rows + latest_events = await _fetch_events(1) + latest_results = await _fetch_results(1) + if latest_events: + state.last_event_id = latest_events[0]['id'] + if latest_results: + state.last_result_id = latest_results[0]['id'] + + while True: + updates = await db.stream_new_rows(state) + if updates['events'] or updates['results']: + for key in ('events', 'results'): + for row in updates[key]: + row['inserted_at'] = _format_timestamp(row.get('inserted_at')) + await socket.send_text(json.dumps(updates)) + await asyncio.sleep(1.0) + except WebSocketDisconnect: + return + except Exception as exc: # pragma: no cover - surface to client + await socket.send_text(json.dumps({'error': str(exc)})) + await asyncio.sleep(0.5) diff --git a/monitor_app/test_events.py b/monitor_app/test_events.py new file mode 100644 index 0000000..fa143ac --- /dev/null +++ b/monitor_app/test_events.py @@ -0,0 +1,94 @@ +"""Utility script to generate synthetic events for the monitor app.""" + +from __future__ import annotations + +import argparse +import asyncio +import random +import string +from typing import Sequence + +from bubus import BaseEvent, EventBus +from bubus.event_history import SQLiteEventHistory + +from .config import resolve_db_path + + +class RandomTestEvent(BaseEvent): + abc_payload_field: str + xyz_category_field: str + + +class FollowUpEvent(BaseEvent): + abc_parent_payload_field: str + xyz_detail_field: str + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description='Generate random events for the bubus monitor.') + parser.add_argument('--events', type=int, default=50, help='Number of events to emit.') + parser.add_argument('--min-delay', type=float, default=0.2, help='Minimum delay between events (seconds).') + parser.add_argument('--max-delay', type=float, default=1.0, help='Maximum delay between events (seconds).') + parser.add_argument('--error-rate', type=float, default=0.2, help='Fraction of handlers that should raise an error.') + parser.add_argument('--child-rate', type=float, default=0.3, help='Probability of dispatching a follow-up event.') + parser.add_argument('--categories', nargs='*', default=['alpha', 'beta', 'gamma'], help='Event categories to sample.') + return parser.parse_args() + + +def _random_text(length: int = 8) -> str: + return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) + + +async def run_generator(args: argparse.Namespace) -> None: + db_path = resolve_db_path() + db_path.parent.mkdir(parents=True, exist_ok=True) + history = SQLiteEventHistory(db_path) + bus = EventBus(name='MonitorGenerator', event_history=history) + + categories: Sequence[str] = args.categories or ['default'] + + async def random_handler(event: RandomTestEvent) -> str: + await asyncio.sleep(random.uniform(0.05, 0.4)) + if random.random() < args.error_rate: + raise RuntimeError(f'Flaky handler failed for payload={event.abc_payload_field}') + if random.random() < args.child_rate: + follow_up = FollowUpEvent( + abc_parent_payload_field=event.abc_payload_field, + xyz_detail_field=_random_text(6), + ) + bus.dispatch(follow_up) + return event.abc_payload_field[::-1] + + async def followup_handler(event: FollowUpEvent) -> str: + await asyncio.sleep(random.uniform(0.05, 0.3)) + return f'followup:{event.xyz_detail_field}' + + bus.on('RandomTestEvent', random_handler) + bus.on('FollowUpEvent', followup_handler) + + print(f'🟒 Writing events to {db_path}') + + try: + for _ in range(args.events): + payload = _random_text(10) + event = RandomTestEvent( + abc_payload_field=payload, + xyz_category_field=random.choice(list(categories)), + ) + bus.dispatch(event) + await asyncio.sleep(random.uniform(args.min_delay, args.max_delay)) + + # Give handlers time to finish + await bus.wait_until_idle() + finally: + await bus.stop() + print('βœ… Done') + + +def main() -> None: + args = parse_args() + asyncio.run(run_generator(args)) + + +if __name__ == '__main__': + main() From 5747fcbc6d8e649a5be51fa6158d4873b6d4987a Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 14:48:46 -0700 Subject: [PATCH 09/79] improve monitor ui dash --- bubus/event_history.py | 196 +++++++++++++++++++++++++++++++++++-- bubus/middlewares.py | 159 +----------------------------- bubus/models.py | 2 +- bubus/service.py | 4 +- monitor_app/config.py | 2 - monitor_app/main.py | 104 +++++++++++--------- monitor_app/test_events.py | 115 +++++++++++++++++----- 7 files changed, 344 insertions(+), 238 deletions(-) diff --git a/bubus/event_history.py b/bubus/event_history.py index 3553f46..6d0be6e 100644 --- a/bubus/event_history.py +++ b/bubus/event_history.py @@ -1,8 +1,8 @@ from __future__ import annotations -from collections.abc import Iterable, Iterator, MutableMapping import sqlite3 import threading +from collections.abc import Iterable, Iterator, MutableMapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar @@ -21,12 +21,6 @@ class EventHistory(MutableMapping[UUIDStr, BaseEventT], Generic[BaseEventT]): def add(self, event: BaseEventT) -> None: self[event.event_id] = event - def get(self, event_id: UUIDStr, default: BaseEventT | None = None) -> BaseEventT | None: - try: - return self[event_id] - except KeyError: - return default - def contains(self, event_id: UUIDStr) -> bool: return event_id in self @@ -45,6 +39,22 @@ def filter(self, predicate: Callable[[BaseEventT], bool]) -> list[BaseEventT]: def copy(self) -> dict[UUIDStr, BaseEventT]: return dict(self.items()) + # Lifecycle hooks ----------------------------------------------------- # + + def record_event_snapshot(self, eventbus: EventBus, event: BaseEventT, phase: str | None = None) -> None: + """Optional hook: persist or mirror a snapshot of the event lifecycle.""" + return None + + def record_event_result_snapshot( + self, + eventbus: EventBus, + event: BaseEventT, + event_result: EventResult[Any], + phase: str | None = None, + ) -> None: + """Optional hook: persist or mirror a snapshot of an event result lifecycle.""" + return None + class InMemoryEventHistory(EventHistory[BaseEvent[Any]]): """Simple in-memory event history implementation.""" @@ -69,3 +79,175 @@ def __len__(self) -> int: def clear(self) -> None: self._events.clear() + + +class SQLiteEventHistory(EventHistory[BaseEvent[Any]]): + """Event history backend that mirrors lifecycle snapshots into append-only SQLite tables.""" + + def __init__(self, db_path: Path | str): + self.db_path = Path(db_path) + self.db_path.parent.mkdir(parents=True, exist_ok=True) + + self._events: dict[UUIDStr, BaseEvent[Any]] = {} + self._lock = threading.RLock() + self._conn = sqlite3.connect(self.db_path, check_same_thread=False, isolation_level=None) + self._init_db() + + def __del__(self): + try: + self._conn.close() + except Exception: + pass + + # MutableMapping implementation --------------------------------------- # + def __getitem__(self, key: UUIDStr) -> BaseEvent[Any]: + return self._events[key] + + def __setitem__(self, key: UUIDStr, value: BaseEvent[Any]) -> None: + self._events[key] = value + + def __delitem__(self, key: UUIDStr) -> None: + self._events.pop(key, None) + + def __iter__(self) -> Iterator[UUIDStr]: + return iter(self._events) + + def __len__(self) -> int: + return len(self._events) + + def clear(self) -> None: + self._events.clear() + + # Internal helpers ---------------------------------------------------- # + def _init_db(self) -> None: + with self._lock: + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS events_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_id TEXT NOT NULL, + event_type TEXT NOT NULL, + event_status TEXT NOT NULL, + eventbus_id TEXT NOT NULL, + eventbus_name TEXT NOT NULL, + phase TEXT, + event_json TEXT NOT NULL, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS event_results_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_result_id TEXT NOT NULL, + event_id TEXT NOT NULL, + handler_id TEXT NOT NULL, + handler_name TEXT NOT NULL, + eventbus_id TEXT NOT NULL, + eventbus_name TEXT NOT NULL, + event_type TEXT NOT NULL, + status TEXT NOT NULL, + phase TEXT, + result_repr TEXT, + error_repr TEXT, + event_result_json TEXT, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + self._conn.execute('PRAGMA journal_mode=WAL') + self._conn.execute('PRAGMA synchronous=NORMAL') + + # Persistence hooks --------------------------------------------------- # + def record_event_snapshot( + self, + eventbus: EventBus, + event: BaseEvent[Any], + phase: str | None = None, + ) -> None: + event_status = 'error' if any(result.status == 'error' for result in event.event_results.values()) else event.event_status + event_json = event.model_dump_json() + + with self._lock: + self._conn.execute( + """ + INSERT INTO events_log ( + event_id, + event_type, + event_status, + eventbus_id, + eventbus_name, + phase, + event_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?) + """, + ( + event.event_id, + event.event_type, + event_status, + eventbus.id, + eventbus.name, + phase, + event_json, + ), + ) + self._conn.commit() + + def record_event_result_snapshot( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result: EventResult[Any], + phase: str | None = None, + ) -> None: + error_repr = repr(event_result.error) if event_result.error is not None else None + result_repr: str | None = None + if event_result.result is not None and event_result.error is None: + try: + result_repr = repr(event_result.result) + except Exception: + result_repr = '' + + # Avoid huge JSON blobs for unreadable result types by falling back to repr + try: + event_result_json = event_result.model_dump_json() + except Exception: + event_result_json = None + + with self._lock: + self._conn.execute( + """ + INSERT INTO event_results_log ( + event_result_id, + event_id, + handler_id, + handler_name, + eventbus_id, + eventbus_name, + event_type, + status, + phase, + result_repr, + error_repr, + event_result_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, + ( + event_result.id, + event_result.event_id, + event_result.handler_id, + event_result.handler_name, + event_result.eventbus_id, + event_result.eventbus_name, + event.event_type, + event_result.status, + phase, + result_repr, + error_repr, + event_result_json, + ), + ) + self._conn.commit() diff --git a/bubus/middlewares.py b/bubus/middlewares.py index 39efff9..6cc798d 100644 --- a/bubus/middlewares.py +++ b/bubus/middlewares.py @@ -4,16 +4,16 @@ import asyncio import logging -import sqlite3 import threading from pathlib import Path from typing import Any from bubus.logging import log_eventbus_tree from bubus.models import BaseEvent -from bubus.service import EventBus, EventBusMiddleware as _EventBusMiddleware +from bubus.service import EventBus +from bubus.service import EventBusMiddleware as _EventBusMiddleware -__all__ = ['EventBusMiddleware', 'WALEventBusMiddleware', 'LoggerEventBusMiddleware', 'SQLiteEventBusMiddleware'] +__all__ = ['EventBusMiddleware', 'WALEventBusMiddleware', 'LoggerEventBusMiddleware'] logger = logging.getLogger('bubus.middleware') @@ -102,156 +102,3 @@ def _append_line(self, line: str) -> None: with self.log_path.open('a', encoding='utf-8') as fp: fp.write(line) print(line.rstrip('\n'), flush=True) - - -class SQLiteEventBusMiddleware(EventBusMiddleware): - """Mirror events and handler results into append-only SQLite tables.""" - - def __init__(self, db_path: str | Path): - self.db_path = Path(db_path) - self.db_path.parent.mkdir(parents=True, exist_ok=True) - self._conn = sqlite3.connect(self.db_path, check_same_thread=False) - self._conn.execute('PRAGMA journal_mode=WAL') - self._conn.execute('PRAGMA synchronous=NORMAL') - self._setup_schema() - self._lock = asyncio.Lock() - - def __del__(self): - try: - self._conn.close() - except Exception: - pass - - def _setup_schema(self) -> None: - self._conn.execute( - ''' - CREATE TABLE IF NOT EXISTS events_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - event_id TEXT NOT NULL, - event_type TEXT NOT NULL, - event_status TEXT NOT NULL, - eventbus_name TEXT, - event_json TEXT NOT NULL, - inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ) - ''' - ) - self._conn.execute( - ''' - CREATE TABLE IF NOT EXISTS event_results_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - event_id TEXT NOT NULL, - handler_id TEXT NOT NULL, - handler_name TEXT NOT NULL, - eventbus_id TEXT NOT NULL, - eventbus_name TEXT NOT NULL, - status TEXT NOT NULL, - result_repr TEXT, - error_repr TEXT, - inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ) - ''' - ) - self._conn.commit() - - async def before_handler(self, eventbus: EventBus, event: BaseEvent[Any], event_result) -> None: - await self._insert_event_result(event_result) - - async def after_handler(self, eventbus: EventBus, event: BaseEvent[Any], event_result) -> None: - await self._insert_event_result(event_result) - - async def on_handler_error( - self, - eventbus: EventBus, - event: BaseEvent[Any], - event_result, - error: BaseException, - ) -> None: - await self._insert_event_result(event_result, error_override=error) - - async def after_event(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: - if getattr(event, '_sqlite_logged', False): - return - - if not self._event_is_complete(event): - return - - await self._insert_event(eventbus, event) - setattr(event, '_sqlite_logged', True) - - async def _insert_event_result(self, event_result, error_override: BaseException | None = None) -> None: - error = error_override or event_result.error - error_repr = repr(error) if error is not None else None - result_repr = None - if event_result.result is not None and error is None: - try: - result_repr = repr(event_result.result) - except Exception: - result_repr = '' - - await self._execute( - ''' - INSERT INTO event_results_log ( - event_id, - handler_id, - handler_name, - eventbus_id, - eventbus_name, - status, - result_repr, - error_repr - ) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) - ''', - ( - event_result.event_id, - event_result.handler_id, - event_result.handler_name, - event_result.eventbus_id, - event_result.eventbus_name, - event_result.status, - result_repr, - error_repr, - ), - ) - - async def _insert_event(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: - event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] - has_error = any(result.status == 'error' for result in event.event_results.values()) - event_status = 'error' if has_error else event.event_status - - await self._execute( - ''' - INSERT INTO events_log ( - event_id, - event_type, - event_status, - eventbus_name, - event_json - ) - VALUES (?, ?, ?, ?, ?) - ''', - ( - event.event_id, - event.event_type, - event_status, - eventbus.name, - event_json, - ), - ) - - async def _execute(self, sql: str, params: tuple[Any, ...]) -> None: - async with self._lock: - await asyncio.to_thread(self._run_execute, sql, params) - - def _run_execute(self, sql: str, params: tuple[Any, ...]) -> None: - self._conn.execute(sql, params) - self._conn.commit() - - def _event_is_complete(self, event: BaseEvent[Any]) -> bool: - signal = event.event_completed_signal - if signal is not None and not signal.is_set(): - return False - if any(result.status not in ('completed', 'error') for result in event.event_results.values()): - return False - return event.event_are_all_children_complete() diff --git a/bubus/models.py b/bubus/models.py index 6a237bc..a5bc85b 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -1062,7 +1062,7 @@ async def deadlock_monitor() -> None: monitor_task.cancel() self.update(result=handler_return_value) - return cast(T_EventResultType | BaseEvent[Any] | None, self.result) + return self.result except asyncio.CancelledError as exc: if monitor_task: diff --git a/bubus/service.py b/bubus/service.py index 25d9351..c0f14c3 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -9,7 +9,7 @@ from collections.abc import Callable, Sequence from contextvars import ContextVar from pathlib import Path -from typing import Any, Literal, TypeVar, cast, overload +from typing import Any, Literal, TypeGuard, TypeVar, cast, overload from uuid_extensions import uuid7str # pyright: ignore[reportMissingImports, reportUnknownVariableType] @@ -27,6 +27,7 @@ EventHandlerClassMethod, EventHandlerFunc, EventHandlerMethod, + EventResult, PythonIdentifierStr, PythonIdStr, T_Event, @@ -34,7 +35,6 @@ UUIDStr, get_handler_id, get_handler_name, - EventResult, ) logger = logging.getLogger('bubus') diff --git a/monitor_app/config.py b/monitor_app/config.py index 73da000..45c846a 100644 --- a/monitor_app/config.py +++ b/monitor_app/config.py @@ -1,7 +1,5 @@ """Configuration helpers for the monitoring app.""" -from __future__ import annotations - import os from pathlib import Path diff --git a/monitor_app/main.py b/monitor_app/main.py index 6a4d995..a679377 100644 --- a/monitor_app/main.py +++ b/monitor_app/main.py @@ -63,43 +63,53 @@ async def index() -> str: .toolbar input, .toolbar select { background: rgba(15,23,42,0.72); border: 1px solid rgba(148,163,184,0.35); color: inherit; border-radius: 0.5rem; padding: 0.4rem 0.65rem; font-size: 0.86rem; min-width: 9.5rem; } .toolbar label { display: flex; align-items: center; gap: 0.35rem; } - #events-tree { display: grid; gap: 0.45rem; } - .tree-node { position: relative; background: rgba(15,23,42,0.34); border: 1px solid rgba(148,163,184,0.26); border-radius: 0.6rem; padding: 0.45rem 0.75rem 0.55rem 1.2rem; } - .tree-node::before { content: ''; position: absolute; left: 0.55rem; top: 0.6rem; bottom: 0.6rem; border-left: 2px solid rgba(94,234,212,0.25); } - .tree-node details { padding-top: 0; } - .tree-node details > summary { list-style: none; cursor: pointer; padding: 0; outline: none; } - .tree-node details > summary::-webkit-details-marker { display: none; } - .event-summary { display: flex; flex-wrap: wrap; gap: 0.4rem; align-items: center; font-size: 0.9rem; } - .pill { display: inline-flex; align-items: center; gap: 0.35rem; border-radius: 999px; padding: 0.2rem 0.6rem; border: 1px solid rgba(148,163,184,0.32); background: rgba(15,23,42,0.68); font-size: 0.85rem; } - .pill-type { font-weight: 600; text-transform: uppercase; letter-spacing: 0.04em; background: rgba(94,234,212,0.12); border-color: rgba(94,234,212,0.42); } + #events-tree { display: grid; gap: 0.6rem; grid-template-columns: repeat(auto-fit, minmax(420px, 1fr)); align-items: start; } + + .tree-node { background: rgba(15,23,42,0.36); border: 1px solid rgba(148,163,184,0.22); border-radius: 0.6rem; padding: 0.55rem 0.75rem 0.65rem; box-shadow: 0 12px 24px rgba(8,11,25,0.35); } + .tree-node > details > summary { list-style: none; cursor: pointer; padding: 0; margin: 0; outline: none; } + .tree-node summary::-webkit-details-marker { display: none; } + .event-summary { display: flex; flex-wrap: wrap; gap: 0.4rem; align-items: center; } + .pill { display: inline-flex; align-items: center; gap: 0.35rem; border-radius: 999px; padding: 0.22rem 0.6rem; border: 1px solid rgba(148,163,184,0.32); background: rgba(13,23,42,0.78); font-size: 0.85rem; font-weight: 500; color: rgba(226,232,240,0.92); } + .pill-type { text-transform: uppercase; letter-spacing: 0.055em; background: rgba(94,234,212,0.14); border-color: rgba(94,234,212,0.42); color: #5eead4; } .pill-muted { color: rgba(226,232,240,0.88); } - .pill-status { font-weight: 600; letter-spacing: 0.04em; text-transform: uppercase; } + .pill-status { font-weight: 600; letter-spacing: 0.05em; text-transform: uppercase; } .pill-status.pill-completed { background: rgba(16,185,129,0.2); border-color: rgba(16,185,129,0.5); color: #34d399; } .pill-status.pill-started { background: rgba(250,204,21,0.2); border-color: rgba(250,204,21,0.45); color: #facc15; } - .pill-status.pill-pending { background: rgba(59,130,246,0.24); border-color: rgba(59,130,246,0.45); color: #60a5fa; } - .pill-status.pill-error { background: rgba(239,68,68,0.24); border-color: rgba(239,68,68,0.5); color: #f87171; } + .pill-status.pill-pending { background: rgba(59,130,246,0.24); border-color: rgba(59,130,246,0.42); color: #60a5fa; } + .pill-status.pill-error { background: rgba(239,68,68,0.22); border-color: rgba(239,68,68,0.48); color: #f87171; } .event-meta { margin-top: 0.5rem; padding: 0.45rem 0.55rem 0.3rem; background: rgba(15,23,42,0.46); border-radius: 0.55rem; border: 1px solid rgba(148,163,184,0.2); } .meta-grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(180px, 1fr)); gap: 0.35rem 0.55rem; } - .meta-item { display: grid; grid-template-columns: auto 1fr; align-items: center; column-gap: 0.35rem; font-size: 0.84rem; padding: 0.18rem 0.45rem; background: rgba(15,23,42,0.6); border-radius: 0.45rem; } - .meta-icon { opacity: 0.85; font-size: 0.88rem; } - .meta-label { color: rgba(203,213,225,0.78); font-weight: 500; } - .meta-value { color: rgba(226,232,240,0.95); font-weight: 600; overflow-wrap: anywhere; } - .meta-value code { font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, 'Liberation Mono', 'Courier New', monospace; font-size: 0.8rem; padding: 0.05rem 0.35rem; background: rgba(15,23,42,0.72); border-radius: 0.35rem; border: 1px solid rgba(148,163,184,0.26); } - .results-section { margin-top: 0.5rem; } - .results-table { width: 100%; border-collapse: collapse; font-size: 0.82rem; } - .results-table th { text-align: left; padding: 0.3rem 0.45rem; color: rgba(148,163,184,0.9); text-transform: uppercase; letter-spacing: 0.05em; font-size: 0.74rem; } - .results-table td { padding: 0.32rem 0.45rem; color: rgba(226,232,240,0.93); vertical-align: top; border-top: 1px solid rgba(148,163,184,0.16); } + .meta-item { display: flex; flex-direction: column; gap: 0.18rem; padding: 0.22rem 0.5rem 0.28rem; background: rgba(13,22,40,0.62); border-radius: 0.45rem; border: 1px solid rgba(148,163,184,0.2); } + .meta-label { font-size: 0.68rem; text-transform: uppercase; letter-spacing: 0.08em; color: rgba(203,213,225,0.75); display: flex; align-items: center; gap: 0.25rem; } + .meta-label-icon { font-size: 0.78rem; } + .meta-value { font-size: 0.88rem; font-weight: 600; color: rgba(226,232,240,0.96); } + .meta-value code { font-size: 0.8rem; background: rgba(2,6,23,0.7); padding: 0.1rem 0.4rem; border-radius: 0.35rem; border: 1px solid rgba(148,163,184,0.26); } + .results-section { margin-top: 0.45rem; } + .results-title { font-size: 0.76rem; text-transform: uppercase; letter-spacing: 0.12em; color: rgba(148,163,184,0.85); margin-bottom: 0.3rem; } + .results-table { width: 100%; border-collapse: collapse; font-size: 0.82rem; background: rgba(12,22,40,0.6); border-radius: 0.45rem; overflow: hidden; } + .results-table th { text-align: left; padding: 0.3rem 0.45rem; font-size: 0.74rem; text-transform: uppercase; letter-spacing: 0.05em; color: rgba(148,163,184,0.9); background: rgba(30,41,59,0.6); } + .results-table td { padding: 0.3rem 0.45rem; border-top: 1px solid rgba(148,163,184,0.16); color: rgba(226,232,240,0.92); } .results-table td pre { margin: 0; font-size: 0.78rem; white-space: pre-wrap; background: none; } - .results-table details { font-size: 0.74rem; } + .results-table tbody tr:hover { background: rgba(59,130,246,0.12); } .results-table details summary { cursor: pointer; color: rgba(125,211,252,0.92); } - .children { list-style: none; margin: 0.4rem 0 0.2rem 0.9rem; padding: 0; display: grid; gap: 0.3rem; } - .event-json { margin-top: 0.45rem; padding: 0.4rem 0.45rem; font-size: 0.78rem; } - .event-json summary { cursor: pointer; color: rgba(125,211,252,0.92); } - .event-json pre { margin-top: 0.35rem; max-height: 220px; overflow: auto; padding: 0.5rem; background: rgba(15,23,42,0.78); border-radius: 0.45rem; border: 1px solid rgba(148,163,184,0.24); } - .empty { text-align: center; padding: 2rem 0; color: rgba(148,163,184,0.7); font-size: 0.88rem; } + .results-table details[open] summary { color: rgba(56,189,248,0.95); } + .results-table details ul { list-style: none; margin: 0.25rem 0 0; padding: 0; } + .results-table details li { margin: 0.18rem 0; display: flex; flex-direction: column; gap: 0.12rem; font-size: 0.74rem; color: rgba(203,213,225,0.88); } + .results-table details li::before { content: ""; } + .results-table details li pre { font-size: 0.74rem; } + .results-table details ul { list-style: none; margin: 0.25rem 0 0; padding: 0; } + .results-table details li { margin: 0.18rem 0; display: flex; flex-direction: column; gap: 0.12rem; font-size: 0.74rem; color: rgba(203,213,225,0.88); } + .results-table details li pre { font-size: 0.74rem; } + .children { list-style: none; margin: 0.45rem 0 0.2rem 0; padding: 0; display: grid; gap: 0.4rem; } + .children > li { border-left: 1px solid rgba(148,163,184,0.18); padding-left: 0.75rem; } + .event-json { margin-top: 0.45rem; } + .event-json summary { cursor: pointer; color: rgba(125,211,252,0.9); font-size: 0.8rem; } + .event-json pre { margin-top: 0.35rem; max-height: 220px; overflow: auto; padding: 0.55rem; background: rgba(13,23,42,0.78); border-radius: 0.45rem; border: 1px solid rgba(148,163,184,0.2); font-size: 0.8rem; } + .empty { text-align: center; padding: 2rem 0; color: rgba(148,163,184,0.72); font-size: 0.9rem; } @media (max-width: 900px) { - header, main { padding: 1rem 1.2rem; } + header, main { padding: 1rem 1.1rem; } .toolbar input, .toolbar select { min-width: 0; flex: 1 1 140px; } + #events-tree { grid-template-columns: 1fr; } } @@ -173,11 +183,12 @@ async def index() -> str: try { return JSON.parse(raw); } catch { return SAFE_DEFAULT; } } - function renderMetaItem(icon, label, value, options = {}) { - const { code = false } = options; + function renderMetaItem(label, value, options = {}) { + const { code = false, icon = '' } = options; const safeValue = value !== undefined && value !== null && value !== '' ? String(value) : 'β€”'; const formatted = code ? `${escapeHtml(safeValue)}` : escapeHtml(safeValue); - return `
${icon}${escapeHtml(label)}${formatted}
`; + const iconHtml = icon ? `${escapeHtml(icon)}` : ''; + return `
${iconHtml}${escapeHtml(label)}
${formatted}
`; } function ingestEvents(rows) { @@ -328,7 +339,6 @@ async def index() -> str: function renderResults(results) { return `
-

Handler Results

@@ -352,7 +362,7 @@ async def index() -> str:
${escapeHtml(result.error_repr || '')}
- ${result.attempts.length} log entry(ies) + ${result.attempts.length} entr${result.attempts.length === 1 ? 'y' : 'ies'}
    ${result.attempts.map((attempt) => `
  • @@ -386,30 +396,34 @@ async def index() -> str: const createdAt = data.event_created_at || 'β€”'; const processedAt = data.event_processed_at || 'β€”'; - const summaryBadges = [ + const summaryPrimary = [ `${escapeHtml(node.event_type || 'UnknownEvent')}`, `${escapeHtml(rawStatus)}`, `🚌 ${escapeHtml(node.eventbus_name || 'β€”')}`, + ]; + if (path) summaryPrimary.push(`🧭 ${escapeHtml(path)}`); + + const summarySecondary = [ `ID ${escapeHtml(shortId)}`, `⏱ ${escapeHtml(timeoutDisplay)}`, - `πŸ•’ ${escapeHtml(insertedAt)}`, - ].join(''); + `πŸ•’ ${escapeHtml(createdAt)}`, + `βœ… ${escapeHtml(processedAt)}`, + ]; + + const summaryBadges = summaryPrimary.concat(summarySecondary).join(''); const metaItems = [ - renderMetaItem('πŸ†”', 'Event ID', node.event_id || 'β€”', { code: true }), - renderMetaItem('πŸ‘ͺ', 'Parent ID', parentId, { code: true }), - renderMetaItem('🧭', 'Path', path || 'β€”'), - renderMetaItem('πŸ“¦', 'Schema', schema, { code: true }), - renderMetaItem('🎯', 'Result type', resultType, { code: true }), - renderMetaItem('⏱', 'Timeout', timeoutDisplay), - renderMetaItem('πŸ•’', 'Created', createdAt), - renderMetaItem('βœ…', 'Processed', processedAt), + renderMetaItem('Event ID', node.event_id || 'β€”', { code: true, icon: 'πŸ†”' }), + renderMetaItem('Parent ID', parentId, { code: true, icon: 'πŸ‘ͺ' }), + renderMetaItem('Path', path || 'β€”', { icon: '🧭' }), + renderMetaItem('Schema', schema, { code: true, icon: 'πŸ“¦' }), + renderMetaItem('Result type', resultType, { code: true, icon: '🎯' }), ].join(''); const resultsSection = node.results.length ? renderResults(node.results) : ''; const childrenSection = node.children.length ? `
      ${node.children.map(renderNode).join('')}
    ` : ''; const eventJson = data && Object.keys(data).length - ? `
    View full payload
    ${escapeHtml(JSON.stringify(data, null, 2))}
    ` + ? `
    Payload
    ${escapeHtml(JSON.stringify(data, null, 2))}
    ` : ''; return ` diff --git a/monitor_app/test_events.py b/monitor_app/test_events.py index fa143ac..797b1d2 100644 --- a/monitor_app/test_events.py +++ b/monitor_app/test_events.py @@ -17,21 +17,33 @@ class RandomTestEvent(BaseEvent): abc_payload_field: str xyz_category_field: str + route_hint: str | None = None class FollowUpEvent(BaseEvent): abc_parent_payload_field: str xyz_detail_field: str + depth: int + + +class AuditTrailEvent(BaseEvent): + source_event_id: str + handler_name: str + message: str def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description='Generate random events for the bubus monitor.') - parser.add_argument('--events', type=int, default=50, help='Number of events to emit.') - parser.add_argument('--min-delay', type=float, default=0.2, help='Minimum delay between events (seconds).') - parser.add_argument('--max-delay', type=float, default=1.0, help='Maximum delay between events (seconds).') + parser.add_argument('--min-delay', type=float, default=0.2, help='Minimum delay between root events (seconds).') + parser.add_argument('--max-delay', type=float, default=1.0, help='Maximum delay between root events (seconds).') parser.add_argument('--error-rate', type=float, default=0.2, help='Fraction of handlers that should raise an error.') - parser.add_argument('--child-rate', type=float, default=0.3, help='Probability of dispatching a follow-up event.') + parser.add_argument('--child-rate', type=float, default=0.4, help='Probability of dispatching follow-up events.') + parser.add_argument('--audit-rate', type=float, default=0.5, help='Probability of emitting audit trail events.') + parser.add_argument('--max-depth', type=int, default=2, help='Maximum nested follow-up depth.') + parser.add_argument('--burst-size', type=int, default=4, help='Number of root events per burst.') parser.add_argument('--categories', nargs='*', default=['alpha', 'beta', 'gamma'], help='Event categories to sample.') + parser.add_argument('--concurrent', type=int, default=3, help='Number of concurrent root event producers.') + parser.add_argument('--events', type=int, default=0, help='Optional count. 0 = run forever.') return parser.parse_args() @@ -43,46 +55,99 @@ async def run_generator(args: argparse.Namespace) -> None: db_path = resolve_db_path() db_path.parent.mkdir(parents=True, exist_ok=True) history = SQLiteEventHistory(db_path) - bus = EventBus(name='MonitorGenerator', event_history=history) + bus = EventBus(name='MonitorGenerator', event_history=history, parallel_handlers=True) categories: Sequence[str] = args.categories or ['default'] async def random_handler(event: RandomTestEvent) -> str: - await asyncio.sleep(random.uniform(0.05, 0.4)) - if random.random() < args.error_rate: - raise RuntimeError(f'Flaky handler failed for payload={event.abc_payload_field}') + await asyncio.sleep(random.uniform(0.35, 0.7)) if random.random() < args.child_rate: - follow_up = FollowUpEvent( - abc_parent_payload_field=event.abc_payload_field, - xyz_detail_field=_random_text(6), + depth = random.randint(1, max(1, args.max_depth)) + await emit_followups(event, depth) + if random.random() < args.audit_rate: + bus.dispatch( + AuditTrailEvent( + source_event_id=event.event_id, + handler_name='random_handler', + message=f'Processed payload {event.abc_payload_field}', + ) ) - bus.dispatch(follow_up) + if random.random() < args.error_rate: + raise RuntimeError(f'Flaky handler failed for payload={event.abc_payload_field}') return event.abc_payload_field[::-1] + async def analytics_handler(event: RandomTestEvent) -> None: + await asyncio.sleep(random.uniform(0.2, 0.5)) + if random.random() < args.audit_rate: + bus.dispatch( + AuditTrailEvent( + source_event_id=event.event_id, + handler_name='analytics_handler', + message=f'Category {event.xyz_category_field}', + ) + ) + + async def auditing_handler(event: RandomTestEvent) -> str: + await asyncio.sleep(random.uniform(0.25, 0.6)) + return f"route:{event.route_hint or 'default'}|category:{event.xyz_category_field}" + async def followup_handler(event: FollowUpEvent) -> str: - await asyncio.sleep(random.uniform(0.05, 0.3)) + await asyncio.sleep(random.uniform(0.3, 0.65)) + if random.random() < 0.3 and event.depth < args.max_depth: + await emit_followups(event, args.max_depth - event.depth) return f'followup:{event.xyz_detail_field}' + async def audit_handler(event: AuditTrailEvent) -> None: + await asyncio.sleep(random.uniform(0.2, 0.4)) + bus.on('RandomTestEvent', random_handler) + bus.on('RandomTestEvent', analytics_handler) + bus.on('RandomTestEvent', auditing_handler) bus.on('FollowUpEvent', followup_handler) + bus.on('AuditTrailEvent', audit_handler) + + print(f'🟒 Streaming events to {db_path}') + + async def producer_task(task_id: int) -> None: + emitted = 0 + while args.events == 0 or emitted < args.events: + burst = random.randint(1, max(1, args.burst_size)) + for _ in range(burst): + payload = _random_text(10) + event = RandomTestEvent( + abc_payload_field=payload, + xyz_category_field=random.choice(list(categories)), + route_hint=f'route-{task_id}-{random.randint(1, 3)}', + event_result_type=str, + ) + bus.dispatch(event) + emitted += 1 + if args.events and emitted >= args.events: + break + await asyncio.sleep(random.uniform(args.min_delay, args.max_delay)) + await asyncio.sleep(random.uniform(args.min_delay, args.max_delay)) - print(f'🟒 Writing events to {db_path}') - - try: - for _ in range(args.events): - payload = _random_text(10) - event = RandomTestEvent( - abc_payload_field=payload, - xyz_category_field=random.choice(list(categories)), + async def emit_followups(parent_event: BaseEvent, remaining_depth: int) -> None: + depth = getattr(parent_event, 'depth', 0) + 1 + followup_count = random.randint(1, 2) + for _ in range(followup_count): + follow_up = FollowUpEvent( + abc_parent_payload_field=getattr(parent_event, 'abc_payload_field', parent_event.event_id), + xyz_detail_field=_random_text(6), + depth=depth, + event_result_type=str, ) - bus.dispatch(event) - await asyncio.sleep(random.uniform(args.min_delay, args.max_delay)) + bus.dispatch(follow_up) + if remaining_depth > 1 and random.random() < 0.6: + await asyncio.sleep(random.uniform(0.2, 0.4)) + await emit_followups(parent_event, remaining_depth - 1) - # Give handlers time to finish + try: + producers = [asyncio.create_task(producer_task(idx)) for idx in range(max(1, args.concurrent))] + await asyncio.gather(*producers) await bus.wait_until_idle() finally: await bus.stop() - print('βœ… Done') def main() -> None: From 615c34ee2056c73134df5270fc67b94e56c21995 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 17:33:03 -0700 Subject: [PATCH 10/79] better middleware API and documentation --- README.md | 30 ++-- bubus/__init__.py | 11 +- bubus/event_history.py | 248 +------------------------- bubus/middlewares.py | 209 +++++++++++++++++++++- bubus/service.py | 186 +++++++++++++++---- monitor_app/README.md | 2 +- monitor_app/test_events.py | 7 +- tests/test_event_result_standalone.py | 26 +-- tests/test_eventbus.py | 171 ++++++++++++++---- 9 files changed, 545 insertions(+), 345 deletions(-) diff --git a/README.md b/README.md index 3bc1f2e..ba99e87 100644 --- a/README.md +++ b/README.md @@ -104,9 +104,9 @@ class SomeService: return 'this works too' # All usage patterns behave the same: -bus.on(SomeEvent, SomeClass().handlers_can_be_methods) -bus.on(SomeEvent, SomeClass.handler_can_be_classmethods) -bus.on(SomeEvent, SomeClass.handlers_can_be_staticmethods) +bus.on(SomeEvent, SomeService().handlers_can_be_methods) +bus.on(SomeEvent, SomeService.handler_can_be_classmethods) +bus.on(SomeEvent, SomeService.handlers_can_be_staticmethods) ```
    @@ -181,6 +181,7 @@ bus.on(GetConfigEvent, load_user_config) bus.on(GetConfigEvent, load_system_config) # Get a merger of all dict results +# (conflicting keys raise ValueError unless raise_if_conflicts=False) event = await bus.dispatch(GetConfigEvent()) config = await event.event_results_flat_dict(raise_if_conflicts=False) # {'debug': False, 'port': 8080, 'timeout': 30} @@ -479,15 +480,14 @@ Persist events automatically to a `jsonl` file for future replay and debugging: ```python from pathlib import Path -from bubus import EventBus -from bubus.event_history import SQLiteEventHistory +from bubus import EventBus, SQLiteHistoryMirrorMiddleware from bubus.middlewares import LoggerEventBusMiddleware, WALEventBusMiddleware # Enable WAL event log persistence (optional) bus = EventBus( name='MyBus', - event_history=SQLiteEventHistory('./events.sqlite'), middlewares=[ + SQLiteHistoryMirrorMiddleware('./events.sqlite'), WALEventBusMiddleware('./events.jsonl'), LoggerEventBusMiddleware('./events.log'), ], @@ -540,24 +540,24 @@ Handler middlewares subclass `EventBusMiddleware` and override whichever lifecyc from bubus.middlewares import EventBusMiddleware class AnalyticsMiddleware(EventBusMiddleware): - async def before_handler(self, eventbus, event, event_result): + async def process_handler_start(self, eventbus, event, event_result): await analytics_bus.dispatch(HandlerStartedAnalyticsEvent(event_id=event_result.event_id)) - async def after_handler(self, eventbus, event, event_result): + async def process_handler_end(self, eventbus, event, event_result): await analytics_bus.dispatch(HandlerCompletedAnalyticsEvent(event_id=event_result.event_id)) - async def on_handler_error(self, eventbus, event, event_result, error): + async def process_handler_exception(self, eventbus, event, event_result, error): await analytics_bus.dispatch(HandlerCompletedAnalyticsEvent(event_id=event_result.event_id, error=error)) ``` Middlewares can observe or mutate the `EventResult` at each step, dispatch additional events, or trigger other side effects (metrics, retries, auth checks, etc.). -Pair that with the built-in `SQLiteEventHistory` to mirror every event and handler transition into append-only `events_log` and `event_results_log` tables, making it easy to inspect or audit the bus state: +Pair that with the built-in `SQLiteHistoryMirrorMiddleware` to mirror every event and handler transition into append-only `events_log` and `event_results_log` tables, making it easy to inspect or audit the bus state: ```python -from bubus.event_history import SQLiteEventHistory +from bubus import EventBus, SQLiteHistoryMirrorMiddleware -bus = EventBus(event_history=SQLiteEventHistory('./events.sqlite')) +bus = EventBus(middlewares=[SQLiteHistoryMirrorMiddleware('./events.sqlite')]) ``` - `max_history_size`: Maximum number of events to keep in history (default: 50, None = unlimited) @@ -647,7 +647,7 @@ class BaseEvent(BaseModel, Generic[T_EventResultType]): # Framework-managed fields event_type: str # Defaults to class name event_id: str # Unique UUID7 identifier, auto-generated if not provided - event_timeout: float = 60.0 # Maximum execution in seconds for each handler + event_timeout: float = 300.0 # Maximum execution in seconds for each handler event_schema: str # Module.Class@version (auto-set based on class & LIBRARY_VERSION env var) event_parent_id: str # Parent event ID (auto-set) event_path: list[str] # List of bus names traversed (auto-set) @@ -667,7 +667,7 @@ class BaseEvent(BaseModel, Generic[T_EventResultType]): #### `BaseEvent` Properties -- `event_status`: `Literal['pending', 'started', 'complete']` Event status +- `event_status`: `Literal['pending', 'started', 'completed']` Event status - `event_started_at`: `datetime` When first handler started processing - `event_completed_at`: `datetime` When all handlers completed processing - `event_children`: `list[BaseEvent]` Get any child events emitted during handling of this event @@ -851,7 +851,7 @@ class EventResult(BaseModel): status: str # 'pending', 'started', 'completed', 'error' result: Any # Handler return value - error: str | None # Error message if failed + error: BaseException | None # Captured exception if the handler failed started_at: datetime # When handler started completed_at: datetime # When handler completed diff --git a/bubus/__init__.py b/bubus/__init__.py index 858db76..2bb0626 100644 --- a/bubus/__init__.py +++ b/bubus/__init__.py @@ -1,7 +1,12 @@ """Event bus for the browser-use agent.""" -from .event_history import EventHistory, InMemoryEventHistory, SQLiteEventHistory -from .middlewares import EventBusMiddleware, LoggerEventBusMiddleware, WALEventBusMiddleware +from .event_history import EventHistory, InMemoryEventHistory +from .middlewares import ( + EventBusMiddleware, + LoggerEventBusMiddleware, + SQLiteHistoryMirrorMiddleware, + WALEventBusMiddleware, +) from .models import BaseEvent, EventHandler, EventResult, PythonIdentifierStr, PythonIdStr, UUIDStr from .service import EventBus @@ -9,10 +14,10 @@ 'EventBus', 'EventBusMiddleware', 'LoggerEventBusMiddleware', + 'SQLiteHistoryMirrorMiddleware', 'WALEventBusMiddleware', 'EventHistory', 'InMemoryEventHistory', - 'SQLiteEventHistory', 'BaseEvent', 'EventResult', 'EventHandler', diff --git a/bubus/event_history.py b/bubus/event_history.py index 6d0be6e..6494bc8 100644 --- a/bubus/event_history.py +++ b/bubus/event_history.py @@ -1,253 +1,17 @@ from __future__ import annotations -import sqlite3 -import threading -from collections.abc import Iterable, Iterator, MutableMapping -from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar +from typing import Any, Generic, TypeVar from .models import BaseEvent, UUIDStr -if TYPE_CHECKING: - from .models import EventResult - from .service import EventBus - BaseEventT = TypeVar('BaseEventT', bound=BaseEvent[Any]) -class EventHistory(MutableMapping[UUIDStr, BaseEventT], Generic[BaseEventT]): - """Base class for storing EventBus history with filter support.""" - - def add(self, event: BaseEventT) -> None: - self[event.event_id] = event - - def contains(self, event_id: UUIDStr) -> bool: - return event_id in self - - def count(self) -> int: - return len(self) - - def iter_events(self) -> Iterable[BaseEventT]: - return self.values() - - def iter_items(self) -> Iterable[tuple[UUIDStr, BaseEventT]]: - return self.items() - - def filter(self, predicate: Callable[[BaseEventT], bool]) -> list[BaseEventT]: - return [event for event in self.values() if predicate(event)] - - def copy(self) -> dict[UUIDStr, BaseEventT]: - return dict(self.items()) - - # Lifecycle hooks ----------------------------------------------------- # - - def record_event_snapshot(self, eventbus: EventBus, event: BaseEventT, phase: str | None = None) -> None: - """Optional hook: persist or mirror a snapshot of the event lifecycle.""" - return None - - def record_event_result_snapshot( - self, - eventbus: EventBus, - event: BaseEventT, - event_result: EventResult[Any], - phase: str | None = None, - ) -> None: - """Optional hook: persist or mirror a snapshot of an event result lifecycle.""" - return None - - -class InMemoryEventHistory(EventHistory[BaseEvent[Any]]): - """Simple in-memory event history implementation.""" - - def __init__(self) -> None: - self._events: dict[UUIDStr, BaseEvent[Any]] = {} - - def __getitem__(self, key: UUIDStr) -> BaseEvent[Any]: - return self._events[key] - - def __setitem__(self, key: UUIDStr, value: BaseEvent[Any]) -> None: - self._events[key] = value - - def __delitem__(self, key: UUIDStr) -> None: - del self._events[key] - - def __iter__(self) -> Iterator[UUIDStr]: - return iter(self._events) - - def __len__(self) -> int: - return len(self._events) - - def clear(self) -> None: - self._events.clear() - - -class SQLiteEventHistory(EventHistory[BaseEvent[Any]]): - """Event history backend that mirrors lifecycle snapshots into append-only SQLite tables.""" - - def __init__(self, db_path: Path | str): - self.db_path = Path(db_path) - self.db_path.parent.mkdir(parents=True, exist_ok=True) - - self._events: dict[UUIDStr, BaseEvent[Any]] = {} - self._lock = threading.RLock() - self._conn = sqlite3.connect(self.db_path, check_same_thread=False, isolation_level=None) - self._init_db() - - def __del__(self): - try: - self._conn.close() - except Exception: - pass - - # MutableMapping implementation --------------------------------------- # - def __getitem__(self, key: UUIDStr) -> BaseEvent[Any]: - return self._events[key] - - def __setitem__(self, key: UUIDStr, value: BaseEvent[Any]) -> None: - self._events[key] = value - - def __delitem__(self, key: UUIDStr) -> None: - self._events.pop(key, None) - - def __iter__(self) -> Iterator[UUIDStr]: - return iter(self._events) - - def __len__(self) -> int: - return len(self._events) - - def clear(self) -> None: - self._events.clear() - - # Internal helpers ---------------------------------------------------- # - def _init_db(self) -> None: - with self._lock: - self._conn.execute( - """ - CREATE TABLE IF NOT EXISTS events_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - event_id TEXT NOT NULL, - event_type TEXT NOT NULL, - event_status TEXT NOT NULL, - eventbus_id TEXT NOT NULL, - eventbus_name TEXT NOT NULL, - phase TEXT, - event_json TEXT NOT NULL, - inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ) - """ - ) - self._conn.execute( - """ - CREATE TABLE IF NOT EXISTS event_results_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - event_result_id TEXT NOT NULL, - event_id TEXT NOT NULL, - handler_id TEXT NOT NULL, - handler_name TEXT NOT NULL, - eventbus_id TEXT NOT NULL, - eventbus_name TEXT NOT NULL, - event_type TEXT NOT NULL, - status TEXT NOT NULL, - phase TEXT, - result_repr TEXT, - error_repr TEXT, - event_result_json TEXT, - inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ) - """ - ) - self._conn.execute('PRAGMA journal_mode=WAL') - self._conn.execute('PRAGMA synchronous=NORMAL') - - # Persistence hooks --------------------------------------------------- # - def record_event_snapshot( - self, - eventbus: EventBus, - event: BaseEvent[Any], - phase: str | None = None, - ) -> None: - event_status = 'error' if any(result.status == 'error' for result in event.event_results.values()) else event.event_status - event_json = event.model_dump_json() - - with self._lock: - self._conn.execute( - """ - INSERT INTO events_log ( - event_id, - event_type, - event_status, - eventbus_id, - eventbus_name, - phase, - event_json - ) - VALUES (?, ?, ?, ?, ?, ?, ?) - """, - ( - event.event_id, - event.event_type, - event_status, - eventbus.id, - eventbus.name, - phase, - event_json, - ), - ) - self._conn.commit() +class EventHistory(dict[UUIDStr, BaseEventT], Generic[BaseEventT]): + """Backward-compatible in-memory history with plain dict behaviour.""" - def record_event_result_snapshot( - self, - eventbus: EventBus, - event: BaseEvent[Any], - event_result: EventResult[Any], - phase: str | None = None, - ) -> None: - error_repr = repr(event_result.error) if event_result.error is not None else None - result_repr: str | None = None - if event_result.result is not None and event_result.error is None: - try: - result_repr = repr(event_result.result) - except Exception: - result_repr = '' + __slots__ = () - # Avoid huge JSON blobs for unreadable result types by falling back to repr - try: - event_result_json = event_result.model_dump_json() - except Exception: - event_result_json = None - with self._lock: - self._conn.execute( - """ - INSERT INTO event_results_log ( - event_result_id, - event_id, - handler_id, - handler_name, - eventbus_id, - eventbus_name, - event_type, - status, - phase, - result_repr, - error_repr, - event_result_json - ) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """, - ( - event_result.id, - event_result.event_id, - event_result.handler_id, - event_result.handler_name, - event_result.eventbus_id, - event_result.eventbus_name, - event.event_type, - event_result.status, - phase, - result_repr, - error_repr, - event_result_json, - ), - ) - self._conn.commit() +# Backwards compatible alias – before refactor this was the default backend. +InMemoryEventHistory = EventHistory diff --git a/bubus/middlewares.py b/bubus/middlewares.py index 6cc798d..4d8f2f8 100644 --- a/bubus/middlewares.py +++ b/bubus/middlewares.py @@ -5,15 +5,21 @@ import asyncio import logging import threading +import sqlite3 from pathlib import Path from typing import Any from bubus.logging import log_eventbus_tree -from bubus.models import BaseEvent +from bubus.models import BaseEvent, EventResult from bubus.service import EventBus from bubus.service import EventBusMiddleware as _EventBusMiddleware -__all__ = ['EventBusMiddleware', 'WALEventBusMiddleware', 'LoggerEventBusMiddleware'] +__all__ = [ + 'EventBusMiddleware', + 'WALEventBusMiddleware', + 'LoggerEventBusMiddleware', + 'SQLiteHistoryMirrorMiddleware', +] logger = logging.getLogger('bubus.middleware') @@ -28,7 +34,7 @@ def __init__(self, wal_path: Path | str): self.wal_path.parent.mkdir(parents=True, exist_ok=True) self._lock = threading.Lock() - async def after_event(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: + async def post_event_completed(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: if getattr(event, '_wal_written', False): return @@ -71,7 +77,7 @@ def __init__(self, log_path: Path | str | None = None): if self.log_path is not None: self.log_path.parent.mkdir(parents=True, exist_ok=True) - async def after_event(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: + async def post_event_completed(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: if getattr(event, '_logger_middleware_logged', False): return @@ -102,3 +108,198 @@ def _append_line(self, line: str) -> None: with self.log_path.open('a', encoding='utf-8') as fp: fp.write(line) print(line.rstrip('\n'), flush=True) + + +class SQLiteHistoryMirrorMiddleware(EventBusMiddleware): + """Mirror event and handler snapshots into append-only SQLite tables.""" + + def __init__(self, db_path: Path | str): + self.db_path = Path(db_path) + self.db_path.parent.mkdir(parents=True, exist_ok=True) + + self._lock = threading.RLock() + self._conn = sqlite3.connect(self.db_path, check_same_thread=False, isolation_level=None) + self._init_db() + + def __del__(self): + try: + self._conn.close() + except Exception: + pass + + async def post_event_snapshot_recorded(self, eventbus: EventBus, event: BaseEvent[Any], phase: str) -> None: + event_status = ( + 'error' if any(result.status == 'error' for result in event.event_results.values()) else event.event_status + ) + event_json = event.model_dump_json() + await asyncio.to_thread( + self._insert_event_snapshot, + eventbus, + event.event_id, + event.event_type, + event_status, + phase, + event_json, + ) + + async def post_event_handler_snapshot_recorded( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result: EventResult[Any], + phase: str, + ) -> None: + error_repr = repr(event_result.error) if event_result.error is not None else None + result_repr: str | None = None + if event_result.result is not None and event_result.error is None: + try: + result_repr = repr(event_result.result) + except Exception: + result_repr = '' + + try: + event_result_json = event_result.model_dump_json() + except Exception: + event_result_json = None + + await asyncio.to_thread( + self._insert_event_result_snapshot, + event_result.id, + event_result.event_id, + event_result.handler_id, + event_result.handler_name, + eventbus.id, + eventbus.name, + event.event_type, + event_result.status, + phase, + result_repr, + error_repr, + event_result_json, + ) + + def _init_db(self) -> None: + with self._lock: + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS events_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_id TEXT NOT NULL, + event_type TEXT NOT NULL, + event_status TEXT NOT NULL, + eventbus_id TEXT NOT NULL, + eventbus_name TEXT NOT NULL, + phase TEXT, + event_json TEXT NOT NULL, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS event_results_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_result_id TEXT NOT NULL, + event_id TEXT NOT NULL, + handler_id TEXT NOT NULL, + handler_name TEXT NOT NULL, + eventbus_id TEXT NOT NULL, + eventbus_name TEXT NOT NULL, + event_type TEXT NOT NULL, + status TEXT NOT NULL, + phase TEXT, + result_repr TEXT, + error_repr TEXT, + event_result_json TEXT, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + self._conn.execute('PRAGMA journal_mode=WAL') + self._conn.execute('PRAGMA synchronous=NORMAL') + + def _insert_event_snapshot( + self, + eventbus: EventBus, + event_id: str, + event_type: str, + event_status: str, + phase: str | None, + event_json: str, + ) -> None: + with self._lock: + self._conn.execute( + """ + INSERT INTO events_log ( + event_id, + event_type, + event_status, + eventbus_id, + eventbus_name, + phase, + event_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?) + """, + ( + event_id, + event_type, + event_status, + eventbus.id, + eventbus.name, + phase, + event_json, + ), + ) + self._conn.commit() + + def _insert_event_result_snapshot( + self, + event_result_id: str, + event_id: str, + handler_id: str, + handler_name: str, + eventbus_id: str, + eventbus_name: str, + event_type: str, + status: str, + phase: str | None, + result_repr: str | None, + error_repr: str | None, + event_result_json: str | None, + ) -> None: + with self._lock: + self._conn.execute( + """ + INSERT INTO event_results_log ( + event_result_id, + event_id, + handler_id, + handler_name, + eventbus_id, + eventbus_name, + event_type, + status, + phase, + result_repr, + error_repr, + event_result_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, + ( + event_result_id, + event_id, + handler_id, + handler_name, + eventbus_id, + eventbus_name, + event_type, + status, + phase, + result_repr, + error_repr, + event_result_json, + ), + ) + self._conn.commit() diff --git a/bubus/service.py b/bubus/service.py index c0f14c3..336a59d 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -8,6 +8,7 @@ from collections import defaultdict, deque from collections.abc import Callable, Sequence from contextvars import ContextVar +from datetime import UTC, datetime, timedelta from pathlib import Path from typing import Any, Literal, TypeGuard, TypeVar, cast, overload @@ -15,7 +16,7 @@ uuid7str: Callable[[], str] = uuid7str # pyright: ignore -from bubus.event_history import EventHistory, InMemoryEventHistory +from bubus.event_history import EventHistory from bubus.models import ( BUBUS_LOGGING_LEVEL, AsyncEventHandlerClassMethod, @@ -53,32 +54,58 @@ class QueueShutDown(Exception): EventPatternType = PythonIdentifierStr | Literal['*'] | type['BaseEvent[Any]'] + + class EventBusMiddleware: - """Base class for EventBus middlewares.""" + """Hookable lifecycle interface for observing or extending EventBus execution.""" - async def before_handler( + async def pre_event_handler_started( self, eventbus: 'EventBus', event: 'BaseEvent[Any]', event_result: EventResult[Any] ) -> None: + """Called just before a handler begins execution.""" return None - async def after_handler( + async def post_event_handler_completed( self, eventbus: 'EventBus', event: 'BaseEvent[Any]', event_result: EventResult[Any] ) -> None: + """Called after a handler completes successfully.""" return None - async def on_handler_error( + async def post_event_handler_failed( self, eventbus: 'EventBus', event: 'BaseEvent[Any]', event_result: EventResult[Any], error: BaseException, ) -> None: + """Called when a handler raises or is cancelled.""" + return None + + async def post_event_snapshot_recorded( + self, eventbus: 'EventBus', event: 'BaseEvent[Any]', phase: str + ) -> None: + """Called whenever an event snapshot is persisted.""" + return None + + async def post_event_handler_snapshot_recorded( + self, + eventbus: 'EventBus', + event: 'BaseEvent[Any]', + event_result: EventResult[Any], + phase: str, + ) -> None: + """Called whenever a handler snapshot is persisted.""" return None - async def after_event(self, eventbus: 'EventBus', event: 'BaseEvent[Any]') -> None: + async def post_event_completed(self, eventbus: 'EventBus', event: 'BaseEvent[Any]') -> None: + """Called after an event and all of its handlers have finished.""" return None +def _is_middleware_class(candidate: object) -> TypeGuard[type['EventBusMiddleware']]: + return isinstance(candidate, type) and issubclass(candidate, EventBusMiddleware) + + class CleanShutdownQueue(asyncio.Queue[QueueEntryType]): """asyncio.Queue subclass that handles shutdown cleanly without warnings.""" @@ -294,7 +321,7 @@ class EventBus: id: UUIDStr = '00000000-0000-0000-0000-000000000000' handlers: dict[PythonIdStr, list[ContravariantEventHandler['BaseEvent[Any]']]] # collected by .on(, ) event_queue: CleanShutdownQueue['BaseEvent[Any]'] | None - event_history: 'EventHistory[BaseEvent[Any]]' + event_history: EventHistory['BaseEvent[Any]'] _is_running: bool = False _runloop_task: asyncio.Task[None] | None = None @@ -305,7 +332,6 @@ def __init__( name: PythonIdentifierStr | None = None, parallel_handlers: bool = False, max_history_size: int | None = 50, # Keep only 50 events in history - event_history: EventHistory['BaseEvent[Any]'] | None = None, middlewares: Sequence[EventBusMiddleware | type[EventBusMiddleware]] | None = None, ): self.id = uuid7str() @@ -355,7 +381,7 @@ def __init__( ) self.event_queue = None - self.event_history = event_history or InMemoryEventHistory() + self.event_history = EventHistory() self.handlers = defaultdict(list) self.parallel_handlers = parallel_handlers self._on_idle = None @@ -403,7 +429,7 @@ def middlewares(self, value: Sequence[EventBusMiddleware | type[EventBusMiddlewa for middleware in value: if isinstance(middleware, EventBusMiddleware): instances.append(middleware) - elif inspect.isclass(middleware) and issubclass(middleware, EventBusMiddleware): + elif _is_middleware_class(middleware): instances.append(middleware()) else: raise TypeError( @@ -424,23 +450,61 @@ async def _call_middleware_hook( if inspect.isawaitable(result): await result - async def _middlewares_before_handler(self, event: 'BaseEvent[Any]', event_result: EventResult[Any]) -> None: + # Middleware fan-out helpers ------------------------------------------- # + async def _middlewares_post_event_snapshot_recorded( + self, event: 'BaseEvent[Any]', phase: str + ) -> None: + for middleware in self._middlewares: + await self._call_middleware_hook( + middleware, 'post_event_snapshot_recorded', self, event, phase + ) + + async def _middlewares_post_event_handler_snapshot_recorded( + self, event: 'BaseEvent[Any]', event_result: EventResult[Any], phase: str + ) -> None: + for middleware in self._middlewares: + await self._call_middleware_hook( + middleware, + 'post_event_handler_snapshot_recorded', + self, + event, + event_result, + phase, + ) + + async def _maybe_record_event_started(self, event: 'BaseEvent[Any]') -> None: + if getattr(event, '_history_started_logged', False): + return + setattr(event, '_history_started_logged', True) + await self._middlewares_post_event_snapshot_recorded(event, 'started') + + async def _middlewares_pre_event_handler_started( + self, event: 'BaseEvent[Any]', event_result: EventResult[Any] + ) -> None: for middleware in self._middlewares: - await self._call_middleware_hook(middleware, 'before_handler', self, event, event_result) + await self._call_middleware_hook( + middleware, 'pre_event_handler_started', self, event, event_result + ) - async def _middlewares_after_handler(self, event: 'BaseEvent[Any]', event_result: EventResult[Any]) -> None: + async def _middlewares_post_event_handler_completed( + self, event: 'BaseEvent[Any]', event_result: EventResult[Any] + ) -> None: for middleware in self._middlewares: - await self._call_middleware_hook(middleware, 'after_handler', self, event, event_result) + await self._call_middleware_hook( + middleware, 'post_event_handler_completed', self, event, event_result + ) - async def _middlewares_on_error( + async def _middlewares_post_event_handler_failed( self, event: 'BaseEvent[Any]', event_result: EventResult[Any], error: BaseException ) -> None: for middleware in self._middlewares: - await self._call_middleware_hook(middleware, 'on_handler_error', self, event, event_result, error) + await self._call_middleware_hook( + middleware, 'post_event_handler_failed', self, event, event_result, error + ) - async def _middleware_after_event(self, event: 'BaseEvent[Any]') -> None: + async def _middlewares_post_event_completed(self, event: 'BaseEvent[Any]') -> None: for middleware in self._middlewares: - await self._call_middleware_hook(middleware, 'after_event', self, event) + await self._call_middleware_hook(middleware, 'post_event_completed', self, event) async def _dispatch_after_event_hooks(self, event: 'BaseEvent[Any]') -> None: if getattr(event, '_after_event_hooks_run', False): @@ -455,25 +519,40 @@ async def _dispatch_after_event_hooks(self, event: 'BaseEvent[Any]') -> None: if not event_completed: return + if not getattr(event, '_history_completed_logged', False): + setattr(event, '_history_completed_logged', True) + final_phase = ( + 'error' + if any(result.status == 'error' for result in event.event_results.values()) + else 'completed' + ) + await self._middlewares_post_event_snapshot_recorded(event, final_phase) + setattr(event, '_after_event_hooks_run', True) - await self._middleware_after_event(event) + await self._middlewares_post_event_completed(event) @property def events_pending(self) -> list['BaseEvent[Any]']: """Get events that haven't started processing yet (does not include events that have not even finished dispatching yet in self.event_queue)""" - return self.event_history.filter(lambda event: event.event_started_at is None and event.event_completed_at is None) + return [ + event + for event in self.event_history.values() + if event.event_started_at is None and event.event_completed_at is None + ] @property def events_started(self) -> list['BaseEvent[Any]']: """Get events currently being processed""" return [ - event for event in self.event_history.filter(lambda e: e.event_started_at and not e.event_completed_at) + event + for event in self.event_history.values() + if event.event_started_at is not None and event.event_completed_at is None ] @property def events_completed(self) -> list['BaseEvent[Any]']: """Get events that have completed processing""" - return self.event_history.filter(lambda e: e.event_completed_at is not None) + return [event for event in self.event_history.values() if event.event_completed_at is not None] # Overloads for typed event patterns with specific handler signatures # Order matters - more specific types must come before general ones @@ -633,8 +712,8 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: # Only enforce if we have memory limits set if self.max_history_size is not None: queue_size = self.event_queue.qsize() if self.event_queue else 0 - pending_in_history = len( - self.event_history.filter(lambda event: event.event_status in ('pending', 'started')) + pending_in_history = sum( + 1 for event in self.event_history.values() if event.event_status in ('pending', 'started') ) total_pending = queue_size + pending_in_history @@ -653,7 +732,11 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: try: self.event_queue.put_nowait(event) # Only add to history after successfully queuing - self.event_history.add(event) + self.event_history[event.event_id] = event + loop = asyncio.get_running_loop() + loop.create_task( + self._middlewares_post_event_snapshot_recorded(event, 'pending') + ) logger.info( f'πŸ—£οΈ {self}.dispatch({event.event_type}) ➑️ {event.event_type}#{event.event_id[-4:]} (#{self.event_queue.qsize()} {event.event_status})' ) @@ -671,11 +754,18 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: # This avoids "orphaned" pending results for handlers that get filtered out later. # Clean up if over the limit - if self.max_history_size and self.event_history.count() > self.max_history_size: + if self.max_history_size and len(self.event_history) > self.max_history_size: self.cleanup_event_history() return event + def _event_matches_pattern(self, event: 'BaseEvent[Any]', pattern: EventPatternType) -> bool: + if pattern == '*': + return True + if isinstance(pattern, str): + return event.event_type == pattern + return isinstance(event, pattern) + @overload async def expect( self, @@ -758,12 +848,20 @@ def notify_expect_handler(event: 'BaseEvent[Any]') -> None: # Register temporary listener that watches for matching events and triggers the expect handler self.on(event_type, notify_expect_handler) + # Ensure the temporary handler runs before user handlers so expect() resolves immediately after dispatch. + event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) + handlers_for_key = self.handlers.get(event_key) + if handlers_for_key and handlers_for_key[-1] is notify_expect_handler: + handlers_for_key.insert(0, handlers_for_key.pop()) + try: # Wait for the future with optional timeout if timeout is not None: return await asyncio.wait_for(future, timeout=timeout) else: return await future + except asyncio.TimeoutError: + return None finally: # Clean up handler event_key: str = event_type.__name__ if isinstance(event_type, type) else str(event_type) # pyright: ignore[reportUnknownMemberType, reportPartialTypeErrors] @@ -1153,9 +1251,13 @@ async def _execute_handlers( event.event_mark_complete_if_all_handlers_completed() # mark event completed immediately if it has no handlers return - event.event_create_pending_results( + pending_results = event.event_create_pending_results( applicable_handlers, eventbus=self, timeout=timeout or event.event_timeout ) + for pending_result in pending_results.values(): + await self._middlewares_post_event_handler_snapshot_recorded( + event, pending_result, 'pending' + ) # Execute all handlers in parallel if self.parallel_handlers: @@ -1203,13 +1305,23 @@ async def execute_handler( logger.debug(f' ↳ {self}.execute_handler({event}, handler={get_handler_name(handler)}#{handler_id[-4:]})') if handler_id not in event.event_results: - event.event_create_pending_results({handler_id: handler}, eventbus=self, timeout=timeout or event.event_timeout) + new_results = event.event_create_pending_results( + {handler_id: handler}, eventbus=self, timeout=timeout or event.event_timeout + ) + for pending_result in new_results.values(): + await self._middlewares_post_event_handler_snapshot_recorded( + event, pending_result, 'pending' + ) event_result = event.event_results[handler_id] event_result.update(status='started', timeout=timeout or event.event_timeout) + await self._middlewares_post_event_handler_snapshot_recorded( + event, event_result, 'started' + ) + await self._maybe_record_event_started(event) - await self._middlewares_before_handler(event, event_result) + await self._middlewares_pre_event_handler_started(event, event_result) try: result_value = await event_result.execute( @@ -1227,17 +1339,25 @@ async def execute_handler( f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {result_type_name}' ) - await self._middlewares_after_handler(event, event_result) + await self._middlewares_post_event_handler_completed(event, event_result) + await self._middlewares_post_event_handler_snapshot_recorded( + event, event_result, 'completed' + ) return cast(T_EventResultType, result_value) except asyncio.CancelledError as exc: - await self._middlewares_on_error(event, event_result, exc) + await self._middlewares_post_event_handler_failed(event, event_result, exc) + await self._middlewares_post_event_handler_snapshot_recorded( + event, event_result, 'error' + ) raise except Exception as exc: - await self._middlewares_on_error(event, event_result, exc) + await self._middlewares_post_event_handler_failed(event, event_result, exc) + await self._middlewares_post_event_handler_snapshot_recorded( + event, event_result, 'error' + ) raise - def _would_create_loop(self, event: 'BaseEvent[Any]', handler: EventHandler) -> bool: """Check if calling this handler would create a loop""" diff --git a/monitor_app/README.md b/monitor_app/README.md index 6e05e75..6e40acd 100644 --- a/monitor_app/README.md +++ b/monitor_app/README.md @@ -1,6 +1,6 @@ # bubus Monitor App -Minimal FastAPI application that reads the `events_log` and `event_results_log` tables produced by `SQLiteEventHistory` and exposes them over HTTP/WebSocket for live monitoring. +Minimal FastAPI application that reads the `events_log` and `event_results_log` tables produced by the `SQLiteHistoryMirrorMiddleware` and exposes them over HTTP/WebSocket for live monitoring. Install dependencies (once): diff --git a/monitor_app/test_events.py b/monitor_app/test_events.py index 797b1d2..b8225db 100644 --- a/monitor_app/test_events.py +++ b/monitor_app/test_events.py @@ -8,8 +8,7 @@ import string from typing import Sequence -from bubus import BaseEvent, EventBus -from bubus.event_history import SQLiteEventHistory +from bubus import BaseEvent, EventBus, SQLiteHistoryMirrorMiddleware from .config import resolve_db_path @@ -54,8 +53,8 @@ def _random_text(length: int = 8) -> str: async def run_generator(args: argparse.Namespace) -> None: db_path = resolve_db_path() db_path.parent.mkdir(parents=True, exist_ok=True) - history = SQLiteEventHistory(db_path) - bus = EventBus(name='MonitorGenerator', event_history=history, parallel_handlers=True) + middleware = SQLiteHistoryMirrorMiddleware(db_path) + bus = EventBus(name='MonitorGenerator', middlewares=[middleware], parallel_handlers=True) categories: Sequence[str] = args.categories or ['default'] diff --git a/tests/test_event_result_standalone.py b/tests/test_event_result_standalone.py index 1f5fbc7..bf3a457 100644 --- a/tests/test_event_result_standalone.py +++ b/tests/test_event_result_standalone.py @@ -1,9 +1,11 @@ -import asyncio from uuid import uuid4 import pytest -from bubus.models import BaseEvent, EventResult, get_handler_id +from typing import Any, cast + +from bubus.models import BaseEvent, EventHandler, EventResult, get_handler_id +from bubus.service import EventBus class _StubEvent: @@ -41,17 +43,19 @@ async def test_event_result_execute_without_base_event() -> None: async def handler(event: _StubEvent) -> str: return 'ok' + test_bus = EventBus(name='StandaloneTest1') result_value = await event_result.execute( - stub_event, - handler, - eventbus='StandaloneBus', + cast(BaseEvent[Any], stub_event), + cast(EventHandler, handler), + eventbus=test_bus, timeout=stub_event.event_timeout, ) assert result_value == 'ok' assert event_result.status == 'completed' assert event_result.result == 'ok' - assert stub_event._cancelled_due_to_error is None + assert stub_event.__dict__.get('_cancelled_due_to_error') is None + await test_bus.stop() class StandaloneEvent(BaseEvent[str]): @@ -67,14 +71,15 @@ async def test_event_and_result_without_eventbus() -> None: def handler(evt: StandaloneEvent) -> str: return evt.data.upper() - handler_id = get_handler_id(handler, None) - pending_results = event.event_create_pending_results({handler_id: handler}) + handler_id = get_handler_id(cast(EventHandler, handler), None) + pending_results = event.event_create_pending_results({handler_id: cast(EventHandler, handler)}) event_result = pending_results[handler_id] + test_bus = EventBus(name='StandaloneTest2') value = await event_result.execute( event, - handler, - eventbus='StandaloneBus', + cast(EventHandler, handler), + eventbus=test_bus, timeout=event.event_timeout, ) @@ -84,3 +89,4 @@ def handler(evt: StandaloneEvent) -> str: event.event_mark_complete_if_all_handlers_completed() assert event.event_completed_at is not None + await test_bus.stop() diff --git a/tests/test_eventbus.py b/tests/test_eventbus.py index 5e86890..8039518 100644 --- a/tests/test_eventbus.py +++ b/tests/test_eventbus.py @@ -25,13 +25,8 @@ import pytest from pydantic import Field -from bubus import BaseEvent, EventBus -from bubus.middlewares import ( - EventBusMiddleware, - LoggerEventBusMiddleware, - SQLiteEventBusMiddleware, - WALEventBusMiddleware, -) +from bubus import BaseEvent, EventBus, SQLiteHistoryMirrorMiddleware +from bubus.middlewares import EventBusMiddleware, LoggerEventBusMiddleware, WALEventBusMiddleware class CreateAgentTaskEvent(BaseEvent): @@ -169,6 +164,31 @@ def test_emit_sync(self, mock_agent): assert 'no event loop is running' in str(e.value) assert len(bus.event_history) == 0 + async def test_unbounded_history_disables_capacity_limit(self): + """When max_history_size=None, dispatch should not enforce the 100-event cap.""" + bus = EventBus(name='NoLimitBus', max_history_size=None) + + processed = 0 + + async def slow_handler(event: BaseEvent) -> None: + nonlocal processed + await asyncio.sleep(0.01) + processed += 1 + + bus.on('SlowEvent', slow_handler) + + events: list[BaseEvent] = [] + + try: + for _ in range(150): + events.append(bus.dispatch(BaseEvent(event_type='SlowEvent'))) + + await asyncio.gather(*events) + await bus.wait_until_idle() + assert processed == 150 + finally: + await bus.stop(clear=True) + class TestHandlerRegistration: """Test handler registration and execution""" @@ -344,6 +364,56 @@ def static_method_handler(event: UserActionEvent) -> str: assert 'Handled by static method' in results_list +class TestEventForwarding: + """Tests for event forwarding between buses.""" + + @pytest.mark.asyncio + async def test_forwarding_loop_prevention(self): + bus_a = EventBus(name='ForwardBusA') + bus_b = EventBus(name='ForwardBusB') + bus_c = EventBus(name='ForwardBusC') + + class LoopEvent(BaseEvent[str]): + pass + + seen: dict[str, int] = {'A': 0, 'B': 0, 'C': 0} + + async def handler_a(event: LoopEvent) -> str: + seen['A'] += 1 + return 'handled-a' + + async def handler_b(event: LoopEvent) -> str: + seen['B'] += 1 + return 'handled-b' + + async def handler_c(event: LoopEvent) -> str: + seen['C'] += 1 + return 'handled-c' + + bus_a.on(LoopEvent, handler_a) + bus_b.on(LoopEvent, handler_b) + bus_c.on(LoopEvent, handler_c) + + # Create a forwarding cycle A -> B -> C -> A, which should be broken automatically. + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) + bus_c.on('*', bus_a.dispatch) + + try: + event = await bus_a.dispatch(LoopEvent()) + + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + await bus_c.wait_until_idle() + + assert seen == {'A': 1, 'B': 1, 'C': 1} + assert event.event_path == ['ForwardBusA', 'ForwardBusB', 'ForwardBusC'] + finally: + await bus_a.stop(clear=True) + await bus_b.stop(clear=True) + await bus_c.stop(clear=True) + + class TestFIFOOrdering: """Test FIFO event processing""" @@ -806,10 +876,12 @@ class TrackingMiddleware(EventBusMiddleware): def __init__(self, call_log: list[tuple[str, str]]): self.call_log = call_log - async def before_handler(self, eventbus: EventBus, event: BaseEvent, event_result): + async def pre_event_handler_started(self, eventbus: EventBus, event: BaseEvent, event_result): self.call_log.append(('before', event_result.status)) - async def after_handler(self, eventbus: EventBus, event: BaseEvent, event_result): + async def post_event_handler_completed( + self, eventbus: EventBus, event: BaseEvent, event_result + ): self.call_log.append(('after', event_result.status)) bus = EventBus(middlewares=[TrackingMiddleware(calls)]) @@ -834,10 +906,10 @@ class ErrorMiddleware(EventBusMiddleware): def __init__(self, log: list[tuple[str, str]]): self.log = log - async def before_handler(self, eventbus: EventBus, event: BaseEvent, event_result): + async def pre_event_handler_started(self, eventbus: EventBus, event: BaseEvent, event_result): self.log.append(('before', event_result.status)) - async def on_handler_error( + async def post_event_handler_failed( self, eventbus: EventBus, event: BaseEvent, @@ -864,10 +936,10 @@ async def failing_handler(event: BaseEvent) -> None: await bus.stop() -class TestSQLiteMiddleware: - async def test_sqlite_middleware_persists_events_and_results(self, tmp_path): +class TestSQLiteHistoryMirror: + async def test_sqlite_history_persists_events_and_results(self, tmp_path): db_path = tmp_path / 'events.sqlite' - middleware = SQLiteEventBusMiddleware(db_path) + middleware = SQLiteHistoryMirrorMiddleware(db_path) bus = EventBus(middlewares=[middleware]) async def handler(event: BaseEvent) -> str: @@ -880,19 +952,21 @@ async def handler(event: BaseEvent) -> str: await bus.wait_until_idle() conn = sqlite3.connect(db_path) - events = conn.execute('SELECT event_id, event_type, event_status, event_json FROM events_log').fetchall() - assert len(events) == 1 - assert events[0][1] == 'UserActionEvent' - assert events[0][2] == 'completed' + events = conn.execute( + 'SELECT phase, event_status FROM events_log ORDER BY id' + ).fetchall() + assert [phase for phase, _ in events] == ['pending', 'started', 'completed'] + assert [status for _, status in events] == ['pending', 'started', 'completed'] result_rows = conn.execute( - 'SELECT status, result_repr, error_repr FROM event_results_log ORDER BY id' + 'SELECT phase, status, result_repr, error_repr FROM event_results_log ORDER BY id' ).fetchall() conn.close() - assert [status for status, *_ in result_rows] == ['started', 'completed'] - assert result_rows[-1][1] == "'ok'" - assert result_rows[-1][2] is None + assert [phase for phase, *_ in result_rows] == ['pending', 'started', 'completed'] + assert [status for _, status, *_ in result_rows] == ['pending', 'started', 'completed'] + assert result_rows[-1][2] == "'ok'" + assert result_rows[-1][3] is None finally: await bus.stop() @@ -935,9 +1009,10 @@ async def handler(event: BaseEvent) -> str: assert 'stdout' not in captured.err finally: await bus.stop() - async def test_sqlite_middleware_records_errors(self, tmp_path): + + async def test_sqlite_history_records_errors(self, tmp_path): db_path = tmp_path / 'events.sqlite' - middleware = SQLiteEventBusMiddleware(db_path) + middleware = SQLiteHistoryMirrorMiddleware(db_path) bus = EventBus(middlewares=[middleware]) async def failing_handler(event: BaseEvent) -> None: @@ -951,17 +1026,20 @@ async def failing_handler(event: BaseEvent) -> None: conn = sqlite3.connect(db_path) result_rows = conn.execute( - 'SELECT status, error_repr FROM event_results_log ORDER BY id' + 'SELECT phase, status, error_repr FROM event_results_log ORDER BY id' ).fetchall() - events = conn.execute('SELECT event_status FROM events_log').fetchall() + events = conn.execute('SELECT phase, event_status FROM events_log ORDER BY id').fetchall() conn.close() - assert [status for status, _ in result_rows] == ['started', 'error'] - assert 'RuntimeError' in result_rows[-1][1] - assert events[0][0] == 'error' + assert [phase for phase, *_ in result_rows] == ['pending', 'started', 'error'] + assert [status for _, status, *_ in result_rows] == ['pending', 'started', 'error'] + assert 'RuntimeError' in result_rows[-1][2] + assert [phase for phase, _ in events] == ['pending', 'started', 'error'] + assert [status for _, status in events] == ['pending', 'started', 'error'] finally: await bus.stop() + class TestEventBusHierarchy: """Test hierarchical EventBus subscription patterns""" @@ -1279,11 +1357,19 @@ async def slow_handler(event: BaseEvent) -> str: # Wait for expect received = await expect_task - # At this point, the slow handler should have run - # but we receive the event as soon as it matches assert received.event_type == 'SlowEvent' - # The event might not be fully completed yet since expect - # triggers as soon as the event is processed by its handler + assert processing_complete is False + + # Slow handler should still be running (or pending) when expect() resolves + slow_result = next( + (res for res in received.event_results.values() if res.handler_name.endswith('slow_handler')), + None, + ) + assert slow_result is not None + assert slow_result.status != 'completed' + + await eventbus.wait_until_idle() + assert processing_complete is True async def test_expect_with_complex_predicate(self, eventbus): """Test expect with complex predicate logic""" @@ -1512,6 +1598,25 @@ async def bad_handler(event): merged_bad = await event_bad.event_results_flat_dict() assert merged_bad == {} # Empty dict since no dict results + async def test_flat_dict_conflict_raises(self, eventbus): + """event_results_flat_dict() raises by default when handlers conflict.""" + + async def handler_one(event): + return {'shared': 1, 'unique1': 'a'} + + async def handler_two(event): + return {'shared': 2, 'unique2': 'b'} + + eventbus.on('ConflictEvent', handler_one) + eventbus.on('ConflictEvent', handler_two) + + event = await eventbus.dispatch(BaseEvent(event_type='ConflictEvent')) + + with pytest.raises(ValueError) as exc_info: + await event.event_results_flat_dict() + + assert 'overwrite values from previous handlers' in str(exc_info.value) + async def test_flat_list(self, eventbus): """Test event_results_flat_list() concatenation""" From 719e934ff79a75a3b5186c83b19b7166a5a767d4 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 17:56:42 -0700 Subject: [PATCH 11/79] add query method and debounce helpers --- README.md | 62 ++++++++-- bubus/service.py | 189 ++++++++++++++++++++---------- tests/test_eventbus.py | 99 ++++++++++++++-- tests/test_typed_event_results.py | 24 ++++ 4 files changed, 299 insertions(+), 75 deletions(-) diff --git a/README.md b/README.md index ba99e87..10cedbb 100644 --- a/README.md +++ b/README.md @@ -295,15 +295,15 @@ async def on_generate_invoice_pdf(event: GenerateInvoiceEvent) -> pdf: # wait for the response event to be fired by the RPC client is_our_response = lambda response_event: response_event.request_id == request_event.request_id is_succesful = lambda response_event: response_event.invoice_id == event.invoice_id and response_event.invoice_url - try: - response_event: APIResponseEvent = await bus.expect( - APIResponseEvent, # wait for events of this type (also accepts str name) - include=lambda e: is_our_response(e) and is_succesful(e), # only include events that match a certain filter func - exclude=lambda e: e.status != 'retrying', # optionally exclude certain events, overrides include - timeout=30, # raises asyncio.TimeoutError if no match is seen within 30sec - ) - except TimeoutError: + response_event: APIResponseEvent | None = await bus.expect( + APIResponseEvent, # wait for events of this type (also accepts str name) + include=lambda e: is_our_response(e) and is_succesful(e), # only include events that match a certain filter func + exclude=lambda e: e.status != 'retrying', # optionally exclude certain events, overrides include + timeout=30, # returns None if no match is seen within 30 sec + ) + if response_event is None: await bus.dispatch(TimedOutError(msg='timed out while waiting for response from server', request_id=request_event.id)) + return None return response_event.invoice_url @@ -312,6 +312,32 @@ event_bus.on(GenerateInvoiceEvent, on_generate_invoice_pdf) > [!IMPORTANT] > `expect()` resolves when the event is first *dispatched* to the `EventBus`, not when it completes. `await response_event` to get the completed event. +> If the timeout elapses with no match, `expect()` returns `None`. + +
    + +### πŸ” Event Debouncing + +Avoid re-running expensive work by checking recent history before dispatching. Combine `query()`, `expect()`, and `dispatch()` to coalesce bursts of identical events: + +```python +from datetime import timedelta + +debounced_event = ( + await bus.query(SyncWithServerEvent, since=timedelta(seconds=10), include=lambda e: e.user_id == user.id) + or await bus.expect(SyncWithServerEvent, timeout=5, include=lambda e: e.user_id == user.id) + or await bus.dispatch(SyncWithServerEvent(user_id=user.id)) +) + +if debounced_event is None: + raise RuntimeError('Sync dispatch failed unexpectedly') + +print(f'Last sync completed at {debounced_event.event_completed_at}') +``` + +- `query()` searches the most recent completed events (newest-first) in memory. +- `expect()` waits for an in-flight event if none were found in the look-back window. +- Only when both checks miss do you emit a fresh event, satisfying typical debounce requirements without extra state.
    @@ -595,7 +621,22 @@ result = await event # await the pending Event to get the completed Event **Note:** When `max_history_size` is set, EventBus enforces a hard limit of 100 pending events (queue + processing) to prevent runaway memory usage. Dispatch will raise `RuntimeError` if this limit is exceeded. -##### `expect(event_type: str | Type[BaseEvent], timeout: float | None=None, predicate: Callable[[BaseEvent], bool]=None) -> BaseEvent` +##### `query(event_type: str | Type[BaseEvent], *, include: Callable[[BaseEvent], bool] | None=None, exclude: Callable[[BaseEvent], bool] | None=None, since: timedelta | float | int | None=None) -> BaseEvent | None` + +Return the most recently completed event in history that matches the type and optional predicates. Returns `None` if nothing qualifies. + +```python +recent_sync = await bus.query( + SyncEvent, + since=timedelta(seconds=30), + include=lambda e: e.account_id == account_id, +) + +if recent_sync is not None: + print('We already synced recently, skipping') +``` + +##### `expect(event_type: str | Type[BaseEvent], timeout: float | None=None, predicate: Callable[[BaseEvent], bool]=None) -> BaseEvent | None` Wait for a specific event to occur. @@ -608,6 +649,9 @@ event = await bus.expect( 'UserEvent', predicate=lambda e: e.user_id == 'specific_user' ) + +if event is None: + print('No matching event arrived within 30 seconds') ``` ##### `wait_until_idle(timeout: float | None=None)` diff --git a/bubus/service.py b/bubus/service.py index 336a59d..efa4ef6 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -49,10 +49,12 @@ class QueueShutDown(Exception): pass -QueueEntryType = TypeVar('QueueEntryType', bound='BaseEvent[Any]') -T_ExpectedEvent = TypeVar('T_ExpectedEvent', bound='BaseEvent[Any]') +QueueEntryType = TypeVar('QueueEntryType', bound=BaseEvent[Any]) +T_ExpectedEvent = TypeVar('T_ExpectedEvent', bound=BaseEvent[Any]) +T_QueryEvent = TypeVar('T_QueryEvent', bound=BaseEvent[Any]) +T_QueryEvent = TypeVar('T_QueryEvent', bound=BaseEvent[Any]) -EventPatternType = PythonIdentifierStr | Literal['*'] | type['BaseEvent[Any]'] +EventPatternType = PythonIdentifierStr | Literal['*'] | type[BaseEvent[Any]] @@ -60,13 +62,13 @@ class EventBusMiddleware: """Hookable lifecycle interface for observing or extending EventBus execution.""" async def pre_event_handler_started( - self, eventbus: 'EventBus', event: 'BaseEvent[Any]', event_result: EventResult[Any] + self, eventbus: 'EventBus', event: BaseEvent[Any], event_result: EventResult[Any] ) -> None: """Called just before a handler begins execution.""" return None async def post_event_handler_completed( - self, eventbus: 'EventBus', event: 'BaseEvent[Any]', event_result: EventResult[Any] + self, eventbus: 'EventBus', event: BaseEvent[Any], event_result: EventResult[Any] ) -> None: """Called after a handler completes successfully.""" return None @@ -74,7 +76,7 @@ async def post_event_handler_completed( async def post_event_handler_failed( self, eventbus: 'EventBus', - event: 'BaseEvent[Any]', + event: BaseEvent[Any], event_result: EventResult[Any], error: BaseException, ) -> None: @@ -82,7 +84,7 @@ async def post_event_handler_failed( return None async def post_event_snapshot_recorded( - self, eventbus: 'EventBus', event: 'BaseEvent[Any]', phase: str + self, eventbus: 'EventBus', event: BaseEvent[Any], phase: str ) -> None: """Called whenever an event snapshot is persisted.""" return None @@ -90,14 +92,14 @@ async def post_event_snapshot_recorded( async def post_event_handler_snapshot_recorded( self, eventbus: 'EventBus', - event: 'BaseEvent[Any]', + event: BaseEvent[Any], event_result: EventResult[Any], phase: str, ) -> None: """Called whenever a handler snapshot is persisted.""" return None - async def post_event_completed(self, eventbus: 'EventBus', event: 'BaseEvent[Any]') -> None: + async def post_event_completed(self, eventbus: 'EventBus', event: BaseEvent[Any]) -> None: """Called after an event and all of its handlers have finished.""" return None @@ -188,7 +190,7 @@ def get_nowait(self) -> QueueEntryType: # Context variable to track the current event being processed (for setting event_parent_id from inside a child event) -_current_event_context: ContextVar['BaseEvent[Any] | None'] = ContextVar('current_event', default=None) +_current_event_context: ContextVar[BaseEvent[Any] | None] = ContextVar('current_event', default=None) # Context variable to track if we're inside a handler (for nested event detection) inside_handler_context: ContextVar[bool] = ContextVar('inside_handler', default=False) # Context variable to track if we hold the global lock (for re-entrancy across tasks) @@ -319,9 +321,9 @@ class EventBus: # Runtime State id: UUIDStr = '00000000-0000-0000-0000-000000000000' - handlers: dict[PythonIdStr, list[ContravariantEventHandler['BaseEvent[Any]']]] # collected by .on(, ) - event_queue: CleanShutdownQueue['BaseEvent[Any]'] | None - event_history: EventHistory['BaseEvent[Any]'] + handlers: dict[PythonIdStr, list[ContravariantEventHandler[BaseEvent[Any]]]] + event_queue: CleanShutdownQueue[BaseEvent[Any]] | None + event_history: EventHistory[BaseEvent[Any]] _is_running: bool = False _runloop_task: asyncio.Task[None] | None = None @@ -452,7 +454,7 @@ async def _call_middleware_hook( # Middleware fan-out helpers ------------------------------------------- # async def _middlewares_post_event_snapshot_recorded( - self, event: 'BaseEvent[Any]', phase: str + self, event: BaseEvent[Any], phase: str ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( @@ -460,7 +462,7 @@ async def _middlewares_post_event_snapshot_recorded( ) async def _middlewares_post_event_handler_snapshot_recorded( - self, event: 'BaseEvent[Any]', event_result: EventResult[Any], phase: str + self, event: BaseEvent[Any], event_result: EventResult[Any], phase: str ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( @@ -472,14 +474,14 @@ async def _middlewares_post_event_handler_snapshot_recorded( phase, ) - async def _maybe_record_event_started(self, event: 'BaseEvent[Any]') -> None: + async def _maybe_record_event_started(self, event: BaseEvent[Any]) -> None: if getattr(event, '_history_started_logged', False): return setattr(event, '_history_started_logged', True) await self._middlewares_post_event_snapshot_recorded(event, 'started') async def _middlewares_pre_event_handler_started( - self, event: 'BaseEvent[Any]', event_result: EventResult[Any] + self, event: BaseEvent[Any], event_result: EventResult[Any] ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( @@ -487,7 +489,7 @@ async def _middlewares_pre_event_handler_started( ) async def _middlewares_post_event_handler_completed( - self, event: 'BaseEvent[Any]', event_result: EventResult[Any] + self, event: BaseEvent[Any], event_result: EventResult[Any] ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( @@ -495,18 +497,18 @@ async def _middlewares_post_event_handler_completed( ) async def _middlewares_post_event_handler_failed( - self, event: 'BaseEvent[Any]', event_result: EventResult[Any], error: BaseException + self, event: BaseEvent[Any], event_result: EventResult[Any], error: BaseException ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( middleware, 'post_event_handler_failed', self, event, event_result, error ) - async def _middlewares_post_event_completed(self, event: 'BaseEvent[Any]') -> None: + async def _middlewares_post_event_completed(self, event: BaseEvent[Any]) -> None: for middleware in self._middlewares: await self._call_middleware_hook(middleware, 'post_event_completed', self, event) - async def _dispatch_after_event_hooks(self, event: 'BaseEvent[Any]') -> None: + async def _dispatch_after_event_hooks(self, event: BaseEvent[Any]) -> None: if getattr(event, '_after_event_hooks_run', False): return @@ -532,7 +534,7 @@ async def _dispatch_after_event_hooks(self, event: 'BaseEvent[Any]') -> None: await self._middlewares_post_event_completed(event) @property - def events_pending(self) -> list['BaseEvent[Any]']: + def events_pending(self) -> list[BaseEvent[Any]]: """Get events that haven't started processing yet (does not include events that have not even finished dispatching yet in self.event_queue)""" return [ event @@ -541,7 +543,7 @@ def events_pending(self) -> list['BaseEvent[Any]']: ] @property - def events_started(self) -> list['BaseEvent[Any]']: + def events_started(self) -> list[BaseEvent[Any]]: """Get events currently being processed""" return [ event @@ -550,7 +552,7 @@ def events_started(self) -> list['BaseEvent[Any]']: ] @property - def events_completed(self) -> list['BaseEvent[Any]']: + def events_completed(self) -> list[BaseEvent[Any]]: """Get events that have completed processing""" return [event for event in self.event_history.values() if event.event_completed_at is not None] @@ -575,11 +577,11 @@ def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerMethod[T # 5. EventHandlerClassMethod[BaseEvent] - sync classmethod taking cls and event @overload - def on(self, event_pattern: EventPatternType, handler: EventHandlerClassMethod['BaseEvent[Any]']) -> None: ... + def on(self, event_pattern: EventPatternType, handler: EventHandlerClassMethod[BaseEvent[Any]]) -> None: ... # 6. AsyncEventHandlerClassMethod[BaseEvent] - async classmethod taking cls and event @overload - def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerClassMethod['BaseEvent[Any]']) -> None: ... + def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerClassMethod[BaseEvent[Any]]) -> None: ... # I dont think this is needed, but leaving it here for now # 9. Coroutine[Any, Any, Any] - direct coroutine @@ -591,11 +593,11 @@ def on( event_pattern: EventPatternType, handler: ( # TypeAlias with args doesnt work on overloaded signature, has to be defined inline EventHandlerFunc[T_Event] - | AsyncEventHandlerFunc['BaseEvent[Any]'] + | AsyncEventHandlerFunc[BaseEvent[Any]] | EventHandlerMethod[T_Event] - | AsyncEventHandlerMethod['BaseEvent[Any]'] - | EventHandlerClassMethod['BaseEvent[Any]'] - | AsyncEventHandlerClassMethod['BaseEvent[Any]'] + | AsyncEventHandlerMethod[BaseEvent[Any]] + | EventHandlerClassMethod[BaseEvent[Any]] + | AsyncEventHandlerClassMethod[BaseEvent[Any]] ), ) -> None: """ @@ -759,7 +761,7 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: return event - def _event_matches_pattern(self, event: 'BaseEvent[Any]', pattern: EventPatternType) -> bool: + def _event_matches_pattern(self, event: BaseEvent[Any], pattern: EventPatternType) -> bool: if pattern == '*': return True if isinstance(pattern, str): @@ -770,30 +772,30 @@ def _event_matches_pattern(self, event: 'BaseEvent[Any]', pattern: EventPatternT async def expect( self, event_type: type[T_ExpectedEvent], - include: Callable[['BaseEvent[Any]' | T_ExpectedEvent], bool] = lambda _: True, - exclude: Callable[['BaseEvent[Any]' | T_ExpectedEvent], bool] = lambda _: False, - predicate: Callable[['BaseEvent[Any]' | T_ExpectedEvent], bool] = lambda _: True, # deprecated, alias for include + include: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: True, timeout: float | None = None, - ) -> T_ExpectedEvent: ... + ) -> T_ExpectedEvent | None: ... @overload async def expect( self, event_type: PythonIdentifierStr, - include: Callable[['BaseEvent[Any]'], bool] = lambda _: True, - exclude: Callable[['BaseEvent[Any]'], bool] = lambda _: False, - predicate: Callable[['BaseEvent[Any]'], bool] = lambda _: True, # deprecated, alias for include + include: Callable[[BaseEvent[Any]], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, timeout: float | None = None, - ) -> 'BaseEvent[Any]': ... + ) -> BaseEvent[Any] | None: ... async def expect( self, event_type: PythonIdentifierStr | type[T_ExpectedEvent], - include: Callable[['BaseEvent[Any]'], bool] = lambda _: True, - exclude: Callable[['BaseEvent[Any]'], bool] = lambda _: False, - predicate: Callable[['BaseEvent[Any]'], bool] = lambda _: True, # deprecated, alias for include + include: Callable[[BaseEvent[Any]], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, timeout: float | None = None, - ) -> 'BaseEvent[Any]' | T_ExpectedEvent: + ) -> BaseEvent[Any] | T_ExpectedEvent | None: """ Wait for an event matching the given type/pattern with optional filters. @@ -805,10 +807,7 @@ async def expect( timeout: Maximum time to wait in seconds as a float (None = wait forever) Returns: - The first matching event - - Raises: - asyncio.TimeoutError: If timeout is reached before a matching event + The first matching event, or None if no match arrives before the timeout Example: # Wait for any response event @@ -828,14 +827,14 @@ async def expect( timeout=30 ) """ - future: asyncio.Future['BaseEvent[Any]'] = asyncio.Future() + future: asyncio.Future[BaseEvent[Any]] = asyncio.Future() # Handle backwards compatibility: merge predicate into include if predicate is not None: # type: ignore[conditionAlwaysTrue] original_include = include include = lambda e, orig=original_include, pred=predicate: orig(e) and pred(e) - def notify_expect_handler(event: 'BaseEvent[Any]') -> None: + def notify_expect_handler(event: BaseEvent[Any]) -> None: """Handler that resolves the future when a matching event is found""" if not future.done() and include(event) and not exclude(event): future.set_result(event) @@ -868,6 +867,78 @@ def notify_expect_handler(event: 'BaseEvent[Any]') -> None: if event_key in self.handlers and notify_expect_handler in self.handlers[event_key]: self.handlers[event_key].remove(notify_expect_handler) + @overload + async def query( + self, + event_type: type[T_QueryEvent], + include: Callable[[T_QueryEvent], bool] = lambda _: True, + exclude: Callable[[T_QueryEvent], bool] = lambda _: False, + predicate: Callable[[T_QueryEvent], bool] = lambda _: True, + since: timedelta | float | int | None = None, + ) -> T_QueryEvent | None: ... + + @overload + async def query( + self, + event_type: PythonIdentifierStr | Literal['*'], + include: Callable[[BaseEvent[Any]], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, + since: timedelta | float | int | None = None, + ) -> BaseEvent[Any] | None: ... + + async def query( + self, + event_type: EventPatternType, + include: Callable[[BaseEvent[Any]], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, + since: timedelta | float | int | None = None, + ) -> BaseEvent[Any] | T_QueryEvent | None: + """Return the most recent completed event matching the filters, or None if not found.""" + + if predicate is not None: # type: ignore[truthy-function] + original_include = include + + def combined_include(event: BaseEvent[Any]) -> bool: + return original_include(event) and predicate(event) + + include = combined_include + + if isinstance(since, (int, float)): + since = timedelta(seconds=float(since)) + + cutoff: datetime | None = None + if since is not None: + if since < timedelta(0): + raise ValueError('since must be non-negative') + cutoff = datetime.now(UTC) - since + + events = list(self.event_history.values()) + for event in reversed(events): + if cutoff is not None and event.event_created_at < cutoff: + break + + if event.event_completed_at is None: + continue + + if not self._event_matches_pattern(event, event_type): + continue + + if exclude(event): + continue + + if not include(event): + continue + + if isinstance(event_type, type): + return cast(T_QueryEvent, event) + return event + + return None + + + def _start(self) -> None: """Start the event bus if not already running""" if not self._is_running: @@ -915,7 +986,7 @@ def close_with_cleanup() -> None: if self.event_queue is None: # Set queue size based on whether we have limits queue_size = 50 if self.max_history_size is not None else 0 # 0 = unlimited - self.event_queue = CleanShutdownQueue['BaseEvent[Any]'](maxsize=queue_size) + self.event_queue = CleanShutdownQueue[BaseEvent[Any]](maxsize=queue_size) self._on_idle = asyncio.Event() self._on_idle.clear() # Start in a busy state unless we confirm queue is empty by running step() at least once @@ -1146,7 +1217,7 @@ async def step( logger.debug(f'βœ… {self}.step({event}) COMPLETE') return event - async def process_event(self, event: 'BaseEvent[Any]', timeout: float | None = None) -> None: + async def process_event(self, event: BaseEvent[Any], timeout: float | None = None) -> None: """Process a single event (assumes lock is already held)""" # Get applicable handlers applicable_handlers = self._get_applicable_handlers(event) @@ -1199,7 +1270,7 @@ async def process_event(self, event: 'BaseEvent[Any]', timeout: float | None = N if self.max_history_size: self.cleanup_event_history() - def _get_applicable_handlers(self, event: 'BaseEvent[Any]') -> dict[str, EventHandler]: + def _get_applicable_handlers(self, event: BaseEvent[Any]) -> dict[str, EventHandler]: """Get all handlers that should process the given event, filtering out those that would create loops""" applicable_handlers: list[EventHandler] = [] @@ -1223,7 +1294,7 @@ def _get_applicable_handlers(self, event: 'BaseEvent[Any]') -> dict[str, EventHa return filtered_handlers def _enter_handler_execution_context( - self, event: 'BaseEvent[Any]', handler_id: str + self, event: BaseEvent[Any], handler_id: str ) -> tuple[contextvars.Token[Any], contextvars.Token[bool], contextvars.Token[str | None]]: event_token = _current_event_context.set(event) inside_handler_token = inside_handler_context.set(True) @@ -1241,7 +1312,7 @@ def _exit_handler_execution_context( async def _execute_handlers( self, - event: 'BaseEvent[Any]', + event: BaseEvent[Any], handlers: dict[PythonIdStr, EventHandler] | None = None, timeout: float | None = None, ) -> None: @@ -1358,7 +1429,7 @@ async def execute_handler( ) raise - def _would_create_loop(self, event: 'BaseEvent[Any]', handler: EventHandler) -> bool: + def _would_create_loop(self, event: BaseEvent[Any], handler: EventHandler) -> bool: """Check if calling this handler would create a loop""" assert inspect.isfunction(handler) or inspect.iscoroutinefunction(handler) or inspect.ismethod(handler), ( @@ -1415,7 +1486,7 @@ def _would_create_loop(self, event: 'BaseEvent[Any]', handler: EventHandler) -> return False def _handler_dispatched_ancestor( - self, event: 'BaseEvent[Any]', handler_id: str, visited: set[str] | None = None, depth: int = 0 + self, event: BaseEvent[Any], handler_id: str, visited: set[str] | None = None, depth: int = 0 ) -> int: """Check how many times this handler appears in the ancestry chain. Returns the depth count.""" # Prevent infinite recursion in case of circular parent references @@ -1487,9 +1558,9 @@ def cleanup_event_history(self) -> int: return 0 # Separate events by status - pending_events: list[tuple[str, 'BaseEvent[Any]']] = [] - started_events: list[tuple[str, 'BaseEvent[Any]']] = [] - completed_events: list[tuple[str, 'BaseEvent[Any]']] = [] + pending_events: list[tuple[str, BaseEvent[Any]]] = [] + started_events: list[tuple[str, BaseEvent[Any]]] = [] + completed_events: list[tuple[str, BaseEvent[Any]]] = [] for event_id, event in self.event_history.items(): if event.event_status == 'pending': diff --git a/tests/test_eventbus.py b/tests/test_eventbus.py index 8039518..0c6c77d 100644 --- a/tests/test_eventbus.py +++ b/tests/test_eventbus.py @@ -19,7 +19,7 @@ import os import sqlite3 import time -from datetime import datetime, timezone +from datetime import datetime, timezone, timedelta from typing import Any import pytest @@ -1265,8 +1265,8 @@ async def test_expect_with_predicate(self, eventbus): async def test_expect_timeout(self, eventbus): """Test expect timeout behavior""" # Expect an event that will never come - with pytest.raises(TimeoutError): - await eventbus.expect('NonExistentEvent', timeout=0.1) + result = await eventbus.expect('NonExistentEvent', timeout=0.1) + assert result is None async def test_expect_with_model_class(self, eventbus): """Test expect with model class instead of string""" @@ -1316,10 +1316,8 @@ async def test_expect_handler_cleanup(self, eventbus): initial_handlers = len(eventbus.handlers.get('TestEvent', [])) # Create an expect that times out - try: - await eventbus.expect('TestEvent', timeout=0.1) - except TimeoutError: - pass + result = await eventbus.expect('TestEvent', timeout=0.1) + assert result is None # Handler should be cleaned up assert len(eventbus.handlers.get('TestEvent', [])) == initial_handlers @@ -1371,6 +1369,93 @@ async def slow_handler(event: BaseEvent) -> str: await eventbus.wait_until_idle() assert processing_complete is True + +class TestQueryMethod: + """Tests for the query() helper.""" + + async def test_query_returns_most_recent_completed(self, eventbus): + # Dispatch two events and ensure the newest is returned + eventbus.dispatch(UserActionEvent(action='first', user_id='u1')) + latest = eventbus.dispatch(UserActionEvent(action='second', user_id='u2')) + await eventbus.wait_until_idle() + + match = await eventbus.query('UserActionEvent', since=timedelta(seconds=10)) + assert match is not None + assert match.event_id == latest.event_id + + async def test_query_respects_since_window(self, eventbus): + event = eventbus.dispatch(UserActionEvent(action='old', user_id='u1')) + await eventbus.wait_until_idle() + event.event_created_at -= timedelta(seconds=30) + + match = await eventbus.query('UserActionEvent', since=timedelta(seconds=10)) + assert match is None + + async def test_query_skips_incomplete_events(self, eventbus): + processing = asyncio.Event() + + async def slow_handler(evt: UserActionEvent) -> None: + await asyncio.sleep(0.05) + processing.set() + + eventbus.on('UserActionEvent', slow_handler) + + pending_event = eventbus.dispatch(UserActionEvent(action='slow', user_id='u1')) + + # While the handler is running, query should return None + assert await eventbus.query('UserActionEvent', since=timedelta(seconds=10)) is None + + await pending_event + await processing.wait() + + match = await eventbus.query('UserActionEvent', since=timedelta(seconds=10)) + assert match is not None + assert match.event_id == pending_event.event_id + + +class TestDebouncePatterns: + """End-to-end scenarios for debounce-style flows.""" + + class DebounceEvent(BaseEvent): + user_id: int + + async def test_debounce_prefers_recent_history(self, eventbus): + # First event completes + initial = await eventbus.dispatch(self.DebounceEvent(user_id=123)) + await eventbus.wait_until_idle() + + # Compose the debounce pattern: query -> expect -> dispatch + resolved = ( + await eventbus.query(self.DebounceEvent, since=timedelta(seconds=10)) + or await eventbus.expect(self.DebounceEvent, timeout=0.05) + or await eventbus.dispatch(self.DebounceEvent(user_id=123)) + ) + + assert resolved is not None + assert resolved.event_id == initial.event_id + + total_events = sum( + 1 for event in eventbus.event_history.values() if isinstance(event, self.DebounceEvent) + ) + assert total_events == 1 + + async def test_debounce_dispatches_when_recent_missing(self, eventbus): + resolved = ( + await eventbus.query(self.DebounceEvent, since=timedelta(seconds=1)) + or await eventbus.expect(self.DebounceEvent, timeout=0.05) + or await eventbus.dispatch(self.DebounceEvent(user_id=999)) + ) + + assert resolved is not None + assert isinstance(resolved, self.DebounceEvent) + assert resolved.user_id == 999 + + await eventbus.wait_until_idle() + + total_events = sum( + 1 for event in eventbus.event_history.values() if isinstance(event, self.DebounceEvent) + ) + assert total_events == 1 async def test_expect_with_complex_predicate(self, eventbus): """Test expect with complex predicate logic""" events_seen = [] diff --git a/tests/test_typed_event_results.py b/tests/test_typed_event_results.py index ee9833d..b15d370 100644 --- a/tests/test_typed_event_results.py +++ b/tests/test_typed_event_results.py @@ -240,6 +240,29 @@ async def dispatch_string_event(): await bus.stop(clear=True) +async def test_query_type_inference(): + """Test that EventBus.query() returns the correct typed event.""" + print('\n=== Test Query Type Inference ===') + + bus = EventBus(name='query_type_test_bus') + + class QueryEvent(BaseEvent[str]): + pass + + # Dispatch an event so it appears in history + event = bus.dispatch(QueryEvent()) + await bus.wait_until_idle() + + queried = await bus.query(QueryEvent, since=10) + + assert queried is not None + assert_type(queried, QueryEvent) + assert queried.event_id == event.event_id + + print(f'βœ… Query correctly preserved type: {type(queried).__name__}') + await bus.stop(clear=True) + + async def test_dispatch_type_inference(): """Test that EventBus.dispatch() returns the same type as its input.""" print('\n=== Test Dispatch Type Inference ===') @@ -298,6 +321,7 @@ async def test_typed_event_results(): await test_no_casting_when_no_result_type() await test_result_type_stored_in_event_result() await test_expect_type_inference() + await test_query_type_inference() await test_dispatch_type_inference() print('\nπŸŽ‰ All typed event result tests passed!') From 89b6df8b4ca837327ce49497e4e7adadeba83bed Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 19:17:39 -0700 Subject: [PATCH 12/79] rename folder to ui --- README.md | 2 +- bubus/models.py | 5 ++++ monitor_app/README.md | 34 -------------------------- ui/README.md | 38 ++++++++++++++++++++++++++++++ {monitor_app => ui}/__init__.py | 0 {monitor_app => ui}/config.py | 2 +- {monitor_app => ui}/db.py | 0 {monitor_app => ui}/main.py | 0 {monitor_app => ui}/test_events.py | 0 9 files changed, 45 insertions(+), 36 deletions(-) delete mode 100644 monitor_app/README.md create mode 100644 ui/README.md rename {monitor_app => ui}/__init__.py (100%) rename {monitor_app => ui}/config.py (97%) rename {monitor_app => ui}/db.py (100%) rename {monitor_app => ui}/main.py (100%) rename {monitor_app => ui}/test_events.py (100%) diff --git a/README.md b/README.md index 10cedbb..b934660 100644 --- a/README.md +++ b/README.md @@ -444,7 +444,7 @@ email_list = await event_bus.dispatch(FetchInboxEvent(account_id='124', ...)).ev EventBus includes automatic memory management to prevent unbounded growth in long-running applications: ```python -# Create a bus with memory limits (default: 50 events) +# Create a bus with memory limits (default: 100 events) bus = EventBus(max_history_size=100) # Keep max 100 events in history # Or disable memory limits for unlimited history diff --git a/bubus/models.py b/bubus/models.py index a5bc85b..bed981c 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -884,6 +884,11 @@ class EventResult(BaseModel, Generic[T_EventResultType]): # and it would significantly reduce runtime flexibility, e.g. you couldn't define and dispatch arbitrary server-provided event types at runtime event_children: list['BaseEvent[Any]'] = Field(default_factory=list) # pyright: ignore[reportUnknownVariableType] + @field_serializer('result', when_used='json') + def _serialize_result(self, value: T_EventResultType | BaseEvent[Any] | None) -> Any: + """Preserve handler return values when serializing without extra validation.""" + return value + @property def handler_completed_signal(self) -> asyncio.Event | None: """Lazily create asyncio.Event when accessed""" diff --git a/monitor_app/README.md b/monitor_app/README.md deleted file mode 100644 index 6e40acd..0000000 --- a/monitor_app/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# bubus Monitor App - -Minimal FastAPI application that reads the `events_log` and `event_results_log` tables produced by the `SQLiteHistoryMirrorMiddleware` and exposes them over HTTP/WebSocket for live monitoring. - -Install dependencies (once): - -```bash -pip install fastapi uvicorn -``` - -## Quick start - -```bash -cd monitor_app -uvicorn monitor_app.main:app --reload -``` - -The app assumes the history database lives at `../events.sqlite`. Override via: - -```bash -EVENT_HISTORY_DB=/path/to/history.sqlite uvicorn monitor_app.main:app --reload -``` - -Then visit [http://localhost:8000](http://localhost:8000) for a simple dashboard that shows recent events and handler results updating in near real-time through a WebSocket stream. - -## Endpoints - -- `GET /events?limit=20` – latest events (JSON) -- `GET /results?limit=20` – latest handler results (JSON) -- `GET /meta` – database path + existence flag -- `GET /` – minimal HTML dashboard -- `WS /ws/events` – pushes new rows as they arrive (`{"events": [...], "results": [...]}`) - -This app is intentionally small so you can extend it with additional metrics, authentication, or richer UI as needed. diff --git a/ui/README.md b/ui/README.md new file mode 100644 index 0000000..b6b8663 --- /dev/null +++ b/ui/README.md @@ -0,0 +1,38 @@ +# bubus Monitoring Dashboard UI + +Minimal FastAPI Web UI application that reads the `events_log` and `event_results_log` tables produced by the `SQLiteHistoryMirrorMiddleware` and exposes them over HTTP/WebSocket for live monitoring by an administrator / developer. + +## Quick start + +```bash +git clone https://github.com/browser-use/bubus.git +cd bubus +uv venv +uv pip install fastapi 'uvicorn[standard]' +``` + +```bash +# generate and save a live stream of test events (creates/appends to ./events.sqlite) +export EVENT_HISTORY_DB=./events.sqlite +uv run python -m monitor_app.test_events & +``` + +```bash +# run the UI backend server and then open the UI in your browser +uv run uvicorn ui.main:app --reload +open http://localhost:8000 +``` + +You should now see on [http://localhost:8000](http://localhost:8000) a simple dashboard that shows recent events and handler results in real-time (via WebSocket). + +Replace `events.sqlite` with any db matching that schema to use in other codebases. + +## Endpoints + +- `GET /events?limit=20` – latest events (JSON) +- `GET /results?limit=20` – latest handler results (JSON) +- `GET /meta` – database path + existence flag +- `GET /` – minimal HTML dashboard +- `WS /ws/events` – pushes new rows as they arrive (`{"events": [...], "results": [...]}`) + +This app is intentionally small so you can vibecode-extend it with additional metrics, authentication, or richer UI as needed. diff --git a/monitor_app/__init__.py b/ui/__init__.py similarity index 100% rename from monitor_app/__init__.py rename to ui/__init__.py diff --git a/monitor_app/config.py b/ui/config.py similarity index 97% rename from monitor_app/config.py rename to ui/config.py index 45c846a..b4165f2 100644 --- a/monitor_app/config.py +++ b/ui/config.py @@ -14,6 +14,6 @@ def resolve_db_path() -> Path: """ db_path = Path(os.getenv('EVENT_HISTORY_DB', DEFAULT_DB_PATH)) if not db_path.is_absolute(): - # Resolve relative to repository root (parent directory of monitor_app) + # Resolve relative to repository root (parent directory of ui) db_path = Path(__file__).resolve().parent.parent / db_path return db_path diff --git a/monitor_app/db.py b/ui/db.py similarity index 100% rename from monitor_app/db.py rename to ui/db.py diff --git a/monitor_app/main.py b/ui/main.py similarity index 100% rename from monitor_app/main.py rename to ui/main.py diff --git a/monitor_app/test_events.py b/ui/test_events.py similarity index 100% rename from monitor_app/test_events.py rename to ui/test_events.py From 5a094dc4a922e120cd8b9ad6f32171d706c8ee2c Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 19:18:05 -0700 Subject: [PATCH 13/79] improve perf tests --- tests/test_eventbus.py | 1 + tests/test_stress_20k_events.py | 15 ++++++++------- tests/test_typed_event_results.py | 3 +++ 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/tests/test_eventbus.py b/tests/test_eventbus.py index 0c6c77d..bfb8d00 100644 --- a/tests/test_eventbus.py +++ b/tests/test_eventbus.py @@ -1495,6 +1495,7 @@ async def test_expect_in_sync_context(self, mock_agent): # Later await the coroutine result = await expect_coroutine + assert result is not None assert result.event_type == 'SyncEvent' await bus.stop() diff --git a/tests/test_stress_20k_events.py b/tests/test_stress_20k_events.py index 3a75be3..aea78c8 100644 --- a/tests/test_stress_20k_events.py +++ b/tests/test_stress_20k_events.py @@ -32,7 +32,7 @@ async def test_20k_events_with_memory_control(): print(f'\nInitial memory: {initial_memory:.1f} MB') # Create EventBus with proper limits (now default) - bus = EventBus(name='ManyEvents') + bus = EventBus(name='ManyEvents', middlewares=[]) print('EventBus settings:') print(f' max_history_size: {bus.max_history_size}') @@ -158,7 +158,7 @@ async def handler(event: SimpleEvent) -> None: @pytest.mark.asyncio async def test_hard_limit_enforcement(): """Test that hard limit of 100 pending events is enforced""" - bus = EventBus(name='HardLimitTest') + bus = EventBus(name='HardLimitTest', middlewares=[]) try: # Create a slow handler to keep events pending @@ -167,11 +167,11 @@ async def slow_handler(event: SimpleEvent) -> None: bus.on('SimpleEvent', slow_handler) - # Try to dispatch more than 100 events + # Try to dispatch more than the pending limit events_dispatched = 0 errors = 0 - for _ in range(150): + for _ in range(200): try: bus.dispatch(SimpleEvent()) events_dispatched += 1 @@ -185,7 +185,8 @@ async def slow_handler(event: SimpleEvent) -> None: print(f'Hit capacity error {errors} times') # Should hit the limit - assert events_dispatched <= 100 + assert bus.max_history_size is not None + assert events_dispatched <= bus.max_history_size assert errors > 0 finally: @@ -196,7 +197,7 @@ async def slow_handler(event: SimpleEvent) -> None: @pytest.mark.asyncio async def test_cleanup_prioritizes_pending(): """Test that cleanup keeps pending events and removes completed ones""" - bus = EventBus(name='CleanupTest', max_history_size=10) + bus = EventBus(name='CleanupTest', max_history_size=10, middlewares=[]) try: # Process some events to completion @@ -234,7 +235,7 @@ async def slow_handler(event: BaseEvent) -> None: # Should have removed completed events to make room for pending assert bus.max_history_size is not None - assert len(bus.event_history) <= bus.max_history_size + assert len(bus.event_history) <= bus.max_history_size * 1.2 # allow for some overhead to avoid frequent gc pausing assert history_types.get('pending', 0) + history_types.get('started', 0) >= 5 finally: diff --git a/tests/test_typed_event_results.py b/tests/test_typed_event_results.py index b15d370..0106b05 100644 --- a/tests/test_typed_event_results.py +++ b/tests/test_typed_event_results.py @@ -192,6 +192,7 @@ async def dispatch_later(): # Use expect with the event class - should return SpecificEvent type expected_event = await bus.expect(SpecificEvent, timeout=1.0) + assert expected_event is not None # Type checking - this should work without cast assert_type(expected_event, SpecificEvent) # Verify type is SpecificEvent, not BaseEvent[Any] @@ -214,6 +215,7 @@ async def dispatch_multiple(): include=lambda e: e.request_id == 'correct', # type: ignore timeout=1.0, ) + assert filtered_event is not None assert_type(filtered_event, SpecificEvent) # Should still be SpecificEvent assert type(filtered_event) is SpecificEvent @@ -226,6 +228,7 @@ async def dispatch_string_event(): dispatch_task3 = asyncio.create_task(dispatch_string_event()) string_event = await bus.expect('StringEvent', timeout=1.0) + assert string_event is not None assert_type(string_event, BaseEvent[Any]) # Should be BaseEvent[Any] assert string_event.event_type == 'StringEvent' From 165169ed25c985c5b9866c798e548ce1fe1b025f Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 19:18:21 -0700 Subject: [PATCH 14/79] add event_history mirroring test --- tests/test_event_history_mirroring.py | 147 ++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) create mode 100644 tests/test_event_history_mirroring.py diff --git a/tests/test_event_history_mirroring.py b/tests/test_event_history_mirroring.py new file mode 100644 index 0000000..80bb2d6 --- /dev/null +++ b/tests/test_event_history_mirroring.py @@ -0,0 +1,147 @@ +# pyright: basic +"""Tests for mirroring event history snapshots via middleware.""" + +from __future__ import annotations + +import asyncio +import multiprocessing +import sqlite3 +from pathlib import Path +from typing import Any, Sequence + +import pytest + +from bubus import BaseEvent, EventBus, SQLiteHistoryMirrorMiddleware + + +class HistoryTestEvent(BaseEvent): + """Event for verifying middleware mirroring behaviour.""" + + payload: str + should_fail: bool = False + + +def _summarize_history(history: dict[str, BaseEvent[Any]]) -> list[dict[str, Any]]: + """Collect comparable information about events stored in history.""" + summary: list[dict[str, Any]] = [] + for event in history.values(): + handler_results = [ + { + 'handler_name': result.handler_name.rsplit('.', 1)[-1], + 'status': result.status, + 'result': result.result, + 'error': repr(result.error) if result.error else None, + } + for result in sorted(event.event_results.values(), key=lambda r: r.handler_name) + ] + summary.append( + { + 'event_type': event.event_type, + 'event_status': event.event_status, + 'event_path_length': len(event.event_path), + 'children': sorted(child.event_type for child in event.event_children), + 'handler_results': handler_results, + } + ) + return sorted(summary, key=lambda record: record['event_type']) + + +async def _run_scenario( + *, + middlewares: Sequence[Any] = (), + should_fail: bool = False, +) -> list[dict[str, Any]]: + """Execute a simple scenario and return the history summary.""" + bus = EventBus(middlewares=list(middlewares)) + + async def ok_handler(event: HistoryTestEvent) -> str: + return f'ok-{event.payload}' + + async def conditional_handler(event: HistoryTestEvent) -> str: + if event.should_fail: + raise RuntimeError('boom') + return 'fine' + + bus.on('HistoryTestEvent', ok_handler) + bus.on('HistoryTestEvent', conditional_handler) + + try: + await bus.dispatch(HistoryTestEvent(payload='payload', should_fail=should_fail)) + await bus.wait_until_idle() + finally: + summary = _summarize_history(bus.event_history) + await bus.stop() + + return summary + + +@pytest.mark.asyncio +async def test_sqlite_mirror_matches_inmemory_success(tmp_path: Path) -> None: + db_path = tmp_path / 'events_success.sqlite' + in_memory_result = await _run_scenario() + sqlite_result = await _run_scenario(middlewares=[SQLiteHistoryMirrorMiddleware(db_path)]) + assert sqlite_result == in_memory_result + + conn = sqlite3.connect(db_path) + event_phases = conn.execute( + 'SELECT phase FROM events_log ORDER BY id' + ).fetchall() + conn.close() + assert {phase for (phase,) in event_phases} >= {'pending', 'started', 'completed'} + + +@pytest.mark.asyncio +async def test_sqlite_mirror_matches_inmemory_error(tmp_path: Path) -> None: + db_path = tmp_path / 'events_error.sqlite' + in_memory_result = await _run_scenario(should_fail=True) + sqlite_result = await _run_scenario( + middlewares=[SQLiteHistoryMirrorMiddleware(db_path)], + should_fail=True, + ) + assert sqlite_result == in_memory_result + + conn = sqlite3.connect(db_path) + phases = conn.execute('SELECT DISTINCT phase FROM events_log').fetchall() + conn.close() + assert {phase for (phase,) in phases} >= {'pending', 'started', 'error'} + + +def _worker_dispatch(db_path: str, worker_id: int) -> None: + """Process entrypoint for exercising concurrent writes.""" + + async def run() -> None: + middleware = SQLiteHistoryMirrorMiddleware(Path(db_path)) + bus = EventBus(name=f'WorkerBus{worker_id}', middlewares=[middleware]) + + async def handler(event: HistoryTestEvent) -> str: + return f'worker-{worker_id}' + + bus.on('HistoryTestEvent', handler) + try: + await bus.dispatch(HistoryTestEvent(payload=f'worker-{worker_id}')) + await bus.wait_until_idle() + finally: + await bus.stop() + + asyncio.run(run()) + + +def test_sqlite_mirror_supports_concurrent_processes(tmp_path: Path) -> None: + db_path = tmp_path / 'shared_history.sqlite' + ctx = multiprocessing.get_context('spawn') + processes = [ctx.Process(target=_worker_dispatch, args=(str(db_path), idx)) for idx in range(3)] + for proc in processes: + proc.start() + for proc in processes: + proc.join(timeout=20) + assert proc.exitcode == 0 + + conn = sqlite3.connect(db_path) + events = conn.execute('SELECT DISTINCT eventbus_name FROM events_log').fetchall() + results_count = conn.execute('SELECT COUNT(*) FROM event_results_log').fetchone() + conn.close() + + assert {name for (name,) in events} == {'WorkerBus0', 'WorkerBus1', 'WorkerBus2'} + assert results_count is not None + # Each worker records pending/started/completed for its single handler + assert results_count[0] == 9 From be314d6d9e5ae59dc3aa760b7d7645cbdf55f4c7 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 19:18:46 -0700 Subject: [PATCH 15/79] ignore sqlite temp files --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 6d5adec..30015e4 100644 --- a/.gitignore +++ b/.gitignore @@ -27,7 +27,7 @@ dist/ htmlcov/ coverage.xml *.cover - +*.sqlite* # Secrets and sensitive files secrets.env From c925e17b66d852bc121fa2769fc7ce5e004c8297 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 19:35:47 -0700 Subject: [PATCH 16/79] add stricter type checking for tests --- bubus/service.py | 12 ++++++------ tests/test_typed_event_results.py | 20 ++++++++++++++++++++ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/bubus/service.py b/bubus/service.py index efa4ef6..00d4f2f 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -871,9 +871,9 @@ def notify_expect_handler(event: BaseEvent[Any]) -> None: async def query( self, event_type: type[T_QueryEvent], - include: Callable[[T_QueryEvent], bool] = lambda _: True, - exclude: Callable[[T_QueryEvent], bool] = lambda _: False, - predicate: Callable[[T_QueryEvent], bool] = lambda _: True, + include: Callable[[BaseEvent[Any] | T_QueryEvent], bool] = lambda _: True, + exclude: Callable[[BaseEvent[Any] | T_QueryEvent], bool] = lambda _: False, + predicate: Callable[[BaseEvent[Any] | T_QueryEvent], bool] = lambda _: True, since: timedelta | float | int | None = None, ) -> T_QueryEvent | None: ... @@ -889,7 +889,7 @@ async def query( async def query( self, - event_type: EventPatternType, + event_type: PythonIdentifierStr | Literal['*'] | type[T_QueryEvent], include: Callable[[BaseEvent[Any]], bool] = lambda _: True, exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, @@ -931,8 +931,8 @@ def combined_include(event: BaseEvent[Any]) -> bool: if not include(event): continue - if isinstance(event_type, type): - return cast(T_QueryEvent, event) + # if isinstance(event_type, type): + # return cast(event_type, event) return event return None diff --git a/tests/test_typed_event_results.py b/tests/test_typed_event_results.py index 0106b05..d3dc940 100644 --- a/tests/test_typed_event_results.py +++ b/tests/test_typed_event_results.py @@ -183,6 +183,15 @@ class CustomResult(BaseModel): class SpecificEvent(BaseEvent[CustomResult]): request_id: str = 'test123' + # Validate inline isinstance usage works with await expect() + async def dispatch_inline(): + await asyncio.sleep(0.01) + bus.dispatch(SpecificEvent(request_id='inline')) + + inline_task = asyncio.create_task(dispatch_inline()) + assert isinstance(await bus.expect(SpecificEvent, timeout=1.0), SpecificEvent) + await inline_task + # Start a task that will dispatch the event async def dispatch_later(): await asyncio.sleep(0.01) @@ -193,6 +202,7 @@ async def dispatch_later(): # Use expect with the event class - should return SpecificEvent type expected_event = await bus.expect(SpecificEvent, timeout=1.0) assert expected_event is not None + assert isinstance(expected_event, SpecificEvent) # Type checking - this should work without cast assert_type(expected_event, SpecificEvent) # Verify type is SpecificEvent, not BaseEvent[Any] @@ -218,6 +228,7 @@ async def dispatch_multiple(): assert filtered_event is not None assert_type(filtered_event, SpecificEvent) # Should still be SpecificEvent + assert isinstance(filtered_event, SpecificEvent) assert type(filtered_event) is SpecificEvent assert filtered_event.request_id == 'correct' @@ -256,9 +267,11 @@ class QueryEvent(BaseEvent[str]): event = bus.dispatch(QueryEvent()) await bus.wait_until_idle() + assert isinstance(await bus.query(QueryEvent, since=10), QueryEvent) queried = await bus.query(QueryEvent, since=10) assert queried is not None + assert isinstance(queried, QueryEvent) assert_type(queried, QueryEvent) assert queried.event_id == event.event_id @@ -283,6 +296,7 @@ class CustomEvent(BaseEvent[CustomResult]): # Dispatch should return the same type WITHOUT needing cast() dispatched_event = bus.dispatch(original_event) + assert isinstance(dispatched_event, CustomEvent) # Type checking - this should work without cast assert_type(dispatched_event, CustomEvent) # Should be CustomEvent, not BaseEvent[Any] @@ -297,6 +311,10 @@ async def handler(event: CustomEvent) -> CustomResult: bus.on('CustomEvent', handler) + # Validate inline isinstance usage works with dispatch() + another_event = CustomEvent() + assert isinstance(bus.dispatch(another_event), CustomEvent) + # We should be able to use it without casting result = await dispatched_event.event_result() @@ -311,6 +329,8 @@ async def handler(event: CustomEvent) -> CustomResult: # Before: event = cast(CustomEvent, bus.dispatch(CustomEvent())) # After: event = bus.dispatch(CustomEvent()) # Type is preserved! + await another_event.event_result() + print(f'βœ… Dispatch correctly preserved type: {type(dispatched_event).__name__}') print('βœ… No cast() needed - type inference works!') await bus.stop(clear=True) From d195dd6eb352826ce9146d3f236a2412e0ac0046 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 19:55:20 -0700 Subject: [PATCH 17/79] proper type inference tests --- tests/test_typed_event_results.py | 39 +++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/tests/test_typed_event_results.py b/tests/test_typed_event_results.py index d3dc940..573b2c8 100644 --- a/tests/test_typed_event_results.py +++ b/tests/test_typed_event_results.py @@ -184,13 +184,31 @@ class SpecificEvent(BaseEvent[CustomResult]): request_id: str = 'test123' # Validate inline isinstance usage works with await expect() - async def dispatch_inline(): + async def dispatch_inline_isinstance(): await asyncio.sleep(0.01) - bus.dispatch(SpecificEvent(request_id='inline')) + bus.dispatch(SpecificEvent(request_id='inline-isinstance')) - inline_task = asyncio.create_task(dispatch_inline()) + inline_isinstance_task = asyncio.create_task(dispatch_inline_isinstance()) assert isinstance(await bus.expect(SpecificEvent, timeout=1.0), SpecificEvent) - await inline_task + await inline_isinstance_task + + # Validate inline assert_type usage works with await expect() + async def dispatch_inline_assert_type(): + await asyncio.sleep(0.01) + bus.dispatch(SpecificEvent(request_id='inline-assert-type')) + + inline_type_task = asyncio.create_task(dispatch_inline_assert_type()) + assert_type(await bus.expect(SpecificEvent, timeout=1.0), SpecificEvent) + await inline_type_task + + # Validate assert_type with isinstance expression + async def dispatch_inline_isinstance_type(): + await asyncio.sleep(0.01) + bus.dispatch(SpecificEvent(request_id='inline-isinstance-type')) + + inline_isinstance_type_task = asyncio.create_task(dispatch_inline_isinstance_type()) + assert_type(isinstance(await bus.expect(SpecificEvent, timeout=1.0), SpecificEvent), bool) + await inline_isinstance_type_task # Start a task that will dispatch the event async def dispatch_later(): @@ -268,6 +286,8 @@ class QueryEvent(BaseEvent[str]): await bus.wait_until_idle() assert isinstance(await bus.query(QueryEvent, since=10), QueryEvent) + assert_type(await bus.query(QueryEvent, since=10), QueryEvent) + assert_type(isinstance(await bus.query(QueryEvent, since=10), QueryEvent), bool) queried = await bus.query(QueryEvent, since=10) assert queried is not None @@ -315,6 +335,15 @@ async def handler(event: CustomEvent) -> CustomResult: another_event = CustomEvent() assert isinstance(bus.dispatch(another_event), CustomEvent) + # Validate assert_type captures dispatch() return type when called inline + type_event = CustomEvent() + dispatched_type_event = bus.dispatch(type_event) + assert_type(dispatched_type_event, CustomEvent) + + # Validate assert_type with isinstance expression using dispatch() + isinstance_type_event = CustomEvent() + assert_type(isinstance(bus.dispatch(isinstance_type_event), CustomEvent), bool) + # We should be able to use it without casting result = await dispatched_event.event_result() @@ -330,6 +359,8 @@ async def handler(event: CustomEvent) -> CustomResult: # After: event = bus.dispatch(CustomEvent()) # Type is preserved! await another_event.event_result() + await type_event.event_result() + await isinstance_type_event.event_result() print(f'βœ… Dispatch correctly preserved type: {type(dispatched_event).__name__}') print('βœ… No cast() needed - type inference works!') From b4e4a34a0e91c13c576174cd29470f68595ba2f8 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 17 Oct 2025 20:40:13 -0700 Subject: [PATCH 18/79] bump version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 132c3bc..904521b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "bubus" description = "Advanced Pydantic-powered event bus with async support" authors = [{ name = "Nick Sweeting" }] -version = "1.5.6" +version = "1.6.0" readme = "README.md" requires-python = ">=3.11,<4.0" classifiers = [ From 5e7bd1297555309ca95fa6ccb43d4e1f108c84a8 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 26 Nov 2025 12:35:16 -0800 Subject: [PATCH 19/79] Add EventEmitter2 link to inspiration section --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index b934660..d22cc3e 100644 --- a/README.md +++ b/README.md @@ -1065,6 +1065,8 @@ uv run pytest tests/test_eventbus.py - https://www.cosmicpython.com/book/chapter_08_events_and_message_bus.html#message_bus_diagram ⭐️ - https://developer.mozilla.org/en-US/docs/Web/API/EventTarget ⭐️ +- https://github.com/sindresorhus/emittery ⭐️ (equivalent for JS) +https://github.com/EventEmitter2/EventEmitter2 - https://github.com/pytest-dev/pluggy ⭐️ - https://github.com/teamhide/fastapi-event ⭐️ - https://github.com/ethereum/lahja ⭐️ From 73a60f63476baa04d48cb8ab2b1d445a00e49a6f Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 28 Nov 2025 15:23:59 -0500 Subject: [PATCH 20/79] Revise README description for bubus library Updated the description to clarify the library's functionality and similarities to JS event systems. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index afd7ed8..9d6aa73 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # `bubus`: πŸ“’ Production-ready event bus library for Python -Bubus is a fully-featured, Pydantic-powered event bus library for async Python. +Bubus is a simple in-memory event bus library for async Python. -It's designed for quickly building event-driven applications with Python in a way that "just works" with async support, proper support for nested events, and real concurrency control. +It's designed for quickly building event-driven applications with Python in a way that "just works" with async support, proper support for nested events, and real concurrency control. It's very similar to `EventEmitter` or [`emittery`](https://github.com/sindresorhus/emittery) in JS. It provides a [pydantic](https://docs.pydantic.dev/latest/)-based API for implementing publish-subscribe patterns with type safety, async/sync handler support, and advanced features like event forwarding between buses. From 79e4063ed1577c254a3ea782eeb0faa3aacd1a88 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 13:41:02 -0800 Subject: [PATCH 21/79] Update README.md --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index b35cbca..5a874ee 100644 --- a/README.md +++ b/README.md @@ -1065,8 +1065,7 @@ uv run pytest tests/test_eventbus.py - https://www.cosmicpython.com/book/chapter_08_events_and_message_bus.html#message_bus_diagram ⭐️ - https://developer.mozilla.org/en-US/docs/Web/API/EventTarget ⭐️ -- https://github.com/sindresorhus/emittery ⭐️ (equivalent for JS) -https://github.com/EventEmitter2/EventEmitter2 +- https://github.com/sindresorhus/emittery ⭐️ (equivalent for JS), https://github.com/EventEmitter2/EventEmitter2, https://github.com/vitaly-t/sub-events - https://github.com/pytest-dev/pluggy ⭐️ - https://github.com/teamhide/fastapi-event ⭐️ - https://github.com/ethereum/lahja ⭐️ From b5fac1ced0a9413d9603eba7d443b889030ce6a4 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 13:41:48 -0800 Subject: [PATCH 22/79] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5a874ee..bf0616e 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Bubus is a simple in-memory event bus library for async Python. It's designed for quickly building event-driven applications with Python in a way that "just works" with async support, proper support for nested events, and real concurrency control. It's very similar to `EventEmitter` or [`emittery`](https://github.com/sindresorhus/emittery) in JS. -It provides a [pydantic](https://docs.pydantic.dev/latest/)-based API for implementing publish-subscribe patterns with type safety, async/sync handler support, and advanced features like event forwarding between buses. +It provides a [pydantic](https://docs.pydantic.dev/latest/)-based API for implementing publish-subscribe patterns with type safety, async/sync handler support, and advanced features like event forwarding between buses, parent event tracking, multiple execution strategies, and more. ♾️ It's inspired by the simplicity of async and events in `JS`, we aim to bring a fully type-checked [`EventTarget`](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget)-style API to Python. From 67f5b1e72245ccaa3aa582d8de5c1b8b40947d15 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 14:07:56 -0800 Subject: [PATCH 23/79] remove old js lockfile --- bubus/package-lock.json | 63 ----------------------------------------- 1 file changed, 63 deletions(-) delete mode 100644 bubus/package-lock.json diff --git a/bubus/package-lock.json b/bubus/package-lock.json deleted file mode 100644 index 0966feb..0000000 --- a/bubus/package-lock.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "name": "bubus", - "version": "0.1.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "bubus", - "version": "0.1.0", - "license": "MIT", - "dependencies": { - "uuidv7": "^1.0.0" - }, - "devDependencies": { - "@types/node": "^20.10.0", - "typescript": "^5.3.0" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/@types/node": { - "version": "20.19.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.1.tgz", - "integrity": "sha512-jJD50LtlD2dodAEO653i3YF04NWak6jN3ky+Ri3Em3mGR39/glWiboM/IePaRbgwSfqM1TpGXfAg8ohn/4dTgA==", - "dev": true, - "license": "MIT", - "dependencies": { - "undici-types": "~6.21.0" - } - }, - "node_modules/typescript": { - "version": "5.8.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", - "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/undici-types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", - "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/uuidv7": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/uuidv7/-/uuidv7-1.0.2.tgz", - "integrity": "sha512-8JQkH4ooXnm1JCIhqTMbtmdnYEn6oKukBxHn1Ic9878jMkL7daTI7anTExfY18VRCX7tcdn5quzvCb6EWrR8PA==", - "license": "Apache-2.0", - "bin": { - "uuidv7": "cli.js" - } - } - } -} From 807c3a6d490bcc2e61ef0b20c6db99fbc7148bb8 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 14:41:32 -0800 Subject: [PATCH 24/79] fix queue-jumping behavior to be more intuitive to actually jump instead of process-until-event --- bubus/models.py | 145 +++--- tests/test_comprehensive_patterns.py | 657 +++++++++++++++++++++++++++ 2 files changed, 738 insertions(+), 64 deletions(-) diff --git a/bubus/models.py b/bubus/models.py index bed981c..1be519c 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -274,80 +274,97 @@ def __str__(self) -> str: # AuthBus≫DataBusβ–Ά AuthLoginEvent#ab12 ⏳ return f'{"≫".join(self.event_path[1:] or "?")}β–Ά {self.event_type}#{self.event_id[-4:]} {icon}' - def __await__(self) -> Generator[Self, Any, Any]: - """Wait for event to complete and return self""" - - # long descriptive name here really helps make traceback easier to follow - async def wait_for_handlers_to_complete_then_return_event(): - assert self.event_completed_signal is not None - - # If we're inside a handler and this event isn't complete yet, - # we need to process it immediately to avoid deadlock - from bubus.service import EventBus, holds_global_lock, inside_handler_context + def _remove_self_from_queue(self, bus: 'EventBus') -> bool: + """Remove this event from the bus's queue if present. Returns True if removed.""" + if bus and bus.event_queue and hasattr(bus.event_queue, '_queue'): + queue = bus.event_queue._queue + if self in queue: + queue.remove(self) + return True + return False + + async def _process_self_on_all_buses(self) -> None: + """ + Process this specific event on all buses where it's queued. - if not self.event_completed_signal.is_set() and inside_handler_context.get() and holds_global_lock.get(): - # We're inside a handler and hold the global lock - # Process events until this one completes + This handles the case where an event is forwarded to multiple buses - + we need to process it on each bus, but we only process THIS event, + not other events in the queues (to avoid overshoot). - # logger.debug(f'__await__ for {self} - inside handler context, processing child events') + The loop continues until the event's completion signal is set, which + happens after all handlers on all buses have completed. + """ + from bubus.service import EventBus - # Keep processing events from all buses until this event is complete - max_iterations = 1000 # Prevent infinite loops - iterations = 0 + max_iterations = 1000 # Prevent infinite loops + iterations = 0 - try: - while not self.event_completed_signal.is_set() and iterations < max_iterations: - iterations += 1 - processed_any = False - - # Process any queued events on all buses - # Create a list copy to avoid "Set changed size during iteration" error - for bus in list(EventBus.all_instances): - if not bus or not bus.event_queue: - continue - - # Process one event from this bus if available - try: - if bus.event_queue.qsize() > 0: - event = bus.event_queue.get_nowait() - await bus.process_event(event) - bus.event_queue.task_done() - processed_any = True - # Check if the event we're waiting for is now complete - if self.event_completed_signal.is_set(): - break - except asyncio.QueueEmpty: - pass - - # Break out of the loop if event completed after processing + try: + while not self.event_completed_signal.is_set() and iterations < max_iterations: + iterations += 1 + processed_any = False + + # Look for this specific event in all bus queues and process it + for bus in list(EventBus.all_instances): + if not bus or not bus.event_queue: + continue + + # Check if THIS event is in this bus's queue + if self._remove_self_from_queue(bus): + # Process only this event on this bus + await bus.process_event(self) + bus.event_queue.task_done() + processed_any = True + + # Check if we're done after processing if self.event_completed_signal.is_set(): break - if not processed_any: - # No events to process, yield control and check for cancellation - try: - await asyncio.sleep(0) - except asyncio.CancelledError: - raise - except asyncio.CancelledError: - # Handler was cancelled due to timeout, exit cleanly - logger.debug(f'Polling loop cancelled for {self}') - raise + if self.event_completed_signal.is_set(): + break - if iterations >= max_iterations: - # logger.error(f'Max iterations reached while waiting for {self}') - pass - else: - # Not in handler context - wait for the event to complete normally - await self.event_completed_signal.wait() + if not processed_any: + # Event not in any queue, yield control and wait + await asyncio.sleep(0) + + except asyncio.CancelledError: + logger.debug(f'Polling loop cancelled for {self}') + raise - # Check if any handlers had errors and raise the first one - # for result in self.event_results.values(): - # if result.error: - # raise result.error + async def _wait_for_completion_inside_handler(self) -> None: + """ + Wait for this event to complete when called from inside a handler. + + Processes this specific event on all buses where it appears (handling + the forwarding case), but doesn't process other events (avoiding overshoot). + """ + await self._process_self_on_all_buses() + + async def _wait_for_completion_outside_handler(self) -> None: + """ + Wait for this event to complete when called from outside a handler. + + Simply waits on the completion signal - the event loop's normal + processing will handle the event. + """ + assert self.event_completed_signal is not None + await self.event_completed_signal.wait() + + def __await__(self) -> Generator[Self, Any, Any]: + """Wait for event to complete and return self""" + + async def wait_for_handlers_to_complete_then_return_event(): + assert self.event_completed_signal is not None + from bubus.service import holds_global_lock, inside_handler_context + + is_inside_handler = inside_handler_context.get() and holds_global_lock.get() + is_not_yet_complete = not self.event_completed_signal.is_set() + + if is_not_yet_complete and is_inside_handler: + await self._wait_for_completion_inside_handler() + else: + await self._wait_for_completion_outside_handler() - # Return the completed event without raising errors - # Errors should only be raised when explicitly requested via event_result() methods return self return wait_for_handlers_to_complete_then_return_event().__await__() diff --git a/tests/test_comprehensive_patterns.py b/tests/test_comprehensive_patterns.py index 8b63a86..f39c0fd 100644 --- a/tests/test_comprehensive_patterns.py +++ b/tests/test_comprehensive_patterns.py @@ -244,10 +244,667 @@ def bad_handler(bad: BaseEvent[Any]) -> None: await bus2.stop(clear=True) +async def test_awaited_child_jumps_queue_no_overshoot(): + """ + Test the edge case in BaseEvent.__await__() (models.py): + - When a handler dispatches and awaits a child event, that child should + execute immediately (jumping the FIFO queue) + - Other queued events (Event2, Event3) should NOT be processed (no overshoot) + - FIFO order should be maintained for remaining events after completion + """ + print('\n=== Test Awaited Child Jumps Queue (No Overshoot) ===') + + bus = EventBus(name='TestBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Event3(BaseEvent[str]): + pass + + class ChildEvent(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + # Dispatch and await child - this should jump the queue + child = bus.dispatch(ChildEvent()) + execution_order.append('Child_dispatched') + await child + execution_order.append('Child_await_returned') + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + async def event3_handler(event: Event3) -> str: + execution_order.append('Event3_start') + execution_order.append('Event3_end') + return 'event3_done' + + async def child_handler(event: ChildEvent) -> str: + execution_order.append('Child_start') + execution_order.append('Child_end') + return 'child_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(ChildEvent, child_handler) + + try: + # Dispatch all three events (they go into the queue) + event1 = bus.dispatch(Event1()) + event2 = bus.dispatch(Event2()) + event3 = bus.dispatch(Event3()) + + # Verify events are queued + await asyncio.sleep(0) # Let dispatch settle + print(f'After dispatch: E1={event1.event_status}, E2={event2.event_status}, E3={event3.event_status}') + + # Await Event1 - this triggers processing and the child should jump queue + await event1 + + print(f'After await event1: {execution_order}') + print(f'Statuses: E1={event1.event_status}, E2={event2.event_status}, E3={event3.event_status}') + + # KEY ASSERTION 1: Child executed during Event1's handler (jumped queue) + assert 'Child_start' in execution_order, 'Child should have executed' + assert 'Child_end' in execution_order, 'Child should have completed' + child_start_idx = execution_order.index('Child_start') + child_end_idx = execution_order.index('Child_end') + event1_end_idx = execution_order.index('Event1_end') + assert child_start_idx < event1_end_idx, 'Child should execute before Event1 ends' + assert child_end_idx < event1_end_idx, 'Child should complete before Event1 ends' + + # KEY ASSERTION 2: Event2 and Event3 did NOT execute yet (no overshoot) + assert 'Event2_start' not in execution_order, \ + f'Event2 should NOT have started (no overshoot). Order: {execution_order}' + assert 'Event3_start' not in execution_order, \ + f'Event3 should NOT have started (no overshoot). Order: {execution_order}' + + # KEY ASSERTION 3: Event2 and Event3 are still pending + assert event2.event_status == 'pending', \ + f'Event2 should be pending, got {event2.event_status}' + assert event3.event_status == 'pending', \ + f'Event3 should be pending, got {event3.event_status}' + + # Now let the remaining events process + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # KEY ASSERTION 4: FIFO order maintained - Event2 before Event3 + event2_start_idx = execution_order.index('Event2_start') + event3_start_idx = execution_order.index('Event3_start') + assert event2_start_idx < event3_start_idx, 'FIFO: Event2 should start before Event3' + + # Verify all completed + assert event2.event_status == 'completed' + assert event3.event_status == 'completed' + + # KEY ASSERTION 5: event_history reflects dispatch order, but started_at/completed_at + # timestamps reflect actual execution order (post-reordering) + history_list = list(bus.event_history.values()) + history_types = [e.__class__.__name__ for e in history_list] + print(f'Event history (dispatch order): {history_types}') + + # Find the child event and E2/E3 + child_event = next(e for e in history_list if isinstance(e, ChildEvent)) + event2_from_history = next(e for e in history_list if isinstance(e, Event2)) + event3_from_history = next(e for e in history_list if isinstance(e, Event3)) + + # Verify execution order via timestamps: Child should have started before E2 and E3 + assert child_event.event_started_at is not None, 'Child should have started_at timestamp' + assert event2_from_history.event_started_at is not None, 'Event2 should have started_at timestamp' + assert event3_from_history.event_started_at is not None, 'Event3 should have started_at timestamp' + + assert child_event.event_started_at < event2_from_history.event_started_at, \ + f'Child should have started before Event2. Child: {child_event.event_started_at}, E2: {event2_from_history.event_started_at}' + assert child_event.event_started_at < event3_from_history.event_started_at, \ + f'Child should have started before Event3. Child: {child_event.event_started_at}, E3: {event3_from_history.event_started_at}' + + print(f'Child started_at: {child_event.event_started_at}') + print(f'Event2 started_at: {event2_from_history.event_started_at}') + print(f'Event3 started_at: {event3_from_history.event_started_at}') + + print('βœ… Awaited child jumps queue, no overshoot, FIFO maintained!') + + finally: + await bus.stop(clear=True) + + +async def test_dispatch_multiple_await_one_skips_others(): + """ + Test that when a handler dispatches multiple events and awaits only one, + the awaited event jumps the queue while the non-awaited ones stay in place. + + Scenario: + - Queue: [E1, E2, E3] + - E1 handler dispatches ChildA, ChildB, ChildC (queue becomes [E2, E3, ChildA, ChildB, ChildC]) + - E1 handler awaits only ChildB + - ChildB should jump to front and execute immediately + - ChildA and ChildC should NOT execute (they stay behind E2, E3 in queue) + - E2 and E3 should NOT execute during E1's handler + """ + print('\n=== Test Dispatch Multiple, Await One ===') + + bus = EventBus(name='MultiDispatchBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Event3(BaseEvent[str]): + pass + + class ChildA(BaseEvent[str]): + pass + + class ChildB(BaseEvent[str]): + pass + + class ChildC(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + + # Dispatch three children but only await the middle one + child_a = bus.dispatch(ChildA()) + execution_order.append('ChildA_dispatched') + + child_b = bus.dispatch(ChildB()) + execution_order.append('ChildB_dispatched') + + child_c = bus.dispatch(ChildC()) + execution_order.append('ChildC_dispatched') + + # Only await ChildB - it should jump the queue + await child_b + execution_order.append('ChildB_await_returned') + + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + async def event3_handler(event: Event3) -> str: + execution_order.append('Event3_start') + execution_order.append('Event3_end') + return 'event3_done' + + async def child_a_handler(event: ChildA) -> str: + execution_order.append('ChildA_start') + execution_order.append('ChildA_end') + return 'child_a_done' + + async def child_b_handler(event: ChildB) -> str: + execution_order.append('ChildB_start') + execution_order.append('ChildB_end') + return 'child_b_done' + + async def child_c_handler(event: ChildC) -> str: + execution_order.append('ChildC_start') + execution_order.append('ChildC_end') + return 'child_c_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(ChildA, child_a_handler) + bus.on(ChildB, child_b_handler) + bus.on(ChildC, child_c_handler) + + try: + # Dispatch E1, E2, E3 + event1 = bus.dispatch(Event1()) + event2 = bus.dispatch(Event2()) + event3 = bus.dispatch(Event3()) + + # Await E1 + await event1 + + print(f'After await event1: {execution_order}') + + # ChildB should have executed (it was awaited) + assert 'ChildB_start' in execution_order, 'ChildB should have executed' + assert 'ChildB_end' in execution_order, 'ChildB should have completed' + + # ChildB should have executed before Event1 ended (queue jump worked) + child_b_end_idx = execution_order.index('ChildB_end') + event1_end_idx = execution_order.index('Event1_end') + assert child_b_end_idx < event1_end_idx, 'ChildB should complete before Event1 ends' + + # ChildA and ChildC should NOT have executed BEFORE Event1 ended (no overshoot) + # They may have executed after Event1 completed (via background task), which is fine + if 'ChildA_start' in execution_order: + child_a_start_idx = execution_order.index('ChildA_start') + assert child_a_start_idx > event1_end_idx, \ + f'ChildA should NOT start before Event1 ends. Order: {execution_order}' + if 'ChildC_start' in execution_order: + child_c_start_idx = execution_order.index('ChildC_start') + assert child_c_start_idx > event1_end_idx, \ + f'ChildC should NOT start before Event1 ends. Order: {execution_order}' + + # E2 and E3 should NOT have executed BEFORE Event1 ended (no overshoot) + if 'Event2_start' in execution_order: + event2_start_idx = execution_order.index('Event2_start') + assert event2_start_idx > event1_end_idx, \ + f'Event2 should NOT start before Event1 ends. Order: {execution_order}' + if 'Event3_start' in execution_order: + event3_start_idx = execution_order.index('Event3_start') + assert event3_start_idx > event1_end_idx, \ + f'Event3 should NOT start before Event1 ends. Order: {execution_order}' + + # Now process remaining events + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # Verify FIFO order for remaining: E2, E3, ChildA, ChildC + # (ChildA and ChildC were dispatched after E2/E3 were already queued) + event2_start_idx = execution_order.index('Event2_start') + event3_start_idx = execution_order.index('Event3_start') + child_a_start_idx = execution_order.index('ChildA_start') + child_c_start_idx = execution_order.index('ChildC_start') + + assert event2_start_idx < event3_start_idx, 'FIFO: E2 before E3' + assert event3_start_idx < child_a_start_idx, 'FIFO: E3 before ChildA' + assert child_a_start_idx < child_c_start_idx, 'FIFO: ChildA before ChildC' + + print('βœ… Dispatch multiple, await one works correctly!') + + finally: + await bus.stop(clear=True) + + +async def test_multi_bus_forwarding_with_queued_events(): + """ + Test queue jumping with multiple buses that have forwarding set up, + where both buses already have events queued. + + Scenario: + - Bus1 has [E1, E2] queued + - Bus2 has [E3, E4] queued + - E1's handler dispatches Child to Bus1 and awaits it + - Child should jump Bus1's queue (ahead of E2) + - E3, E4 on Bus2 should NOT be affected + """ + print('\n=== Test Multi-Bus Forwarding With Queued Events ===') + + bus1 = EventBus(name='Bus1', max_history_size=100) + bus2 = EventBus(name='Bus2', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Event3(BaseEvent[str]): + pass + + class Event4(BaseEvent[str]): + pass + + class ChildEvent(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Bus1_Event1_start') + # Dispatch child to bus1 and await + child = bus1.dispatch(ChildEvent()) + execution_order.append('Child_dispatched_to_Bus1') + await child + execution_order.append('Child_await_returned') + execution_order.append('Bus1_Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Bus1_Event2_start') + execution_order.append('Bus1_Event2_end') + return 'event2_done' + + async def event3_handler(event: Event3) -> str: + execution_order.append('Bus2_Event3_start') + execution_order.append('Bus2_Event3_end') + return 'event3_done' + + async def event4_handler(event: Event4) -> str: + execution_order.append('Bus2_Event4_start') + execution_order.append('Bus2_Event4_end') + return 'event4_done' + + async def child_handler(event: ChildEvent) -> str: + execution_order.append('Child_start') + execution_order.append('Child_end') + return 'child_done' + + # Register handlers on respective buses + bus1.on(Event1, event1_handler) + bus1.on(Event2, event2_handler) + bus1.on(ChildEvent, child_handler) + + bus2.on(Event3, event3_handler) + bus2.on(Event4, event4_handler) + + try: + # Queue events on both buses + event1 = bus1.dispatch(Event1()) + event2 = bus1.dispatch(Event2()) + event3 = bus2.dispatch(Event3()) + event4 = bus2.dispatch(Event4()) + + await asyncio.sleep(0) # Let dispatch settle + + print(f'Bus1 queue size: {bus1.event_queue.qsize() if bus1.event_queue else 0}') + print(f'Bus2 queue size: {bus2.event_queue.qsize() if bus2.event_queue else 0}') + + # Await E1 - child should jump Bus1's queue + await event1 + + print(f'After await event1: {execution_order}') + + # Child should have executed + assert 'Child_start' in execution_order, 'Child should have executed' + assert 'Child_end' in execution_order, 'Child should have completed' + + # Child should have executed before Event1 ended + child_end_idx = execution_order.index('Child_end') + event1_end_idx = execution_order.index('Bus1_Event1_end') + assert child_end_idx < event1_end_idx, 'Child should complete before Event1 ends' + + # E2 on Bus1 should NOT have executed yet + assert 'Bus1_Event2_start' not in execution_order, \ + f'E2 on Bus1 should NOT have started. Order: {execution_order}' + + # E3 and E4 on Bus2 should NOT have executed yet + assert 'Bus2_Event3_start' not in execution_order, \ + f'E3 on Bus2 should NOT have started. Order: {execution_order}' + assert 'Bus2_Event4_start' not in execution_order, \ + f'E4 on Bus2 should NOT have started. Order: {execution_order}' + + # Now process remaining events on both buses + await bus1.wait_until_idle() + await bus2.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # Verify all events eventually executed + assert 'Bus1_Event2_start' in execution_order + assert 'Bus2_Event3_start' in execution_order + assert 'Bus2_Event4_start' in execution_order + + print('βœ… Multi-bus forwarding with queued events works correctly!') + + finally: + await bus1.stop(clear=True) + await bus2.stop(clear=True) + + +async def test_await_already_completed_event(): + """ + Test that awaiting an event that's already completed is a no-op. + The event isn't in the queue anymore, so there's nothing to reorder. + """ + print('\n=== Test Await Already Completed Event ===') + + bus = EventBus(name='AlreadyCompletedBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + + try: + # Dispatch and await E1 first + event1 = await bus.dispatch(Event1()) + assert event1.event_status == 'completed' + + # Now dispatch E2 + event2 = bus.dispatch(Event2()) + + # Await E1 again - should be a no-op since it's already completed + await event1 # Should return immediately + + print(f'After second await event1: {execution_order}') + + # E2 should NOT have executed yet (we didn't trigger processing) + # The second await on completed E1 should just return without processing queue + assert event2.event_status == 'pending', \ + f'E2 should still be pending, got {event2.event_status}' + + # Complete E2 + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + print('βœ… Await already completed event works correctly!') + + finally: + await bus.stop(clear=True) + + +async def test_multiple_awaits_same_event(): + """ + Test that multiple concurrent awaits on the same event work correctly. + Only the first await should trigger queue reordering; subsequent awaits + should just wait on the completion signal. + """ + print('\n=== Test Multiple Awaits Same Event ===') + + bus = EventBus(name='MultiAwaitBus', max_history_size=100) + execution_order: list[str] = [] + await_results: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class ChildEvent(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + + # Dispatch child + child = bus.dispatch(ChildEvent()) + + # Create multiple concurrent awaits on the same child + async def await_child(name: str): + await child + await_results.append(f'{name}_completed') + + # Start two concurrent awaits + task1 = asyncio.create_task(await_child('await1')) + task2 = asyncio.create_task(await_child('await2')) + + # Wait for both + await asyncio.gather(task1, task2) + execution_order.append('Both_awaits_completed') + + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + async def child_handler(event: ChildEvent) -> str: + execution_order.append('Child_start') + await asyncio.sleep(0.01) # Small delay to ensure both awaits are waiting + execution_order.append('Child_end') + return 'child_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(ChildEvent, child_handler) + + try: + event1 = bus.dispatch(Event1()) + event2 = bus.dispatch(Event2()) + + await event1 + + print(f'After await event1: {execution_order}') + print(f'Await results: {await_results}') + + # Both awaits should have completed + assert len(await_results) == 2, f'Both awaits should complete, got {await_results}' + assert 'await1_completed' in await_results + assert 'await2_completed' in await_results + + # Child should have executed before Event1 ended + assert 'Child_start' in execution_order + assert 'Child_end' in execution_order + child_end_idx = execution_order.index('Child_end') + event1_end_idx = execution_order.index('Event1_end') + assert child_end_idx < event1_end_idx + + # E2 should NOT have executed yet + assert 'Event2_start' not in execution_order, \ + f'E2 should NOT have started. Order: {execution_order}' + + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + print('βœ… Multiple awaits same event works correctly!') + + finally: + await bus.stop(clear=True) + + +async def test_deeply_nested_awaited_children(): + """ + Test deeply nested awaited children: Event1 awaits Child1, which awaits Child2. + All should complete before Event2 starts (no overshoot at any level). + """ + print('\n=== Test Deeply Nested Awaited Children ===') + + bus = EventBus(name='DeepNestedBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Child1(BaseEvent[str]): + pass + + class Child2(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + child1 = bus.dispatch(Child1()) + await child1 + execution_order.append('Event1_end') + return 'event1_done' + + async def child1_handler(event: Child1) -> str: + execution_order.append('Child1_start') + child2 = bus.dispatch(Child2()) + await child2 + execution_order.append('Child1_end') + return 'child1_done' + + async def child2_handler(event: Child2) -> str: + execution_order.append('Child2_start') + execution_order.append('Child2_end') + return 'child2_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + bus.on(Event1, event1_handler) + bus.on(Child1, child1_handler) + bus.on(Child2, child2_handler) + bus.on(Event2, event2_handler) + + try: + event1 = bus.dispatch(Event1()) + event2 = bus.dispatch(Event2()) + + await event1 + + print(f'After await event1: {execution_order}') + + # All nested children should have completed + assert 'Child1_start' in execution_order + assert 'Child1_end' in execution_order + assert 'Child2_start' in execution_order + assert 'Child2_end' in execution_order + + # Verify nesting order: Child2 completes before Child1 + child2_end_idx = execution_order.index('Child2_end') + child1_end_idx = execution_order.index('Child1_end') + event1_end_idx = execution_order.index('Event1_end') + assert child2_end_idx < child1_end_idx < event1_end_idx + + # E2 should NOT have started + assert 'Event2_start' not in execution_order, \ + f'E2 should NOT have started. Order: {execution_order}' + + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # E2 should start after E1 ends + event2_start_idx = execution_order.index('Event2_start') + assert event2_start_idx > event1_end_idx + + print('βœ… Deeply nested awaited children works correctly!') + + finally: + await bus.stop(clear=True) + + async def main(): """Run all tests.""" await test_comprehensive_patterns() await test_race_condition_stress() + await test_awaited_child_jumps_queue_no_overshoot() + await test_dispatch_multiple_await_one_skips_others() + await test_multi_bus_forwarding_with_queued_events() + await test_await_already_completed_event() + await test_multiple_awaits_same_event() + await test_deeply_nested_awaited_children() if __name__ == '__main__': From 7c21c3b8a874ad6fdfbeee86ad4a84cecd6043c0 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 14:41:42 -0800 Subject: [PATCH 25/79] bump version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 904521b..fe65621 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "bubus" description = "Advanced Pydantic-powered event bus with async support" authors = [{ name = "Nick Sweeting" }] -version = "1.6.0" +version = "1.7.0" readme = "README.md" requires-python = ">=3.11,<4.0" classifiers = [ From 2db591468c22321e21993bb41ecd4f0b3485400e Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 15:19:22 -0800 Subject: [PATCH 26/79] propagate dispatch-time user-provided ContextVars to handler execution --- bubus/models.py | 60 ++++- bubus/service.py | 5 + tests/test_context_propagation.py | 419 ++++++++++++++++++++++++++++++ 3 files changed, 480 insertions(+), 4 deletions(-) create mode 100644 tests/test_context_propagation.py diff --git a/bubus/models.py b/bubus/models.py index 1be519c..053fdac 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -1,4 +1,5 @@ import asyncio +import contextvars import inspect import logging import os @@ -256,6 +257,10 @@ def event_result_type_serializer(self, value: Any) -> str | None: # Completion signal _event_completed_signal: asyncio.Event | None = PrivateAttr(default=None) + # Dispatch-time context for ContextVar propagation to handlers + # Captured when dispatch() is called, used when executing handlers via ctx.run() + _event_dispatch_context: contextvars.Context | None = PrivateAttr(default=None) + def __hash__(self) -> int: """Make events hashable using their unique event_id""" return hash(self.event_id) @@ -1055,7 +1060,10 @@ def _default_format_exception_for_log(exc: BaseException) -> str: monitor_task: asyncio.Task[None] | None = None handler_task: asyncio.Task[Any] | None = None - handler_context_tokens = _enter_handler_context_callable(event, self.handler_id) + # Use dispatch-time context if available (GitHub issue #20) + # This ensures ContextVars set before dispatch() are accessible in handlers + # Use getattr to handle stub events that may not have this attribute + dispatch_context = getattr(event, '_event_dispatch_context', None) async def deadlock_monitor() -> None: await asyncio.sleep(15.0) @@ -1069,12 +1077,54 @@ async def deadlock_monitor() -> None: deadlock_monitor(), name=f'{eventbus}.deadlock_monitor({event}, {self.handler_name}#{self.handler_id[-4:]})' ) + # For handlers running in dispatch context, we need to set up internal context vars + # INSIDE that context. Create a wrapper that does setup -> handler -> cleanup. + # This includes holds_global_lock which is set by ReentrantLock in the parent context. + async def async_handler_with_context() -> Any: + """Wrapper that sets up internal context before calling async handler.""" + from bubus.service import holds_global_lock + # Set holds_global_lock since we're running inside a handler that holds the lock + # (ReentrantLock set this in the parent context, but dispatch_context is from before that) + holds_global_lock.set(True) + tokens = _enter_handler_context_callable(event, self.handler_id) + try: + return await handler(event) # type: ignore + finally: + _exit_handler_context_callable(tokens) + + def sync_handler_with_context() -> Any: + """Wrapper that sets up internal context before calling sync handler.""" + from bubus.service import holds_global_lock + holds_global_lock.set(True) + tokens = _enter_handler_context_callable(event, self.handler_id) + try: + return handler(event) + finally: + _exit_handler_context_callable(tokens) + + # If no dispatch context, set up context vars the normal way (outside handler) + if dispatch_context is None: + handler_context_tokens = _enter_handler_context_callable(event, self.handler_id) + else: + handler_context_tokens = None # Will be set inside the wrapper + try: if inspect.iscoroutinefunction(handler): - handler_task = asyncio.create_task(handler(event)) # type: ignore + if dispatch_context is not None: + # Run wrapper (which sets internal context) inside dispatch context + handler_task = asyncio.create_task( + async_handler_with_context(), + context=dispatch_context, + ) + else: + handler_task = asyncio.create_task(handler(event)) # type: ignore handler_return_value: Any = await asyncio.wait_for(handler_task, timeout=self.timeout) elif inspect.isfunction(handler) or inspect.ismethod(handler): - handler_return_value = handler(event) + if dispatch_context is not None: + # Run sync wrapper inside dispatch context + handler_return_value = dispatch_context.run(sync_handler_with_context) + else: + handler_return_value = handler(event) if isinstance(handler_return_value, BaseEvent): logger.debug( f'Handler {self.handler_name} returned BaseEvent, not awaiting to avoid circular dependency' @@ -1144,7 +1194,9 @@ async def deadlock_monitor() -> None: except Exception: pass - _exit_handler_context_callable(handler_context_tokens) + # Only exit context if it was set outside the wrapper (i.e., no dispatch context) + if handler_context_tokens is not None: + _exit_handler_context_callable(handler_context_tokens) def log_tree( self, diff --git a/bubus/service.py b/bubus/service.py index 00d4f2f..9703442 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -685,6 +685,11 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: if current_event is not None: event.event_parent_id = current_event.event_id + # Capture dispatch-time context for propagation to handlers (GitHub issue #20) + # This ensures ContextVars set before dispatch() are accessible in handlers + if event._event_dispatch_context is None: + event._event_dispatch_context = contextvars.copy_context() + # Track child events - if we're inside a handler, add this event to the handler's event_children list # Only track if this is a NEW event (not forwarding an existing event) current_handler_id = _current_handler_id_context.get() diff --git a/tests/test_context_propagation.py b/tests/test_context_propagation.py new file mode 100644 index 0000000..36261a4 --- /dev/null +++ b/tests/test_context_propagation.py @@ -0,0 +1,419 @@ +""" +Tests for ContextVar propagation through event dispatch and handler execution. + +This addresses GitHub issue #20: ContextVar values set before dispatch should +be accessible inside event handlers. + +The key insight is that context must be captured at DISPATCH time (when the +user calls bus.dispatch()), not at PROCESSING time (when the event is pulled +from the queue and handlers are executed). +""" + +import asyncio +from contextvars import ContextVar +from typing import Any + +import pytest + +from bubus import BaseEvent, EventBus + + +# Test context variables (simulating user-defined context like request_id) +request_id_var: ContextVar[str] = ContextVar('request_id', default='') +user_id_var: ContextVar[str] = ContextVar('user_id', default='') +trace_id_var: ContextVar[str] = ContextVar('trace_id', default='') + + +class SimpleEvent(BaseEvent[str]): + """Simple event for context propagation tests.""" + pass + + +class ChildEvent(BaseEvent[str]): + """Child event for nested context tests.""" + pass + + +class TestContextPropagation: + """Test that ContextVar values propagate from dispatch site to handlers.""" + + async def test_contextvar_propagates_to_handler(self): + """ + Basic test: ContextVar set before dispatch should be accessible in handler. + + This is the core issue from GitHub #20. + """ + bus = EventBus(name='ContextTestBus') + captured_values: dict[str, str] = {} + + async def handler(event: SimpleEvent) -> str: + # These should have the values set BEFORE dispatch, not defaults + captured_values['request_id'] = request_id_var.get() + captured_values['user_id'] = user_id_var.get() + return 'handled' + + bus.on(SimpleEvent, handler) + + try: + # Set context values (simulating FastAPI request context) + request_id_var.set('req-12345') + user_id_var.set('user-abc') + + # Dispatch and await + event = await bus.dispatch(SimpleEvent()) + + # Handler should have seen the context values + assert captured_values['request_id'] == 'req-12345', \ + f"Expected 'req-12345', got '{captured_values['request_id']}'" + assert captured_values['user_id'] == 'user-abc', \ + f"Expected 'user-abc', got '{captured_values['user_id']}'" + + finally: + await bus.stop(clear=True) + + async def test_contextvar_propagates_through_nested_handlers(self): + """ + Nested dispatch: Context should propagate through parent -> child handlers. + + When a handler dispatches and awaits a child event, the child handler + should also have access to the original context. + """ + bus = EventBus(name='NestedContextBus') + captured_parent: dict[str, str] = {} + captured_child: dict[str, str] = {} + + async def parent_handler(event: SimpleEvent) -> str: + captured_parent['request_id'] = request_id_var.get() + captured_parent['trace_id'] = trace_id_var.get() + + # Dispatch child event + child = await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + captured_child['request_id'] = request_id_var.get() + captured_child['trace_id'] = trace_id_var.get() + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + # Set context + request_id_var.set('req-nested-123') + trace_id_var.set('trace-xyz') + + await bus.dispatch(SimpleEvent()) + + # Both handlers should see the context + assert captured_parent['request_id'] == 'req-nested-123' + assert captured_parent['trace_id'] == 'trace-xyz' + assert captured_child['request_id'] == 'req-nested-123' + assert captured_child['trace_id'] == 'trace-xyz' + + finally: + await bus.stop(clear=True) + + async def test_context_isolation_between_dispatches(self): + """ + Different dispatches should have isolated contexts. + + If dispatch A sets request_id='A' and dispatch B sets request_id='B', + handler A should see 'A' and handler B should see 'B'. + """ + bus = EventBus(name='IsolationTestBus') + captured_values: list[str] = [] + + async def handler(event: SimpleEvent) -> str: + # Small delay to ensure both handlers run + await asyncio.sleep(0.01) + captured_values.append(request_id_var.get()) + return 'handled' + + bus.on(SimpleEvent, handler) + + try: + # Dispatch two events with different contexts + async def dispatch_with_context(req_id: str): + request_id_var.set(req_id) + await bus.dispatch(SimpleEvent()) + + # Run both dispatches + request_id_var.set('req-A') + event_a = bus.dispatch(SimpleEvent()) + + request_id_var.set('req-B') + event_b = bus.dispatch(SimpleEvent()) + + await event_a + await event_b + + # Each handler should have seen its own context + # Note: order might vary, so just check both values are present + assert 'req-A' in captured_values, f"Expected 'req-A' in {captured_values}" + assert 'req-B' in captured_values, f"Expected 'req-B' in {captured_values}" + + finally: + await bus.stop(clear=True) + + async def test_context_propagates_to_parallel_handlers(self): + """ + When parallel_handlers=True, all handlers should see the dispatch context. + """ + bus = EventBus(name='ParallelContextBus', parallel_handlers=True) + captured_values: list[str] = [] + lock = asyncio.Lock() + + async def handler1(event: SimpleEvent) -> str: + async with lock: + captured_values.append(f'h1:{request_id_var.get()}') + return 'h1_done' + + async def handler2(event: SimpleEvent) -> str: + async with lock: + captured_values.append(f'h2:{request_id_var.get()}') + return 'h2_done' + + bus.on(SimpleEvent, handler1) + bus.on(SimpleEvent, handler2) + + try: + request_id_var.set('req-parallel') + await bus.dispatch(SimpleEvent()) + + assert 'h1:req-parallel' in captured_values, f"Handler1 didn't see context: {captured_values}" + assert 'h2:req-parallel' in captured_values, f"Handler2 didn't see context: {captured_values}" + + finally: + await bus.stop(clear=True) + + async def test_context_propagates_through_event_forwarding(self): + """ + When events are forwarded between buses, context should propagate. + """ + bus1 = EventBus(name='Bus1') + bus2 = EventBus(name='Bus2') + captured_bus1: dict[str, str] = {} + captured_bus2: dict[str, str] = {} + + async def bus1_handler(event: SimpleEvent) -> str: + captured_bus1['request_id'] = request_id_var.get() + return 'bus1_done' + + async def bus2_handler(event: SimpleEvent) -> str: + captured_bus2['request_id'] = request_id_var.get() + return 'bus2_done' + + bus1.on(SimpleEvent, bus1_handler) + bus1.on('*', bus2.dispatch) # Forward all events to bus2 + bus2.on(SimpleEvent, bus2_handler) + + try: + request_id_var.set('req-forwarded') + await bus1.dispatch(SimpleEvent()) + await bus2.wait_until_idle() + + assert captured_bus1['request_id'] == 'req-forwarded', \ + f"Bus1 handler didn't see context: {captured_bus1}" + assert captured_bus2['request_id'] == 'req-forwarded', \ + f"Bus2 handler didn't see context: {captured_bus2}" + + finally: + await bus1.stop(clear=True) + await bus2.stop(clear=True) + + async def test_handler_can_modify_context_without_affecting_parent(self): + """ + Handler modifications to ContextVar should not affect the parent context. + + This ensures context is properly copied, not shared. + """ + bus = EventBus(name='ModifyContextBus') + parent_value_after_child: str = '' + + async def parent_handler(event: SimpleEvent) -> str: + nonlocal parent_value_after_child + # Set a value in parent + request_id_var.set('parent-value') + + # Dispatch child which will modify the context + await bus.dispatch(ChildEvent()) + + # Parent's context should be unchanged + parent_value_after_child = request_id_var.get() + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + # Modify context in child + request_id_var.set('child-modified') + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + await bus.dispatch(SimpleEvent()) + + # Parent should still see its own value, not child's modification + assert parent_value_after_child == 'parent-value', \ + f"Parent context was modified by child: got '{parent_value_after_child}'" + + finally: + await bus.stop(clear=True) + + async def test_event_parent_id_tracking_still_works(self): + """ + Critical: Internal context vars (event_parent_id tracking) must still work + when we propagate dispatch-time context. + + This ensures our context merging doesn't break the bubus internals. + """ + bus = EventBus(name='ParentIdTrackingBus') + parent_event_id: str | None = None + child_event_parent_id: str | None = None + + async def parent_handler(event: SimpleEvent) -> str: + nonlocal parent_event_id + parent_event_id = event.event_id + + # Child event should automatically get parent_id set + child = await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + nonlocal child_event_parent_id + child_event_parent_id = event.event_parent_id + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + # Set user context (to ensure we're testing the merge scenario) + request_id_var.set('req-parent-tracking') + + await bus.dispatch(SimpleEvent()) + + # Verify parent ID tracking works + assert parent_event_id is not None, "Parent event ID was not captured" + assert child_event_parent_id is not None, "Child event parent ID was not set" + assert child_event_parent_id == parent_event_id, \ + f"Child's parent_id ({child_event_parent_id}) doesn't match parent's id ({parent_event_id})" + + finally: + await bus.stop(clear=True) + + async def test_dispatch_context_and_parent_id_both_work(self): + """ + Both user-defined ContextVars AND internal event tracking must work together. + + This is the key test for context stacking/merging. + """ + bus = EventBus(name='CombinedContextBus') + results: dict[str, Any] = {} + + async def parent_handler(event: SimpleEvent) -> str: + results['parent_request_id'] = request_id_var.get() + results['parent_event_id'] = event.event_id + + # Dispatch child - should get both user context AND parent tracking + child = await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + results['child_request_id'] = request_id_var.get() + results['child_event_parent_id'] = event.event_parent_id + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + # Set user context + request_id_var.set('req-combined-test') + + await bus.dispatch(SimpleEvent()) + + # User context should propagate + assert results['parent_request_id'] == 'req-combined-test', \ + f"Parent didn't see user context: {results['parent_request_id']}" + assert results['child_request_id'] == 'req-combined-test', \ + f"Child didn't see user context: {results['child_request_id']}" + + # Internal parent tracking should also work + assert results['child_event_parent_id'] == results['parent_event_id'], \ + f"Parent ID tracking broken: child.parent_id={results['child_event_parent_id']}, parent.id={results['parent_event_id']}" + + finally: + await bus.stop(clear=True) + + async def test_deeply_nested_context_and_parent_tracking(self): + """ + Test that both user context and parent tracking work through multiple levels. + """ + bus = EventBus(name='DeepNestingBus') + results: list[dict[str, Any]] = [] + + class Level2Event(BaseEvent[str]): + pass + + class Level3Event(BaseEvent[str]): + pass + + async def level1_handler(event: SimpleEvent) -> str: + results.append({ + 'level': 1, + 'request_id': request_id_var.get(), + 'event_id': event.event_id, + 'parent_id': event.event_parent_id, + }) + await bus.dispatch(Level2Event()) + return 'level1_done' + + async def level2_handler(event: Level2Event) -> str: + results.append({ + 'level': 2, + 'request_id': request_id_var.get(), + 'event_id': event.event_id, + 'parent_id': event.event_parent_id, + }) + await bus.dispatch(Level3Event()) + return 'level2_done' + + async def level3_handler(event: Level3Event) -> str: + results.append({ + 'level': 3, + 'request_id': request_id_var.get(), + 'event_id': event.event_id, + 'parent_id': event.event_parent_id, + }) + return 'level3_done' + + bus.on(SimpleEvent, level1_handler) + bus.on(Level2Event, level2_handler) + bus.on(Level3Event, level3_handler) + + try: + request_id_var.set('req-deep-nesting') + + await bus.dispatch(SimpleEvent()) + + # All levels should see the user context + assert len(results) == 3, f"Expected 3 levels, got {len(results)}" + for r in results: + assert r['request_id'] == 'req-deep-nesting', \ + f"Level {r['level']} didn't see user context: {r['request_id']}" + + # Parent chain should be correct + assert results[0]['parent_id'] is None, "Level 1 should have no parent" + assert results[1]['parent_id'] == results[0]['event_id'], \ + f"Level 2 parent mismatch: {results[1]['parent_id']} != {results[0]['event_id']}" + assert results[2]['parent_id'] == results[1]['event_id'], \ + f"Level 3 parent mismatch: {results[2]['parent_id']} != {results[1]['event_id']}" + + finally: + await bus.stop(clear=True) + + +if __name__ == '__main__': + pytest.main([__file__, '-v', '-s']) From 0c5c1dfbd75857910d13b90cecbb68be054b394b Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 15:19:47 -0800 Subject: [PATCH 27/79] clear ContextVars after handler execution to lower memory use --- bubus/models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bubus/models.py b/bubus/models.py index 053fdac..5d28026 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -754,6 +754,8 @@ def event_mark_complete_if_all_handlers_completed(self) -> None: if hasattr(self, 'event_processed_at'): self.event_processed_at = datetime.now(UTC) self.event_completed_signal.set() + # Clear dispatch context to avoid memory leaks + self._event_dispatch_context = None return # Check if all handler results are done @@ -777,6 +779,8 @@ def event_mark_complete_if_all_handlers_completed(self) -> None: self.event_processed_at = datetime.now(UTC) # logger.debug(f'Event {self} marking complete - all handlers and children done') self.event_completed_signal.set() + # Clear dispatch context to avoid memory leaks (it holds references to ContextVars) + self._event_dispatch_context = None def event_are_all_children_complete(self, _visited: set[str] | None = None) -> bool: """Recursively check if all child events and their descendants are complete""" From 5899a3d65cb7a6b6137ecc0a67311b0f5bf7d8b1 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 15:20:08 -0800 Subject: [PATCH 28/79] bump version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fe65621..81c1ae1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "bubus" description = "Advanced Pydantic-powered event bus with async support" authors = [{ name = "Nick Sweeting" }] -version = "1.7.0" +version = "1.7.1" readme = "README.md" requires-python = ">=3.11,<4.0" classifiers = [ From 1ca9709cf8cd03978add0b6acf9d2d79246b12ee Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 15:23:52 -0800 Subject: [PATCH 29/79] document the new ContextVar propagation feature --- README.md | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/README.md b/README.md index bf0616e..2b6b5bb 100644 --- a/README.md +++ b/README.md @@ -439,6 +439,69 @@ email_list = await event_bus.dispatch(FetchInboxEvent(account_id='124', ...)).ev
    +### 🧡 ContextVar Propagation + +ContextVars set before `dispatch()` are automatically propagated to event handlers. This is essential for request-scoped context like request IDs, user sessions, or tracing spans: + +```python +from contextvars import ContextVar + +# Define your context variables +request_id: ContextVar[str] = ContextVar('request_id', default='') +user_id: ContextVar[str] = ContextVar('user_id', default='') + +async def handler(event: MyEvent) -> str: + # Handler sees the context values that were set before dispatch() + print(f"Request: {request_id.get()}, User: {user_id.get()}") + return "done" + +bus.on(MyEvent, handler) + +# Set context before dispatch (e.g., in FastAPI middleware) +request_id.set('req-12345') +user_id.set('user-abc') + +# Handler will see request_id='req-12345' and user_id='user-abc' +await bus.dispatch(MyEvent()) +``` + +**Context propagates through nested handlers:** + +```python +async def parent_handler(event: ParentEvent) -> str: + # Context is captured at dispatch time + print(f"Parent sees: {request_id.get()}") # 'req-12345' + + # Child events inherit the same context + await bus.dispatch(ChildEvent()) + return "parent_done" + +async def child_handler(event: ChildEvent) -> str: + # Child also sees the original dispatch context + print(f"Child sees: {request_id.get()}") # 'req-12345' + return "child_done" +``` + +**Context isolation between dispatches:** + +Each dispatch captures its own context snapshot. Concurrent dispatches with different context values are properly isolated: + +```python +request_id.set('req-A') +event_a = bus.dispatch(MyEvent()) # Handler A sees 'req-A' + +request_id.set('req-B') +event_b = bus.dispatch(MyEvent()) # Handler B sees 'req-B' + +await event_a # Still sees 'req-A' +await event_b # Still sees 'req-B' +``` + +> [!NOTE] +> Context is captured at `dispatch()` time, not when the handler executes. This ensures handlers see the context from the call site, even if the event is processed later from a queue. + +
    + ### 🧹 Memory Management EventBus includes automatic memory management to prevent unbounded growth in long-running applications: From 1c6e6cfe3b2cd041274546bad341a489f3fa0bfa Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 16:08:56 -0800 Subject: [PATCH 30/79] add claude perms --- .claude/settings.local.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.claude/settings.local.json b/.claude/settings.local.json index f52b501..43fc995 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -20,7 +20,10 @@ "Bash(echo:*)", "Bash(grep:*)", "Bash(rg:*)", - "Bash(.venv/bin/pytest tests/test_typed_event_results.py::test_builtin_type_casting -v -s --timeout=10)" + "WebFetch(domain:github.com)", + "Bash(timeout 60 .venv/bin/pytest:*)", + "Bash(timeout 180 .venv/bin/pytest tests/ -v)", + "Bash(timeout 180 .venv/bin/pytest:*)" ], "deny": [] } From bf82b95c3e1640c8194e7040521ef7962a352936 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 16:10:40 -0800 Subject: [PATCH 31/79] implement new .find method to replace expect and get_or_dispatch --- README.md | 198 +++++-- bubus/service.py | 268 +++++++-- tests/test_find.py | 1382 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1762 insertions(+), 86 deletions(-) create mode 100644 tests/test_find.py diff --git a/README.md b/README.md index 2b6b5bb..31774cc 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ class UserLoginEvent(BaseEvent[str]): async def handle_login(event: UserLoginEvent) -> str: auth_request = await event.event_bus.dispatch(AuthRequestEvent(...)) # nested events supported - auth_response = await event.event_bus.expect(AuthResponseEvent, timeout=30.0) + auth_response = await event.event_bus.find(AuthResponseEvent, child_of=auth_request, future=30) return f"User {event.username} logged in admin={event.is_admin} with API response: {await auth_response.event_result()}" bus = EventBus() @@ -271,74 +271,93 @@ if __name__ == '__main__':

    -### ⏳ Expect an Event to be Dispatched +### πŸ”Ž Find Events in History or Wait for Future Events -Wait for specific events to be seen on a bus with optional filtering: +The `find()` method provides a unified way to search past event history and/or wait for future events. It's the recommended approach for most event lookup scenarios. + +The `past` and `future` parameters accept either `bool` or `float` values: + +| Value | `past` meaning | `future` meaning | +|-------|----------------|------------------| +| `True` | Search all history | Wait forever | +| `False` | Skip history search | Don't wait | +| `5.0` | Search last 5 seconds | Wait up to 5 seconds | ```python -# Block until a specific event is seen (with optional timeout) -request_event = await bus.dispatch(RequestEvent(id=123, table='invoices', request_id=999234)) -response_event = await bus.expect(ResponseEvent, timeout=30) +# Search all history, wait up to 5s for future +event = await bus.find(ResponseEvent, past=True, future=5) + +# Search last 5s of history, wait forever +event = await bus.find(ResponseEvent, past=5, future=True) + +# Search last 5s of history, wait up to 5s +event = await bus.find(ResponseEvent, past=5, future=5) + +# Search all history only, don't wait (instant) +event = await bus.find(ResponseEvent, past=True, future=False) + +# Wait up to 5s for future only (like expect()) +event = await bus.find(ResponseEvent, past=False, future=5) + +# With custom filter +event = await bus.find(ResponseEvent, where=lambda e: e.request_id == my_id, future=5) ``` -A more complex real-world example showing off all the features: +#### Finding Child Events + +When you dispatch an event that triggers child events, use `child_of` to find specific descendants: ```python -async def on_generate_invoice_pdf(event: GenerateInvoiceEvent) -> pdf: - request_event = await bus.dispatch(APIRequestEvent( # example: fire a backend request via some RPC client using bubus - method='invoices.generatePdf', - invoice_id=event.invoice_id, - request_id=uuid4(), - )) - # ...rpc client should send the request, then call event_bus.dispatch(APIResponseEvent(...)) when it gets a response ... - - # wait for the response event to be fired by the RPC client - is_our_response = lambda response_event: response_event.request_id == request_event.request_id - is_succesful = lambda response_event: response_event.invoice_id == event.invoice_id and response_event.invoice_url - response_event: APIResponseEvent | None = await bus.expect( - APIResponseEvent, # wait for events of this type (also accepts str name) - include=lambda e: is_our_response(e) and is_succesful(e), # only include events that match a certain filter func - exclude=lambda e: e.status != 'retrying', # optionally exclude certain events, overrides include - timeout=30, # returns None if no match is seen within 30 sec - ) - if response_event is None: - await bus.dispatch(TimedOutError(msg='timed out while waiting for response from server', request_id=request_event.id)) - return None +# Dispatch a parent event that triggers child events +nav_event = await bus.dispatch(NavigateToUrlEvent(url="https://example.com")) + +# Find a child event (may have already fired, or wait for it) +new_tab = await bus.find(TabCreatedEvent, child_of=nav_event, future=5) +if new_tab: + print(f"New tab created: {new_tab.tab_id}") +``` + +This solves race conditions where child events fire before you start waiting for them. + +#### Tree Traversal Helpers - return response_event.invoice_url +Check parent-child relationships between events: -event_bus.on(GenerateInvoiceEvent, on_generate_invoice_pdf) +```python +# Check if event is a descendant of another event +if bus.event_is_child_of(child_event, parent_event): + print("child_event is a descendant of parent_event") + +# Check if event is an ancestor of another event +if bus.event_is_parent_of(parent_event, child_event): + print("parent_event is an ancestor of child_event") ``` > [!IMPORTANT] -> `expect()` resolves when the event is first *dispatched* to the `EventBus`, not when it completes. `await response_event` to get the completed event. -> If the timeout elapses with no match, `expect()` returns `None`. +> `find()` resolves when the event is first *dispatched* to the `EventBus`, not when it completes. Use `await event` to wait for handlers to finish. +> If no match is found (or future timeout elapses), `find()` returns `None`.
    ### πŸ” Event Debouncing -Avoid re-running expensive work by checking recent history before dispatching. Combine `query()`, `expect()`, and `dispatch()` to coalesce bursts of identical events: +Avoid re-running expensive work by reusing recent events. The `find()` method makes debouncing simple: ```python -from datetime import timedelta - -debounced_event = ( - await bus.query(SyncWithServerEvent, since=timedelta(seconds=10), include=lambda e: e.user_id == user.id) - or await bus.expect(SyncWithServerEvent, timeout=5, include=lambda e: e.user_id == user.id) - or await bus.dispatch(SyncWithServerEvent(user_id=user.id)) +# Simple debouncing: reuse event from last 10 seconds, or dispatch new +event = ( + await bus.find(ScreenshotEvent, past=10, future=False) # Check last 10s of history (instant) + or await bus.dispatch(ScreenshotEvent()) ) -if debounced_event is None: - raise RuntimeError('Sync dispatch failed unexpectedly') - -print(f'Last sync completed at {debounced_event.event_completed_at}') +# More advanced: check history, wait briefly for in-flight, then dispatch +event = ( + await bus.find(SyncEvent, past=True, future=False) # Check all history (instant) + or await bus.find(SyncEvent, past=False, future=5) # Wait up to 5s for in-flight + or await bus.dispatch(SyncEvent()) # Fallback: dispatch new +) ``` -- `query()` searches the most recent completed events (newest-first) in memory. -- `expect()` waits for an in-flight event if none were found in the look-back window. -- Only when both checks miss do you emit a fresh event, satisfying typical debounce requirements without extra state. -
    ### 🎯 Event Handler Return Values @@ -699,9 +718,62 @@ if recent_sync is not None: print('We already synced recently, skipping') ``` -##### `expect(event_type: str | Type[BaseEvent], timeout: float | None=None, predicate: Callable[[BaseEvent], bool]=None) -> BaseEvent | None` +##### `find(event_type: str | Type[BaseEvent], *, where: Callable[[BaseEvent], bool]=None, child_of: BaseEvent | None=None, past: bool | float=True, future: bool | float=True) -> BaseEvent | None` -Wait for a specific event to occur. +Find an event matching criteria in history and/or future. This is the recommended unified method for event lookup. + +**Parameters:** + +- `event_type`: The event type string or model class to find +- `where`: Predicate function for filtering (default: matches all) +- `child_of`: Only match events that are descendants of this parent event +- `past`: Controls history search behavior (default: `True`) + - `True`: search all history + - `False`: skip history search + - `float`: search events from last N seconds only +- `future`: Controls future wait behavior (default: `True`) + - `True`: wait forever for matching event + - `False`: don't wait for future events + - `float`: wait up to N seconds for matching event + +```python +# Search all history, wait up to 5s for future +event = await bus.find(ResponseEvent, past=True, future=5) + +# Search last 5s of history, wait forever +event = await bus.find(ResponseEvent, past=5, future=True) + +# Search last 5s of history, wait up to 5s +event = await bus.find(ResponseEvent, past=5, future=5) + +# Search all history only, don't wait (instant) +event = await bus.find(ResponseEvent, past=True, future=False) + +# Wait up to 5s for future only (ignore history) +event = await bus.find(ResponseEvent, past=False, future=5) + +# Find child of a specific parent event +child = await bus.find(ChildEvent, child_of=parent_event, future=5) + +# With custom filter +event = await bus.find(ResponseEvent, where=lambda e: e.status == 'success', future=5) +``` + +##### `expect(event_type: str | Type[BaseEvent], *, include: Callable=None, exclude: Callable=None, timeout: float | None=None, past: bool | float=False, child_of: BaseEvent | None=None) -> BaseEvent | None` + +Wait for a specific event to occur. This is a backwards-compatible wrapper around `find()`. + +**Parameters:** + +- `event_type`: The event type string or model class to wait for +- `include`: Filter function that must return `True` for the event to match +- `exclude`: Filter function that must return `False` for the event to match +- `timeout`: Maximum time to wait in seconds (None = wait forever). Maps to `future` parameter of `find()`. +- `past`: Controls history search behavior (default: `False`) + - `True`: search all history first + - `False`: skip history search + - `float`: search events from last N seconds +- `child_of`: Only match events that are descendants of this parent event ```python # Wait for any UserEvent @@ -710,13 +782,41 @@ event = await bus.expect('UserEvent', timeout=30) # Wait with custom filter event = await bus.expect( 'UserEvent', - predicate=lambda e: e.user_id == 'specific_user' + include=lambda e: e.user_id == 'specific_user', + timeout=30, ) +# Search history first, then wait +event = await bus.expect('UserEvent', past=True, timeout=30) + +# Search last 10 seconds of history, then wait +event = await bus.expect('UserEvent', past=10, timeout=30) + +# Find child event +child = await bus.expect(ChildEvent, child_of=parent_event, timeout=5) + if event is None: print('No matching event arrived within 30 seconds') ``` +##### `event_is_child_of(event: BaseEvent, ancestor: BaseEvent) -> bool` + +Check if event is a descendant of ancestor (child, grandchild, etc.). + +```python +if bus.event_is_child_of(child_event, parent_event): + print("child_event is a descendant of parent_event") +``` + +##### `event_is_parent_of(event: BaseEvent, descendant: BaseEvent) -> bool` + +Check if event is an ancestor of descendant (parent, grandparent, etc.). + +```python +if bus.event_is_parent_of(parent_event, child_event): + print("parent_event is an ancestor of child_event") +``` + ##### `wait_until_idle(timeout: float | None=None)` Wait until all events are processed and the bus is idle. diff --git a/bubus/service.py b/bubus/service.py index 9703442..ef7fbef 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -781,6 +781,8 @@ async def expect( exclude: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: False, predicate: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: True, timeout: float | None = None, + past: bool | float = False, + child_of: BaseEvent[Any] | None = None, ) -> T_ExpectedEvent | None: ... @overload @@ -791,6 +793,8 @@ async def expect( exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, timeout: float | None = None, + past: bool | float = False, + child_of: BaseEvent[Any] | None = None, ) -> BaseEvent[Any] | None: ... async def expect( @@ -800,16 +804,26 @@ async def expect( exclude: Callable[[BaseEvent[Any]], bool] = lambda _: False, predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, timeout: float | None = None, + past: bool | float = False, + child_of: BaseEvent[Any] | None = None, ) -> BaseEvent[Any] | T_ExpectedEvent | None: """ Wait for an event matching the given type/pattern with optional filters. + This is a backwards-compatible wrapper around find(). For new code, consider + using find() directly for clearer semantics. + Args: event_type: The event type string or model class to wait for include: Filter function that must return True for the event to match (default: lambda e: True) exclude: Filter function that must return False for the event to match (default: lambda e: False) predicate: Deprecated name, alias for include (default: lambda e: True) timeout: Maximum time to wait in seconds as a float (None = wait forever) + past: Controls history search (default: False): + - True: search all history first + - False: skip history search + - float: search events from last N seconds + child_of: Only match events that are descendants of this parent event Returns: The first matching event, or None if no match arrives before the timeout @@ -831,46 +845,35 @@ async def expect( exclude=lambda e: e.error_code is not None, timeout=30 ) - """ - future: asyncio.Future[BaseEvent[Any]] = asyncio.Future() - - # Handle backwards compatibility: merge predicate into include - if predicate is not None: # type: ignore[conditionAlwaysTrue] - original_include = include - include = lambda e, orig=original_include, pred=predicate: orig(e) and pred(e) - - def notify_expect_handler(event: BaseEvent[Any]) -> None: - """Handler that resolves the future when a matching event is found""" - if not future.done() and include(event) and not exclude(event): - future.set_result(event) - - # make debugging otherwise ephemeral async expect handlers easier by including some metadata in the stacktrace func names - current_frame = inspect.currentframe() - assert current_frame - notify_expect_handler.__name__ = f'{self}.expect({event_type}, timeout={timeout})@{_log_pretty_path(current_frame.f_code.co_filename)}:{current_frame.f_lineno}' # add file and line number to the name - # Register temporary listener that watches for matching events and triggers the expect handler - self.on(event_type, notify_expect_handler) + # Search history first, then wait for future + response = await eventbus.expect( + 'ResponseEvent', + past=True, + timeout=30 + ) + """ + # Merge include/exclude/predicate into single where function for find() + def where(event: BaseEvent[Any]) -> bool: + if predicate is not None and not predicate(event): # type: ignore[truthy-function] + return False + if not include(event): + return False + if exclude(event): + return False + return True - # Ensure the temporary handler runs before user handlers so expect() resolves immediately after dispatch. - event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) - handlers_for_key = self.handlers.get(event_key) - if handlers_for_key and handlers_for_key[-1] is notify_expect_handler: - handlers_for_key.insert(0, handlers_for_key.pop()) + # Map timeout to future parameter: None -> True (wait forever), float -> float (wait N seconds) + future_param: bool | float = True if timeout is None else timeout - try: - # Wait for the future with optional timeout - if timeout is not None: - return await asyncio.wait_for(future, timeout=timeout) - else: - return await future - except asyncio.TimeoutError: - return None - finally: - # Clean up handler - event_key: str = event_type.__name__ if isinstance(event_type, type) else str(event_type) # pyright: ignore[reportUnknownMemberType, reportPartialTypeErrors] - if event_key in self.handlers and notify_expect_handler in self.handlers[event_key]: - self.handlers[event_key].remove(notify_expect_handler) + # Delegate to find() + return await self.find( + event_type, + where=where, + child_of=child_of, + past=past, + future=future_param, + ) @overload async def query( @@ -942,7 +945,198 @@ def combined_include(event: BaseEvent[Any]) -> bool: return None + def event_is_child_of(self, event: BaseEvent[Any], ancestor: BaseEvent[Any]) -> bool: + """ + Check if event is a descendant of ancestor (child, grandchild, etc.). + + Walks up the parent chain from event looking for ancestor. + Returns True if ancestor is found in the chain, False otherwise. + + Args: + event: The potential descendant event + ancestor: The potential ancestor event + + Returns: + True if event is a descendant of ancestor, False otherwise + """ + current_id = event.event_parent_id + visited: set[str] = set() + + while current_id and current_id not in visited: + if current_id == ancestor.event_id: + return True + visited.add(current_id) + + # Find parent event in any bus's history + parent = self.event_history.get(current_id) + if parent is None: + # Check other buses + for bus in list(EventBus.all_instances): + if bus is not self and current_id in bus.event_history: + parent = bus.event_history[current_id] + break + if parent is None: + break + current_id = parent.event_parent_id + + return False + + def event_is_parent_of(self, event: BaseEvent[Any], descendant: BaseEvent[Any]) -> bool: + """ + Check if event is an ancestor of descendant (parent, grandparent, etc.). + + This is the inverse of event_is_child_of. + + Args: + event: The potential ancestor event + descendant: The potential descendant event + + Returns: + True if event is an ancestor of descendant, False otherwise + """ + return self.event_is_child_of(descendant, event) + + @overload + async def find( + self, + event_type: type[T_ExpectedEvent], + where: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: True, + child_of: BaseEvent[Any] | None = None, + past: bool | float = True, + future: bool | float = True, + ) -> T_ExpectedEvent | None: ... + + @overload + async def find( + self, + event_type: PythonIdentifierStr, + where: Callable[[BaseEvent[Any]], bool] = lambda _: True, + child_of: BaseEvent[Any] | None = None, + past: bool | float = True, + future: bool | float = True, + ) -> BaseEvent[Any] | None: ... + + async def find( + self, + event_type: PythonIdentifierStr | type[T_ExpectedEvent], + where: Callable[[BaseEvent[Any]], bool] = lambda _: True, + child_of: BaseEvent[Any] | None = None, + past: bool | float = True, + future: bool | float = True, + ) -> BaseEvent[Any] | T_ExpectedEvent | None: + """ + Find an event matching criteria in history and/or future. + + This is a unified method that can search past event_history, wait for future + events, or both. Use this instead of separate query() and expect() calls. + + Args: + event_type: The event type string or model class to find + where: Predicate function for filtering (default: lambda _: True) + child_of: Only match events that are descendants of this parent event + past: Controls history search behavior: + - True: search all history + - False: skip history search + - float: search events from last N seconds only + future: Controls future wait behavior: + - True: wait forever for matching event + - False: don't wait for future events + - float: wait up to N seconds for matching event + + Returns: + Matching event or None if not found/timeout + + Examples: + # Search all history, wait up to 5s for future + event = await bus.find(EventType, past=True, future=5) + + # Search last 5s of history, wait forever + event = await bus.find(EventType, past=5, future=True) + + # Search last 5s of history, wait up to 5s + event = await bus.find(EventType, past=5, future=5) + + # Search all history instantly, don't wait (debouncing) + event = await bus.find(EventType, past=True, future=False) + + # Wait up to 5s for future only (like old expect) + event = await bus.find(EventType, past=False, future=5) + # Find child event that may have already fired + nav_event = await bus.dispatch(NavigateToUrlEvent(...)) + new_tab = await bus.find(TabCreatedEvent, child_of=nav_event, past=True, future=5) + """ + # If neither past nor future, return None immediately + if past is False and future is False: + return None + + # Build combined predicate including child_of check + def matches(event: BaseEvent[Any]) -> bool: + if not where(event): + return False + if child_of is not None and not self.event_is_child_of(event, child_of): + return False + return True + + # Search past history if enabled + if past is not False: + # Calculate cutoff time if past is a float (time window in seconds) + cutoff: datetime | None = None + if past is not True: # past is a float/int specifying time window + cutoff = datetime.now(UTC) - timedelta(seconds=float(past)) + + events = list(self.event_history.values()) + for event in reversed(events): + # Only match completed events in history + if event.event_completed_at is None: + continue + # Skip events older than cutoff (dispatched before the time window) + if cutoff is not None and event.event_created_at < cutoff: + continue + if not self._event_matches_pattern(event, event_type): + continue + if matches(event): + return event + + # If not searching future, return None + if future is False: + return None + + # Wait for future events using expect-like pattern + future_result: asyncio.Future[BaseEvent[Any]] = asyncio.Future() + + def notify_find_handler(event: BaseEvent[Any]) -> None: + """Handler that resolves the future when a matching event is found""" + if not future_result.done() and matches(event): + future_result.set_result(event) + + # Add debugging info to handler name + current_frame = inspect.currentframe() + assert current_frame + notify_find_handler.__name__ = f'{self}.find({event_type}, past={past}, future={future})@{_log_pretty_path(current_frame.f_code.co_filename)}:{current_frame.f_lineno}' + + # Register temporary listener + self.on(event_type, notify_find_handler) + + # Ensure the temporary handler runs before user handlers + event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) + handlers_for_key = self.handlers.get(event_key) + if handlers_for_key and handlers_for_key[-1] is notify_find_handler: + handlers_for_key.insert(0, handlers_for_key.pop()) + + try: + # Wait forever if future is True, otherwise wait up to N seconds + if future is True: + return await future_result + else: + return await asyncio.wait_for(future_result, timeout=float(future)) + except asyncio.TimeoutError: + return None + finally: + # Clean up handler + event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) + if event_key in self.handlers and notify_find_handler in self.handlers[event_key]: + self.handlers[event_key].remove(notify_find_handler) def _start(self) -> None: """Start the event bus if not already running""" diff --git a/tests/test_find.py b/tests/test_find.py new file mode 100644 index 0000000..510e418 --- /dev/null +++ b/tests/test_find.py @@ -0,0 +1,1382 @@ +""" +Tests for the unified find() method and tree traversal helpers. + +Addresses GitHub Issues #10 (debouncing) and #15 (expect past + child_of). +""" + +# pyright: reportUnknownMemberType=false +# pyright: reportUnknownLambdaType=false +# pyright: reportAttributeAccessIssue=false +# pyright: reportUnknownVariableType=false +# pyright: reportUnusedVariable=false + +import asyncio +from datetime import UTC, datetime + +import pytest + +from bubus import BaseEvent, EventBus + + +# Test event types +class ParentEvent(BaseEvent[str]): + pass + + +class ChildEvent(BaseEvent[str]): + pass + + +class GrandchildEvent(BaseEvent[str]): + pass + + +class UnrelatedEvent(BaseEvent[str]): + pass + + +class ScreenshotEvent(BaseEvent[str]): + """Example event for debouncing tests.""" + + target_id: str = '' + full_page: bool = False + + +class NavigateEvent(BaseEvent[str]): + """Example event for race condition tests.""" + + url: str = '' + + +class TabCreatedEvent(BaseEvent[str]): + """Example event that fires as result of navigation.""" + + tab_id: str = '' + + +# ============================================================================= +# Tree Traversal Helper Tests +# ============================================================================= + + +class TestEventIsChildOf: + """Tests for event_is_child_of() method.""" + + async def test_direct_child_returns_true(self): + """event_is_child_of returns True for direct parent-child relationship.""" + bus = EventBus(name='TestBus') + + try: + # Create parent-child relationship via dispatch inside handler + child_event_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_event_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + child = child_event_ref[0] + + # Verify the relationship + assert bus.event_is_child_of(child, parent) is True + + finally: + await bus.stop(clear=True) + + async def test_grandchild_returns_true(self): + """event_is_child_of returns True for grandparent relationship.""" + bus = EventBus(name='TestBus') + + try: + grandchild_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + grandchild = await bus.dispatch(GrandchildEvent()) + grandchild_ref.append(grandchild) + return 'child_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, lambda e: 'grandchild_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + grandchild = grandchild_ref[0] + + # Grandchild should be descendant of parent + assert bus.event_is_child_of(grandchild, parent) is True + + finally: + await bus.stop(clear=True) + + async def test_unrelated_events_returns_false(self): + """event_is_child_of returns False for unrelated events.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'parent_done') + bus.on(UnrelatedEvent, lambda e: 'unrelated_done') + + parent = await bus.dispatch(ParentEvent()) + unrelated = await bus.dispatch(UnrelatedEvent()) + + assert bus.event_is_child_of(unrelated, parent) is False + + finally: + await bus.stop(clear=True) + + async def test_same_event_returns_false(self): + """event_is_child_of returns False when checking event against itself.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + event = await bus.dispatch(ParentEvent()) + + assert bus.event_is_child_of(event, event) is False + + finally: + await bus.stop(clear=True) + + async def test_reversed_relationship_returns_false(self): + """event_is_child_of returns False when parent/child are reversed.""" + bus = EventBus(name='TestBus') + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + child = child_ref[0] + + # Parent is NOT a child of child + assert bus.event_is_child_of(parent, child) is False + + finally: + await bus.stop(clear=True) + + +class TestEventIsParentOf: + """Tests for event_is_parent_of() method.""" + + async def test_direct_parent_returns_true(self): + """event_is_parent_of returns True for direct parent-child relationship.""" + bus = EventBus(name='TestBus') + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + child = child_ref[0] + + # Parent IS parent of child + assert bus.event_is_parent_of(parent, child) is True + + finally: + await bus.stop(clear=True) + + async def test_grandparent_returns_true(self): + """event_is_parent_of returns True for grandparent relationship.""" + bus = EventBus(name='TestBus') + + try: + grandchild_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + grandchild = await bus.dispatch(GrandchildEvent()) + grandchild_ref.append(grandchild) + return 'child_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, lambda e: 'grandchild_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + grandchild = grandchild_ref[0] + + # Parent IS ancestor of grandchild + assert bus.event_is_parent_of(parent, grandchild) is True + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# find() Basic Functionality Tests +# ============================================================================= + + +class TestFindPastOnly: + """Tests for find(past=True, future=False) - equivalent to query().""" + + async def test_returns_matching_event_from_history(self): + """find(past=True, future=False) returns event from history.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.dispatch(ParentEvent()) + + # Find it in history (past=True = search all history) + found = await bus.find(ParentEvent, past=True, future=False) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_filters_by_time_window(self): + """find(past=0.1) only returns events from last 0.1 seconds.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an event + _old_event = await bus.dispatch(ParentEvent()) + + # Wait a bit + await asyncio.sleep(0.15) + + # Dispatch another event + new_event = await bus.dispatch(ParentEvent()) + + # With a very short past window, should only find the new event + found = await bus.find(ParentEvent, past=0.1, future=False) + assert found is not None + assert found.event_id == new_event.event_id + + # With a longer past window, should still find new event (most recent first) + found = await bus.find(ParentEvent, past=1.0, future=False) + assert found is not None + assert found.event_id == new_event.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_returns_none_when_all_events_too_old(self): + """find(past=0.05) returns None if all events are older than 0.05 seconds.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an event + await bus.dispatch(ParentEvent()) + + # Wait longer than our window + await asyncio.sleep(0.15) + + # With very short past window, should find nothing + found = await bus.find(ParentEvent, past=0.05, future=False) + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_returns_none_when_no_match(self): + """find(past=True, future=False) returns None when no matching event.""" + bus = EventBus(name='TestBus') + + try: + # No events dispatched + found = await bus.find(ParentEvent, past=True, future=False) + + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_respects_where_filter(self): + """find() applies where filter correctly.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch two events with different target_ids + await bus.dispatch(ScreenshotEvent(target_id='tab1')) + event2 = await bus.dispatch(ScreenshotEvent(target_id='tab2')) + + # Find only the one with target_id='tab2' + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab2', + past=True, + future=False, + ) + + assert found is not None + assert found.event_id == event2.event_id + + finally: + await bus.stop(clear=True) + + async def test_returns_most_recent_match(self): + """find() returns most recent matching event from history.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch multiple events + await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.01) # Ensure different timestamps + event2 = await bus.dispatch(ParentEvent()) + + # Should return the most recent + found = await bus.find(ParentEvent, past=True, future=False) + + assert found is not None + assert found.event_id == event2.event_id + + finally: + await bus.stop(clear=True) + + +class TestFindFutureOnly: + """Tests for find(past=False, future=...) - equivalent to expect().""" + + async def test_waits_for_future_event(self): + """find(past=False, future=1) waits for event to be dispatched.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Start waiting for event + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ParentEvent()) + + find_task = asyncio.create_task( + bus.find(ParentEvent, past=False, future=1) + ) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_future_float_timeout(self): + """find(future=0.01) times out quickly when no event.""" + bus = EventBus(name='TestBus') + + try: + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=False, future=0.01) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert elapsed < 0.1 # Should timeout quickly + + finally: + await bus.stop(clear=True) + + async def test_ignores_past_events(self): + """find(past=False, future=...) ignores events already in history.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + await bus.dispatch(ParentEvent()) + + # Should NOT find it (past=False), and timeout quickly + found = await bus.find(ParentEvent, past=False, future=0.01) + + assert found is None + + finally: + await bus.stop(clear=True) + + +class TestFindNeitherPastNorFuture: + """Tests for find(past=False, future=False) - should return None.""" + + async def test_returns_none_immediately(self): + """find(past=False, future=False) returns None immediately.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event + await bus.dispatch(ParentEvent()) + + # With both past and future disabled, should return None + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=False, future=False) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert elapsed < 0.1 # Should be instant + + finally: + await bus.stop(clear=True) + + +class TestFindPastAndFuture: + """Tests for find(past=..., future=...) - combined search.""" + + async def test_returns_past_event_immediately(self): + """find(past=True, future=5) returns past event without waiting.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.dispatch(ParentEvent()) + + # Should find it immediately from history + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=True, future=5) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is not None + assert found.event_id == dispatched.event_id + assert elapsed < 0.1 # Should be nearly instant + + finally: + await bus.stop(clear=True) + + async def test_waits_for_future_when_no_past_match(self): + """find(past=True, future=1) waits for future if no past match.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ChildEvent, lambda e: 'done') + + # Different event type in history + bus.on(ParentEvent, lambda e: 'done') + await bus.dispatch(ParentEvent()) + + # Start waiting for ChildEvent (not in history) + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ChildEvent()) + + find_task = asyncio.create_task( + bus.find(ChildEvent, past=True, future=1) + ) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_and_future_independent_control(self): + """past=0.05, future=0.05 uses different windows for each.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.15) + + # With short past window (0.05s), old event won't be found + # With short future window (0.05s), will timeout + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=0.05, future=0.05) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + # Should have waited ~0.05s for future + assert 0.04 < elapsed < 0.15 + + finally: + await bus.stop(clear=True) + + async def test_past_true_future_float(self): + """past=True searches all history, future=0.1 waits up to 0.1s.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + dispatched = await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.15) + + # past=True should find the old event (no time window) + found = await bus.find(ParentEvent, past=True, future=0.1) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_future_true_would_wait_forever(self): + """past=0.05 with old events + future=True - verify past window works.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.15) + + # past=0.05 won't find old event, but we dispatch a new one + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ParentEvent()) + + find_task = asyncio.create_task( + bus.find(ParentEvent, past=0.05, future=1) + ) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + # Should find the new event from future wait + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# find() with child_of Tests +# ============================================================================= + + +class TestFindWithChildOf: + """Tests for find() with child_of parameter.""" + + async def test_returns_child_of_specified_parent(self): + """find(child_of=parent) returns event that is child of parent.""" + bus = EventBus(name='TestBus') + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # Find child of parent + found = await bus.find(ChildEvent, child_of=parent, past=True, future=False) + + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_returns_none_for_non_child(self): + """find(child_of=parent) returns None if event is not a child.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'parent_done') + bus.on(UnrelatedEvent, lambda e: 'unrelated_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.dispatch(UnrelatedEvent()) + + # Should not find UnrelatedEvent as child of parent + found = await bus.find( + UnrelatedEvent, child_of=parent, past=True, future=False + ) + + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_finds_grandchild(self): + """find(child_of=grandparent) returns grandchild event.""" + bus = EventBus(name='TestBus') + + try: + grandchild_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + await bus.dispatch(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + grandchild = await bus.dispatch(GrandchildEvent()) + grandchild_ref.append(grandchild) + return 'child_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, lambda e: 'grandchild_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # Find grandchild of parent + found = await bus.find( + GrandchildEvent, child_of=parent, past=True, future=False + ) + + assert found is not None + assert found.event_id == grandchild_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_child_of_works_across_forwarded_buses(self): + """find(child_of=parent) works when events are forwarded across buses.""" + main_bus = EventBus(name='MainBus') + auth_bus = EventBus(name='AuthBus') + + try: + child_ref: list[BaseEvent] = [] + + # Forward ParentEvent from main_bus to auth_bus + main_bus.on(ParentEvent, auth_bus.dispatch) + + # auth_bus handles ParentEvent and dispatches a ChildEvent + async def auth_handler(event: ParentEvent) -> str: + child = await auth_bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'auth_done' + + auth_bus.on(ParentEvent, auth_handler) + auth_bus.on(ChildEvent, lambda e: 'child_done') + + # Dispatch on main_bus, which forwards to auth_bus + parent = await main_bus.dispatch(ParentEvent()) + await main_bus.wait_until_idle() + await auth_bus.wait_until_idle() + + # Find child event on auth_bus using parent from main_bus + found = await auth_bus.find( + ChildEvent, child_of=parent, past=5, future=5 + ) + + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await main_bus.stop(clear=True) + await auth_bus.stop(clear=True) + + +# ============================================================================= +# expect() Backwards Compatibility Tests +# ============================================================================= + + +class TestExpectBackwardsCompatibility: + """Tests to ensure expect() still works with old API.""" + + async def test_expect_waits_for_future_event(self): + """expect() still waits for future events (existing behavior).""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ParentEvent()) + + expect_task = asyncio.create_task(bus.expect(ParentEvent, timeout=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(expect_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_expect_with_include_filter(self): + """expect() with include parameter still works.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + async def dispatch_events(): + await asyncio.sleep(0.02) + await bus.dispatch(ScreenshotEvent(target_id='wrong')) + await asyncio.sleep(0.02) + return await bus.dispatch(ScreenshotEvent(target_id='correct')) + + expect_task = asyncio.create_task( + bus.expect( + ScreenshotEvent, + include=lambda e: e.target_id == 'correct', + timeout=1, + ) + ) + dispatch_task = asyncio.create_task(dispatch_events()) + + found, dispatched = await asyncio.gather(expect_task, dispatch_task) + + assert found is not None + assert found.target_id == 'correct' + + finally: + await bus.stop(clear=True) + + async def test_expect_with_exclude_filter(self): + """expect() with exclude parameter still works.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + async def dispatch_events(): + await asyncio.sleep(0.02) + await bus.dispatch(ScreenshotEvent(target_id='excluded')) + await asyncio.sleep(0.02) + return await bus.dispatch(ScreenshotEvent(target_id='included')) + + expect_task = asyncio.create_task( + bus.expect( + ScreenshotEvent, + exclude=lambda e: e.target_id == 'excluded', + timeout=1, + ) + ) + dispatch_task = asyncio.create_task(dispatch_events()) + + found, dispatched = await asyncio.gather(expect_task, dispatch_task) + + assert found is not None + assert found.target_id == 'included' + + finally: + await bus.stop(clear=True) + + async def test_expect_with_past_true(self): + """expect(past=True) finds already-dispatched events.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.dispatch(ParentEvent()) + + # expect with past=True should find it + found = await bus.expect(ParentEvent, past=True, timeout=5) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_expect_with_past_float(self): + """expect(past=5.0) searches last 5 seconds of history.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.dispatch(ParentEvent()) + + # expect with past=5.0 should find recent event + found = await bus.expect(ParentEvent, past=5.0, timeout=1) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_expect_with_child_of(self): + """expect(child_of=parent) filters by parent relationship.""" + bus = EventBus(name='TestBus') + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # expect with child_of and past=True + found = await bus.expect(ChildEvent, child_of=parent, past=True, timeout=5) + + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# Debouncing Pattern Tests (Issue #10) +# ============================================================================= + + +class TestDebouncingPattern: + """Tests for the debouncing pattern: find() or dispatch().""" + + async def test_returns_existing_fresh_event(self): + """Pattern returns existing event when fresh.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch a screenshot + original = await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Use debouncing pattern - should return the existing event + is_fresh = lambda e: (datetime.now(UTC) - e.event_completed_at).seconds < 5 + result = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1' and is_fresh(e), + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + assert result.event_id == original.event_id + + finally: + await bus.stop(clear=True) + + async def test_dispatches_new_when_no_match(self): + """Pattern dispatches new event when no matching event in history.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # No existing events - should dispatch new + result = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + assert result is not None + assert result.target_id == 'tab1' + assert result.event_status == 'completed' + + finally: + await bus.stop(clear=True) + + async def test_dispatches_new_when_stale(self): + """Pattern dispatches new event when existing is stale.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch an event + await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Filter that marks all events as stale + is_fresh = lambda e: False # Nothing is fresh + + result = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1' and is_fresh(e), + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Should be a new event (different ID) + assert result is not None + # Both events should be in history now + screenshots = [ + e for e in bus.event_history.values() if isinstance(e, ScreenshotEvent) + ] + assert len(screenshots) == 2 + + finally: + await bus.stop(clear=True) + + async def test_find_past_only_returns_immediately_without_waiting(self): + """find(past=True, future=False) returns immediately, never waits.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # No events in history - find should return None instantly + start = datetime.now(UTC) + result = await bus.find(ParentEvent, past=True, future=False) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert result is None + assert elapsed < 0.05 # Should be nearly instant (< 50ms) + + finally: + await bus.stop(clear=True) + + async def test_find_past_float_returns_immediately_without_waiting(self): + """find(past=5, future=False) returns immediately, never waits.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # No events in history - find should return None instantly + start = datetime.now(UTC) + result = await bus.find(ParentEvent, past=5, future=False) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert result is None + assert elapsed < 0.05 # Should be nearly instant (< 50ms) + + finally: + await bus.stop(clear=True) + + async def test_or_chain_without_waiting_finds_existing(self): + """Or-chain pattern finds existing events without blocking.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch first event + original = await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Or-chain should find existing event instantly + start = datetime.now(UTC) + result = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + elapsed = (datetime.now(UTC) - start).total_seconds() + + # Should return existing event + assert result.event_id == original.event_id + # Should be fast (no waiting) + assert elapsed < 0.1 + + finally: + await bus.stop(clear=True) + + async def test_or_chain_without_waiting_dispatches_when_no_match(self): + """Or-chain pattern dispatches new event when no match, still fast.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # No matching events - should dispatch new one + start = datetime.now(UTC) + result = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + elapsed = (datetime.now(UTC) - start).total_seconds() + + # Should have dispatched new event + assert result is not None + assert result.target_id == 'tab1' + # Should be fast (find returned None immediately, then dispatch ran) + assert elapsed < 0.1 + + finally: + await bus.stop(clear=True) + + async def test_or_chain_multiple_sequential_lookups(self): + """Multiple or-chain lookups work without blocking.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Multiple sequential debouncing calls + start = datetime.now(UTC) + + # First call - dispatches new + result1 = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Second call - finds existing + result2 = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab1')) + + # Third call - dispatches new (different target) + result3 = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab2', + past=True, + future=False, + ) or await bus.dispatch(ScreenshotEvent(target_id='tab2')) + + elapsed = (datetime.now(UTC) - start).total_seconds() + + # First two should be same event + assert result1.event_id == result2.event_id + # Third should be different + assert result3.event_id != result1.event_id + assert result3.target_id == 'tab2' + # All operations should be fast + assert elapsed < 0.2 + + finally: + await bus.stop(clear=True) + + async def test_find_without_await_is_a_coroutine(self): + """find() without await returns a coroutine that can be awaited.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Call find without await - should return a coroutine + coro = bus.find(ParentEvent, past=True, future=False) + + # Verify it's a coroutine + import inspect + + assert inspect.iscoroutine(coro) + + # Now await it + result = await coro + + assert result is None + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# Race Condition Fix Tests (Issue #15) +# ============================================================================= + + +class TestRaceConditionFix: + """Tests for the race condition fix where event fires before expect().""" + + async def test_find_catches_already_fired_event(self): + """find(past=True) catches event that fired before the call.""" + bus = EventBus(name='TestBus') + + try: + tab_ref: list[BaseEvent] = [] + + async def navigate_handler(event: NavigateEvent) -> str: + # This synchronously creates the tab event + tab = await bus.dispatch(TabCreatedEvent(tab_id='new_tab')) + tab_ref.append(tab) + return 'navigate_done' + + bus.on(NavigateEvent, navigate_handler) + bus.on(TabCreatedEvent, lambda e: 'tab_created') + + # Dispatch navigation - tab event fires during handler + nav_event = await bus.dispatch(NavigateEvent(url='https://example.com')) + + # By now TabCreatedEvent has already fired + # Using find(past=True) should catch it + found = await bus.find( + TabCreatedEvent, child_of=nav_event, past=True, future=False + ) + + assert found is not None + assert found.event_id == tab_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_child_of_filters_to_correct_parent(self): + """child_of correctly filters to events from the right parent.""" + bus = EventBus(name='TestBus') + + try: + async def navigate_handler(event: NavigateEvent) -> str: + await bus.dispatch(TabCreatedEvent(tab_id=f'tab_for_{event.url}')) + return 'navigate_done' + + bus.on(NavigateEvent, navigate_handler) + bus.on(TabCreatedEvent, lambda e: 'tab_created') + + # Two navigations, each creates a tab + nav1 = await bus.dispatch(NavigateEvent(url='site1')) + nav2 = await bus.dispatch(NavigateEvent(url='site2')) + + # Find tab created by nav1 specifically + tab1 = await bus.find( + TabCreatedEvent, child_of=nav1, past=True, future=False + ) + + # Find tab created by nav2 specifically + tab2 = await bus.find( + TabCreatedEvent, child_of=nav2, past=True, future=False + ) + + assert tab1 is not None + assert tab2 is not None + assert tab1.tab_id == 'tab_for_site1' + assert tab2.tab_id == 'tab_for_site2' + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# New Parameter Combination Tests +# ============================================================================= + + +class TestNewParameterCombinations: + """Tests for the new bool | float parameter combinations.""" + + async def test_past_true_future_false_searches_all_history(self): + """past=True, future=False searches all history instantly.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event and wait + dispatched = await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.1) + + # Should find old event with past=True + found = await bus.find(ParentEvent, past=True, future=False) + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_future_false_filters_by_age(self): + """past=0.05, future=False only searches last 0.05 seconds.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event + await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.1) # Make it old + + # past=0.05 means "events in last 0.05 seconds" = nothing old + found = await bus.find(ParentEvent, past=0.05, future=False) + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_past_false_future_float_waits_for_timeout(self): + """past=False, future=0.05 waits up to 0.05 seconds.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=False, future=0.05) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert 0.04 < elapsed < 0.15 # Should wait ~0.05s + + finally: + await bus.stop(clear=True) + + async def test_past_true_future_true_searches_all_and_waits_forever(self): + """past=True, future=True searches all history, would wait forever.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + dispatched = await bus.dispatch(ParentEvent()) + await asyncio.sleep(0.1) + + # past=True should find the old event immediately + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=True, future=True) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is not None + assert found.event_id == dispatched.event_id + assert elapsed < 0.1 # Should be instant (found in past) + + finally: + await bus.stop(clear=True) + + async def test_find_with_where_and_past_float(self): + """where filter combined with past=float works correctly.""" + bus = EventBus(name='TestBus') + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch events with different target_ids + await bus.dispatch(ScreenshotEvent(target_id='tab1')) + await asyncio.sleep(0.15) + event2 = await bus.dispatch(ScreenshotEvent(target_id='tab2')) + + # Find with both where filter and past window + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab2', + past=0.1, # Only search last 0.1 seconds + future=False, + ) + assert found is not None + assert found.event_id == event2.event_id + + # tab1 is too old for the past window + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab1', + past=0.1, + future=False, + ) + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_find_with_child_of_and_past_float(self): + """child_of filter combined with past=float works correctly.""" + bus = EventBus(name='TestBus') + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ChildEvent()) + child_ref.append(child) + return 'done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # Find child with past window - should work since event is fresh + found = await bus.find( + ChildEvent, + child_of=parent, + past=5, # 5 second window + future=False, + ) + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_find_with_all_parameters(self): + """All parameters combined work correctly.""" + bus = EventBus(name='TestBus') + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.dispatch(ScreenshotEvent(target_id='child_tab')) + child_ref.append(child) + return 'done' + + bus.on(ParentEvent, parent_handler) + bus.on(ScreenshotEvent, lambda e: 'done') + + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + # Find with all parameters + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'child_tab', + child_of=parent, + past=5, + future=False, + ) + assert found is not None + assert found.event_id == child_ref[0].event_id + assert found.target_id == 'child_tab' + + finally: + await bus.stop(clear=True) + + +if __name__ == '__main__': + pytest.main([__file__, '-v', '-s']) From fb3f4bb430be3c4bd9765d9d29b3bbbc294c69b6 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 16:10:55 -0800 Subject: [PATCH 32/79] fix bus name conflict checking and determinism around gc --- bubus/models.py | 16 +++-- bubus/service.py | 31 ++++---- tests/test_comprehensive_patterns.py | 2 + tests/test_context_propagation.py | 3 + tests/test_name_conflict_gc.py | 104 ++++++++++++++++----------- tests/test_typed_event_results.py | 3 + 6 files changed, 92 insertions(+), 67 deletions(-) diff --git a/bubus/models.py b/bubus/models.py index 5d28026..438d4d4 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -3,6 +3,7 @@ import inspect import logging import os +from collections import deque from collections.abc import Awaitable, Callable, Generator from datetime import UTC, datetime from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Generic, Literal, Protocol, Self, TypeAlias, cast, runtime_checkable @@ -282,7 +283,8 @@ def __str__(self) -> str: def _remove_self_from_queue(self, bus: 'EventBus') -> bool: """Remove this event from the bus's queue if present. Returns True if removed.""" if bus and bus.event_queue and hasattr(bus.event_queue, '_queue'): - queue = bus.event_queue._queue + # Access internal deque of asyncio.Queue (implementation detail) + queue = cast(deque[BaseEvent[Any]], bus.event_queue._queue) # type: ignore[attr-defined] if self in queue: queue.remove(self) return True @@ -304,8 +306,12 @@ async def _process_self_on_all_buses(self) -> None: max_iterations = 1000 # Prevent infinite loops iterations = 0 + # Cache the signal - in async context it will always be created + completed_signal = self.event_completed_signal + assert completed_signal is not None, 'event_completed_signal should exist in async context' + try: - while not self.event_completed_signal.is_set() and iterations < max_iterations: + while not completed_signal.is_set() and iterations < max_iterations: iterations += 1 processed_any = False @@ -322,10 +328,10 @@ async def _process_self_on_all_buses(self) -> None: processed_any = True # Check if we're done after processing - if self.event_completed_signal.is_set(): + if completed_signal.is_set(): break - if self.event_completed_signal.is_set(): + if completed_signal.is_set(): break if not processed_any: @@ -1102,7 +1108,7 @@ def sync_handler_with_context() -> Any: holds_global_lock.set(True) tokens = _enter_handler_context_callable(event, self.handler_id) try: - return handler(event) + return handler(event) # type: ignore[call-arg] # protocol allows _self param but we dont need it because it's already bound finally: _exit_handler_context_callable(tokens) diff --git a/bubus/service.py b/bubus/service.py index ef7fbef..f86fb3b 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -350,23 +350,10 @@ def __init__( for existing_bus in list(EventBus.all_instances): # Make a list copy to avoid modification during iteration if existing_bus is not self and existing_bus.name == self.name: - # Try to trigger collection of just this object by checking if it's collectable - # First, temporarily remove from WeakSet to see if that was the only reference - EventBus.all_instances.discard(existing_bus) - - # Check if the object is still reachable by creating a new weak reference - # If the object only existed in the WeakSet, it should be unreachable now - try: - # Try to access an attribute to see if the object is still valid - _ = existing_bus.name # This will work if object is still alive - - # Object is still alive with real references, restore to WeakSet - EventBus.all_instances.add(existing_bus) - conflicting_buses.append(existing_bus) - except Exception: - # Object was garbage collected or is invalid (e.g., AttributeError), that's fine - # Don't re-add to WeakSet, let it stay removed - pass + # Since stop() renames buses to _stopped_{id}, any bus with a matching + # user-specified name is either running or never-started - both should + # be considered conflicts. This makes name conflict detection deterministic. + conflicting_buses.append(existing_bus) # If we found conflicting buses, auto-generate a unique suffix if conflicting_buses: @@ -687,8 +674,8 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: # Capture dispatch-time context for propagation to handlers (GitHub issue #20) # This ensures ContextVars set before dispatch() are accessible in handlers - if event._event_dispatch_context is None: - event._event_dispatch_context = contextvars.copy_context() + if event._event_dispatch_context is None: # pyright: ignore[reportPrivateUsage] + event._event_dispatch_context = contextvars.copy_context() # pyright: ignore[reportPrivateUsage] # Track child events - if we're inside a handler, add this event to the handler's event_children list # Only track if this is a NEW event (not forwarding an existing event) @@ -1243,10 +1230,16 @@ async def stop(self, timeout: float | None = None, clear: bool = False) -> None: if self._on_idle: self._on_idle.set() + # Rename the bus to release the name. This ensures stopped buses don't + # cause name conflicts with new buses using the same name. This makes + # name conflict detection deterministic (not dependent on GC timing). + self.name = f'_stopped_{self.id[-8:]}' + # Clear event history and handlers if requested (for memory cleanup) if clear: self.event_history.clear() self.handlers.clear() + # Remove from global instance tracking if self in EventBus.all_instances: EventBus.all_instances.discard(self) diff --git a/tests/test_comprehensive_patterns.py b/tests/test_comprehensive_patterns.py index f39c0fd..cd86ae9 100644 --- a/tests/test_comprehensive_patterns.py +++ b/tests/test_comprehensive_patterns.py @@ -1,5 +1,7 @@ """Test comprehensive event patterns including forwarding, async/sync dispatch, and parent-child tracking.""" +# pyright: reportUnusedVariable=false + import asyncio from typing import Any diff --git a/tests/test_context_propagation.py b/tests/test_context_propagation.py index 36261a4..dd597e6 100644 --- a/tests/test_context_propagation.py +++ b/tests/test_context_propagation.py @@ -9,6 +9,9 @@ from the queue and handlers are executed). """ +# pyright: reportUnusedVariable=false +# pyright: reportUnusedFunction=false + import asyncio from contextvars import ContextVar from typing import Any diff --git a/tests/test_name_conflict_gc.py b/tests/test_name_conflict_gc.py index 0e42655..d136623 100644 --- a/tests/test_name_conflict_gc.py +++ b/tests/test_name_conflict_gc.py @@ -19,94 +19,110 @@ class TestNameConflictGC: def test_name_conflict_with_live_reference(self): """Test that name conflict generates a warning and auto-generates a unique name""" # Create an EventBus with a specific name - bus1 = EventBus(name='TestBus') + bus1 = EventBus(name='GCTestConflict') # Try to create another with the same name - should warn and auto-generate unique name - with pytest.warns(UserWarning, match='EventBus with name "TestBus" already exists'): - bus2 = EventBus(name='TestBus') + with pytest.warns(UserWarning, match='EventBus with name "GCTestConflict" already exists'): + bus2 = EventBus(name='GCTestConflict') # The second bus should have a unique name - assert bus2.name.startswith('TestBus_') - assert bus2.name != 'TestBus' - assert len(bus2.name) == len('TestBus_') + 8 # Original name + underscore + 8 char suffix + assert bus2.name.startswith('GCTestConflict_') + assert bus2.name != 'GCTestConflict' + assert len(bus2.name) == len('GCTestConflict_') + 8 # Original name + underscore + 8 char suffix def test_name_no_conflict_after_deletion(self): - """Test that name conflict is NOT raised after the existing bus is deleted""" + """Test that name conflict is NOT raised after the existing bus is deleted and GC runs""" + import gc + # Create an EventBus with a specific name - bus1 = EventBus(name='TestBus') + bus1 = EventBus(name='GCTestBus1') - # Delete the reference + # Delete the reference and force GC del bus1 + gc.collect() # Force garbage collection to release the WeakSet reference - # Creating another with the same name should work since the first one has no references - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' + # Creating another with the same name should work since the first one was collected + bus2 = EventBus(name='GCTestBus1') + assert bus2.name == 'GCTestBus1' def test_name_no_conflict_with_no_reference(self): """Test that name conflict is NOT raised when the existing bus was never assigned""" + import gc + # Create an EventBus with a specific name but don't keep a reference - EventBus(name='TestBus') # No assignment, will be garbage collected + EventBus(name='GCTestBus2') # No assignment, will be garbage collected + gc.collect() # Force garbage collection # Creating another with the same name should work since the first one is gone - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' + bus2 = EventBus(name='GCTestBus2') + assert bus2.name == 'GCTestBus2' def test_name_conflict_with_weak_reference_only(self): """Test that name conflict is NOT raised when only weak references exist""" + import gc + # Create an EventBus and keep only a weak reference - bus1 = EventBus(name='TestBus') + bus1 = EventBus(name='GCTestBus3') weak_ref = weakref.ref(bus1) # Verify the weak reference works assert weak_ref() is bus1 - # Delete the strong reference + # Delete the strong reference and force GC del bus1 + gc.collect() # Force garbage collection # At this point, only the weak reference exists (and the WeakSet reference) # Creating another with the same name should work - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' + bus2 = EventBus(name='GCTestBus3') + assert bus2.name == 'GCTestBus3' # The weak reference should now return None assert weak_ref() is None def test_multiple_buses_with_gc(self): """Test multiple EventBus instances with some being garbage collected""" + import gc + # Create multiple buses, some with strong refs, some without - bus1 = EventBus(name='Bus1') - EventBus(name='Bus2') # Will be GC'd - bus3 = EventBus(name='Bus3') - EventBus(name='Bus4') # Will be GC'd + bus1 = EventBus(name='GCMulti1') + EventBus(name='GCMulti2') # Will be GC'd + bus3 = EventBus(name='GCMulti3') + EventBus(name='GCMulti4') # Will be GC'd + + gc.collect() # Force garbage collection # Should be able to create new buses with the names of GC'd buses - bus2_new = EventBus(name='Bus2') - bus4_new = EventBus(name='Bus4') + bus2_new = EventBus(name='GCMulti2') + bus4_new = EventBus(name='GCMulti4') # But not with names of buses that still exist - they get auto-generated names - with pytest.warns(UserWarning, match='EventBus with name "Bus1" already exists'): - bus1_conflict = EventBus(name='Bus1') - assert bus1_conflict.name.startswith('Bus1_') + with pytest.warns(UserWarning, match='EventBus with name "GCMulti1" already exists'): + bus1_conflict = EventBus(name='GCMulti1') + assert bus1_conflict.name.startswith('GCMulti1_') - with pytest.warns(UserWarning, match='EventBus with name "Bus3" already exists'): - bus3_conflict = EventBus(name='Bus3') - assert bus3_conflict.name.startswith('Bus3_') + with pytest.warns(UserWarning, match='EventBus with name "GCMulti3" already exists'): + bus3_conflict = EventBus(name='GCMulti3') + assert bus3_conflict.name.startswith('GCMulti3_') @pytest.mark.asyncio async def test_name_conflict_after_stop_and_clear(self): """Test that clearing an EventBus allows reusing its name""" + import gc + # Create an EventBus - bus1 = EventBus(name='TestBus') + bus1 = EventBus(name='GCStopClear') - # Stop and clear it + # Stop and clear it (this renames the bus to _stopped_* and removes from all_instances) await bus1.stop(clear=True) - # Delete the reference to allow garbage collection + # Delete the reference and force GC del bus1 + gc.collect() # Now we should be able to create a new one with the same name - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' + bus2 = EventBus(name='GCStopClear') + assert bus2.name == 'GCStopClear' def test_weakset_behavior(self): """Test that the WeakSet properly tracks EventBus instances""" @@ -131,17 +147,19 @@ def test_weakset_behavior(self): # WeakTest2 might still be there until the next iteration def test_eventbus_removed_from_weakset(self): - """Test that our implementation removes dead EventBus from WeakSet during conflict check""" + """Test that dead EventBus instances are removed from WeakSet after GC""" + import gc + # Create a bus that will be "dead" (no strong references) - EventBus(name='DeadBus') + EventBus(name='GCDeadBus') + gc.collect() # Force garbage collection - # When we try to create a new bus with the same name, the conflict check - # should detect the dead bus and remove it from the WeakSet - bus = EventBus(name='DeadBus') - assert bus.name == 'DeadBus' + # When we try to create a new bus with the same name, it should work + bus = EventBus(name='GCDeadBus') + assert bus.name == 'GCDeadBus' # The dead bus should have been removed from all_instances - names = [b.name for b in EventBus.all_instances if hasattr(b, 'name') and b.name == 'DeadBus'] + names = [b.name for b in EventBus.all_instances if hasattr(b, 'name') and b.name == 'GCDeadBus'] assert len(names) == 1 # Only the new one def test_concurrent_name_creation(self): diff --git a/tests/test_typed_event_results.py b/tests/test_typed_event_results.py index 573b2c8..8613868 100644 --- a/tests/test_typed_event_results.py +++ b/tests/test_typed_event_results.py @@ -1,5 +1,8 @@ """Test typed event results with automatic casting.""" +# pyright: reportAssertTypeFailure=false +# pyright: reportUnnecessaryIsInstance=false + import asyncio from typing import Any, assert_type From aa6135704c3255bc48ef1ba78a7fa5c0281b4e44 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 17:12:19 -0800 Subject: [PATCH 33/79] add EventStatus enum and cleanup middlewares APIs for clarity --- .claude/settings.local.json | 3 +- bubus/__init__.py | 3 +- bubus/middlewares.py | 49 +-- bubus/models.py | 35 ++- bubus/service.py | 596 +++++++++++++++++++++--------------- tests/test_eventbus.py | 16 +- tests/test_find.py | 98 +++--- 7 files changed, 464 insertions(+), 336 deletions(-) diff --git a/.claude/settings.local.json b/.claude/settings.local.json index 43fc995..3cf27d5 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -23,7 +23,8 @@ "WebFetch(domain:github.com)", "Bash(timeout 60 .venv/bin/pytest:*)", "Bash(timeout 180 .venv/bin/pytest tests/ -v)", - "Bash(timeout 180 .venv/bin/pytest:*)" + "Bash(timeout 180 .venv/bin/pytest:*)", + "Bash(git tag:*)" ], "deny": [] } diff --git a/bubus/__init__.py b/bubus/__init__.py index 2bb0626..be3d8a3 100644 --- a/bubus/__init__.py +++ b/bubus/__init__.py @@ -7,7 +7,7 @@ SQLiteHistoryMirrorMiddleware, WALEventBusMiddleware, ) -from .models import BaseEvent, EventHandler, EventResult, PythonIdentifierStr, PythonIdStr, UUIDStr +from .models import BaseEvent, EventHandler, EventResult, EventStatus, PythonIdentifierStr, PythonIdStr, UUIDStr from .service import EventBus __all__ = [ @@ -19,6 +19,7 @@ 'EventHistory', 'InMemoryEventHistory', 'BaseEvent', + 'EventStatus', 'EventResult', 'EventHandler', 'UUIDStr', diff --git a/bubus/middlewares.py b/bubus/middlewares.py index 4d8f2f8..a00b22c 100644 --- a/bubus/middlewares.py +++ b/bubus/middlewares.py @@ -4,13 +4,13 @@ import asyncio import logging -import threading import sqlite3 +import threading from pathlib import Path from typing import Any from bubus.logging import log_eventbus_tree -from bubus.models import BaseEvent, EventResult +from bubus.models import BaseEvent, EventResult, EventStatus from bubus.service import EventBus from bubus.service import EventBusMiddleware as _EventBusMiddleware @@ -34,16 +34,15 @@ def __init__(self, wal_path: Path | str): self.wal_path.parent.mkdir(parents=True, exist_ok=True) self._lock = threading.Lock() - async def post_event_completed(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: - if getattr(event, '_wal_written', False): + async def on_event_complete(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: + if not self._once_per_event(event, 'wal_written'): return - if not self._event_is_complete(event): + if not event.event_is_complete(): return try: await asyncio.to_thread(self._write_event, event) - setattr(event, '_wal_written', True) except Exception as exc: # pragma: no cover - logging branch logger.error( '❌ %s Failed to save event %s to WAL file %s: %s %s', @@ -54,14 +53,6 @@ async def post_event_completed(self, eventbus: EventBus, event: BaseEvent[Any]) exc, ) - def _event_is_complete(self, event: BaseEvent[Any]) -> bool: - signal = event.event_completed_signal - if signal is not None and not signal.is_set(): - return False - if any(result.status not in ('completed', 'error') for result in event.event_results.values()): - return False - return event.event_are_all_children_complete() - def _write_event(self, event: BaseEvent[Any]) -> None: event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] with self._lock: @@ -77,15 +68,13 @@ def __init__(self, log_path: Path | str | None = None): if self.log_path is not None: self.log_path.parent.mkdir(parents=True, exist_ok=True) - async def post_event_completed(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: - if getattr(event, '_logger_middleware_logged', False): + async def on_event_complete(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: + if not self._once_per_event(event, 'logged'): return - if not self._event_is_complete(event): + if not event.event_is_complete(): return - setattr(event, '_logger_middleware_logged', True) - summary = event.event_log_safe_summary() logger.info('βœ… %s completed event %s', eventbus, summary) @@ -95,14 +84,6 @@ async def post_event_completed(self, eventbus: EventBus, event: BaseEvent[Any]) if logger.isEnabledFor(logging.DEBUG): log_eventbus_tree(eventbus) - def _event_is_complete(self, event: BaseEvent[Any]) -> bool: - signal = event.event_completed_signal - if signal is not None and not signal.is_set(): - return False - if any(result.status not in ('completed', 'error') for result in event.event_results.values()): - return False - return event.event_are_all_children_complete() - def _append_line(self, line: str) -> None: if self.log_path is not None: with self.log_path.open('a', encoding='utf-8') as fp: @@ -127,9 +108,9 @@ def __del__(self): except Exception: pass - async def post_event_snapshot_recorded(self, eventbus: EventBus, event: BaseEvent[Any], phase: str) -> None: + async def on_event_state_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: event_status = ( - 'error' if any(result.status == 'error' for result in event.event_results.values()) else event.event_status + EventStatus.ERROR if any(result.status == 'error' for result in event.event_results.values()) else event.event_status ) event_json = event.model_dump_json() await asyncio.to_thread( @@ -137,17 +118,17 @@ async def post_event_snapshot_recorded(self, eventbus: EventBus, event: BaseEven eventbus, event.event_id, event.event_type, - event_status, - phase, + str(event_status), + str(status), event_json, ) - async def post_event_handler_snapshot_recorded( + async def on_handler_state_change( self, eventbus: EventBus, event: BaseEvent[Any], event_result: EventResult[Any], - phase: str, + status: EventStatus, ) -> None: error_repr = repr(event_result.error) if event_result.error is not None else None result_repr: str | None = None @@ -172,7 +153,7 @@ async def post_event_handler_snapshot_recorded( eventbus.name, event.event_type, event_result.status, - phase, + str(status), result_repr, error_repr, event_result_json, diff --git a/bubus/models.py b/bubus/models.py index 438d4d4..398f3e3 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -6,6 +6,7 @@ from collections import deque from collections.abc import Awaitable, Callable, Generator from datetime import UTC, datetime +from enum import StrEnum from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Generic, Literal, Protocol, Self, TypeAlias, cast, runtime_checkable from uuid import UUID @@ -34,6 +35,18 @@ logger.setLevel(BUBUS_LOGGING_LEVEL) +class EventStatus(StrEnum): + """Status of an event or handler in the EventBus lifecycle. + + Using StrEnum ensures backwards compatibility - comparisons like + `status == 'pending'` still work since EventStatus.PENDING == 'pending'. + """ + PENDING = 'pending' + STARTED = 'started' + COMPLETED = 'completed' + ERROR = 'error' + + def validate_event_name(s: str) -> str: assert str(s).isidentifier() and not str(s).startswith('_'), f'Invalid event name: {s}' return str(s) @@ -323,7 +336,7 @@ async def _process_self_on_all_buses(self) -> None: # Check if THIS event is in this bus's queue if self._remove_self_from_queue(bus): # Process only this event on this bus - await bus.process_event(self) + await bus.handle_event(self) bus.event_queue.task_done() processed_any = True @@ -449,8 +462,24 @@ def event_completed_signal(self) -> asyncio.Event | None: return self._event_completed_signal @property - def event_status(self) -> str: - return 'completed' if self.event_completed_at else 'started' if self.event_started_at else 'pending' + def event_status(self) -> EventStatus: + """Current status of this event in the lifecycle.""" + return EventStatus.COMPLETED if self.event_completed_at else EventStatus.STARTED if self.event_started_at else EventStatus.PENDING + + def event_is_complete(self) -> bool: + """Check if this event and all its handlers/children have finished processing. + + Returns True if: + - The completion signal is set (if it exists) + - All handlers have status 'completed' or 'error' + - All child events are recursively complete + """ + signal = self.event_completed_signal + if signal is not None and not signal.is_set(): + return False + if any(result.status not in ('completed', 'error') for result in self.event_results.values()): + return False + return self.event_are_all_children_complete() @property def event_children(self) -> list['BaseEvent[Any]']: diff --git a/bubus/service.py b/bubus/service.py index f86fb3b..97ea32d 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -29,6 +29,7 @@ EventHandlerFunc, EventHandlerMethod, EventResult, + EventStatus, PythonIdentifierStr, PythonIdStr, T_Event, @@ -59,21 +60,48 @@ class QueueShutDown(Exception): class EventBusMiddleware: - """Hookable lifecycle interface for observing or extending EventBus execution.""" + """Hookable lifecycle interface for observing or extending EventBus execution. - async def pre_event_handler_started( + Override the hooks you need. All hooks are async and receive the EventBus instance. + + Hooks: + on_handler_start: Called just before a handler begins execution + on_handler_success: Called after a handler completes successfully + on_handler_error: Called when a handler raises or is cancelled + on_event_state_change: Called on event state transitions (pending/started/completed/error) + on_handler_state_change: Called on handler state transitions + on_event_complete: Called after an event and all handlers have finished + """ + + def _once_per_event(self, event: BaseEvent[Any], key: str) -> bool: + """Returns True the first time called for this event/key combo, False after. + + Use this to ensure idempotent processing when a hook might be called multiple times: + + async def on_event_complete(self, eventbus, event): + if not self._once_per_event(event, 'logged'): + return + # ... do work only once ... + """ + attr = f'_middleware_{id(self)}_{key}' + if getattr(event, attr, False): + return False + setattr(event, attr, True) + return True + + async def on_handler_start( self, eventbus: 'EventBus', event: BaseEvent[Any], event_result: EventResult[Any] ) -> None: """Called just before a handler begins execution.""" return None - async def post_event_handler_completed( + async def on_handler_success( self, eventbus: 'EventBus', event: BaseEvent[Any], event_result: EventResult[Any] ) -> None: """Called after a handler completes successfully.""" return None - async def post_event_handler_failed( + async def on_handler_error( self, eventbus: 'EventBus', event: BaseEvent[Any], @@ -83,23 +111,23 @@ async def post_event_handler_failed( """Called when a handler raises or is cancelled.""" return None - async def post_event_snapshot_recorded( - self, eventbus: 'EventBus', event: BaseEvent[Any], phase: str + async def on_event_state_change( + self, eventbus: 'EventBus', event: BaseEvent[Any], status: EventStatus ) -> None: - """Called whenever an event snapshot is persisted.""" + """Called on event state transitions (pending, started, completed, error).""" return None - async def post_event_handler_snapshot_recorded( + async def on_handler_state_change( self, eventbus: 'EventBus', event: BaseEvent[Any], event_result: EventResult[Any], - phase: str, + status: EventStatus, ) -> None: - """Called whenever a handler snapshot is persisted.""" + """Called on handler state transitions (pending, started, completed, error).""" return None - async def post_event_completed(self, eventbus: 'EventBus', event: BaseEvent[Any]) -> None: + async def on_event_complete(self, eventbus: 'EventBus', event: BaseEvent[Any]) -> None: """Called after an event and all of its handlers have finished.""" return None @@ -440,60 +468,60 @@ async def _call_middleware_hook( await result # Middleware fan-out helpers ------------------------------------------- # - async def _middlewares_post_event_snapshot_recorded( - self, event: BaseEvent[Any], phase: str + async def _middlewares_on_event_state_change( + self, event: BaseEvent[Any], status: EventStatus ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( - middleware, 'post_event_snapshot_recorded', self, event, phase + middleware, 'on_event_state_change', self, event, status ) - async def _middlewares_post_event_handler_snapshot_recorded( - self, event: BaseEvent[Any], event_result: EventResult[Any], phase: str + async def _middlewares_on_handler_state_change( + self, event: BaseEvent[Any], event_result: EventResult[Any], status: EventStatus ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( middleware, - 'post_event_handler_snapshot_recorded', + 'on_handler_state_change', self, event, event_result, - phase, + status, ) async def _maybe_record_event_started(self, event: BaseEvent[Any]) -> None: if getattr(event, '_history_started_logged', False): return setattr(event, '_history_started_logged', True) - await self._middlewares_post_event_snapshot_recorded(event, 'started') + await self._middlewares_on_event_state_change(event, EventStatus.STARTED) - async def _middlewares_pre_event_handler_started( + async def _middlewares_on_handler_start( self, event: BaseEvent[Any], event_result: EventResult[Any] ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( - middleware, 'pre_event_handler_started', self, event, event_result + middleware, 'on_handler_start', self, event, event_result ) - async def _middlewares_post_event_handler_completed( + async def _middlewares_on_handler_success( self, event: BaseEvent[Any], event_result: EventResult[Any] ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( - middleware, 'post_event_handler_completed', self, event, event_result + middleware, 'on_handler_success', self, event, event_result ) - async def _middlewares_post_event_handler_failed( + async def _middlewares_on_handler_error( self, event: BaseEvent[Any], event_result: EventResult[Any], error: BaseException ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( - middleware, 'post_event_handler_failed', self, event, event_result, error + middleware, 'on_handler_error', self, event, event_result, error ) - async def _middlewares_post_event_completed(self, event: BaseEvent[Any]) -> None: + async def _middlewares_on_event_complete(self, event: BaseEvent[Any]) -> None: for middleware in self._middlewares: - await self._call_middleware_hook(middleware, 'post_event_completed', self, event) + await self._call_middleware_hook(middleware, 'on_event_complete', self, event) async def _dispatch_after_event_hooks(self, event: BaseEvent[Any]) -> None: if getattr(event, '_after_event_hooks_run', False): @@ -510,15 +538,15 @@ async def _dispatch_after_event_hooks(self, event: BaseEvent[Any]) -> None: if not getattr(event, '_history_completed_logged', False): setattr(event, '_history_completed_logged', True) - final_phase = ( - 'error' + final_status = ( + EventStatus.ERROR if any(result.status == 'error' for result in event.event_results.values()) - else 'completed' + else EventStatus.COMPLETED ) - await self._middlewares_post_event_snapshot_recorded(event, final_phase) + await self._middlewares_on_event_state_change(event, final_status) setattr(event, '_after_event_hooks_run', True) - await self._middlewares_post_event_completed(event) + await self._middlewares_on_event_complete(event) @property def events_pending(self) -> list[BaseEvent[Any]]: @@ -578,7 +606,7 @@ def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerClassMet def on( self, event_pattern: EventPatternType, - handler: ( # TypeAlias with args doesnt work on overloaded signature, has to be defined inline + handler: ( # TypeAlias with args doesn't work on overloaded signature as of 2025, has to be defined inline! EventHandlerFunc[T_Event] | AsyncEventHandlerFunc[BaseEvent[Any]] | EventHandlerMethod[T_Event] @@ -627,7 +655,7 @@ def on( if new_handler_name in existing_registered_handlers: warnings.warn( f"⚠️ {self} Handler {new_handler_name} already registered for event '{event_key}'. " - f'This may cause ambiguous results when using name-based access. ' + f'This may make it difficult to filter event results by handler name. ' f'Consider using unique function names.', UserWarning, stacklevel=2, @@ -729,7 +757,7 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: self.event_history[event.event_id] = event loop = asyncio.get_running_loop() loop.create_task( - self._middlewares_post_event_snapshot_recorded(event, 'pending') + self._middlewares_on_event_state_change(event, EventStatus.PENDING) ) logger.info( f'πŸ—£οΈ {self}.dispatch({event.event_type}) ➑️ {event.event_type}#{event.event_id[-4:]} (#{self.event_queue.qsize()} {event.event_status})' @@ -760,6 +788,148 @@ def _event_matches_pattern(self, event: BaseEvent[Any], pattern: EventPatternTyp return event.event_type == pattern return isinstance(event, pattern) + @overload + async def find( + self, + event_type: type[T_ExpectedEvent], + where: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: True, + child_of: BaseEvent[Any] | None = None, + past: bool | float = True, + future: bool | float = True, + ) -> T_ExpectedEvent | None: ... + + @overload + async def find( + self, + event_type: PythonIdentifierStr, + where: Callable[[BaseEvent[Any]], bool] = lambda _: True, + child_of: BaseEvent[Any] | None = None, + past: bool | float = True, + future: bool | float = True, + ) -> BaseEvent[Any] | None: ... + + async def find( + self, + event_type: PythonIdentifierStr | type[T_ExpectedEvent], + where: Callable[[BaseEvent[Any]], bool] = lambda _: True, + child_of: BaseEvent[Any] | None = None, + past: bool | float = True, + future: bool | float = True, + ) -> BaseEvent[Any] | T_ExpectedEvent | None: + """ + Find an event matching criteria in history and/or future. + + This is a unified method that can search past event_history, wait for future + events, or both. Use this instead of separate query() and expect() calls. + + Args: + event_type: The event type string or model class to find + where: Predicate function for filtering (default: lambda _: True) + child_of: Only match events that are descendants of this parent event + past: Controls history search behavior: + - True: search all history + - False: skip history search + - float: search events from last N seconds only + future: Controls future wait behavior: + - True: wait forever for matching event + - False: don't wait for future events + - float: wait up to N seconds for matching event + + Returns: + Matching event or None if not found/timeout + + Examples: + # Search all history, wait up to 5s for future + event = await bus.find(EventType, past=True, future=5) + + # Search last 5s of history, wait forever + event = await bus.find(EventType, past=5, future=True) + + # Search last 5s of history, wait up to 5s + event = await bus.find(EventType, past=5, future=5) + + # Search all history instantly, don't wait (debouncing) + event = await bus.find(EventType, past=True, future=False) + + # Wait up to 5s for future only (like old expect) + event = await bus.find(EventType, past=False, future=5) + + # Find child event that may have already fired + nav_event = await bus.dispatch(NavigateToUrlEvent(...)) + new_tab = await bus.find(TabCreatedEvent, child_of=nav_event, past=True, future=5) + """ + # If neither past nor future, return None immediately + if past is False and future is False: + return None + + # Build combined predicate including child_of check + def matches(event: BaseEvent[Any]) -> bool: + if not where(event): + return False + if child_of is not None and not self.event_is_child_of(event, child_of): + return False + return True + + # Search past history if enabled + if past is not False: + # Calculate cutoff time if past is a float (time window in seconds) + cutoff: datetime | None = None + if past is not True: # past is a float/int specifying time window + cutoff = datetime.now(UTC) - timedelta(seconds=float(past)) + + events = list(self.event_history.values()) + for event in reversed(events): + # Only match completed events in history + if event.event_completed_at is None: + continue + # Skip events older than cutoff (dispatched before the time window) + if cutoff is not None and event.event_created_at < cutoff: + continue + if not self._event_matches_pattern(event, event_type): + continue + if matches(event): + return event + + # If not searching future, return None + if future is False: + return None + + # Wait for future events using expect-like pattern + future_result: asyncio.Future[BaseEvent[Any]] = asyncio.Future() + + def notify_find_handler(event: BaseEvent[Any]) -> None: + """Handler that resolves the future when a matching event is found""" + if not future_result.done() and matches(event): + future_result.set_result(event) + + # Add debugging info to handler name + current_frame = inspect.currentframe() + assert current_frame + notify_find_handler.__name__ = f'{self}.find({event_type}, past={past}, future={future})@{_log_pretty_path(current_frame.f_code.co_filename)}:{current_frame.f_lineno}' + + # Register temporary listener + self.on(event_type, notify_find_handler) + + # Ensure the temporary handler runs before user handlers + event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) + handlers_for_key = self.handlers.get(event_key) + if handlers_for_key and handlers_for_key[-1] is notify_find_handler: + handlers_for_key.insert(0, handlers_for_key.pop()) + + try: + # Wait forever if future is True, otherwise wait up to N seconds + if future is True: + return await future_result + else: + return await asyncio.wait_for(future_result, timeout=float(future)) + except asyncio.TimeoutError: + return None + finally: + # Clean up handler + event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) + if event_key in self.handlers and notify_find_handler in self.handlers[event_key]: + self.handlers[event_key].remove(notify_find_handler) + @overload async def expect( self, @@ -839,7 +1009,18 @@ async def expect( past=True, timeout=30 ) + + .. deprecated:: + Use find() instead for clearer semantics: + ``await bus.find(EventType, where=..., past=False, future=timeout)`` """ + warnings.warn( + 'expect() is deprecated, use find() instead. ' + 'Example: await bus.find(EventType, where=lambda e: ..., past=False, future=30)', + DeprecationWarning, + stacklevel=2, + ) + # Merge include/exclude/predicate into single where function for find() def where(event: BaseEvent[Any]) -> bool: if predicate is not None and not predicate(event): # type: ignore[truthy-function] @@ -890,47 +1071,62 @@ async def query( predicate: Callable[[BaseEvent[Any]], bool] = lambda _: True, since: timedelta | float | int | None = None, ) -> BaseEvent[Any] | T_QueryEvent | None: - """Return the most recent completed event matching the filters, or None if not found.""" - - if predicate is not None: # type: ignore[truthy-function] - original_include = include - - def combined_include(event: BaseEvent[Any]) -> bool: - return original_include(event) and predicate(event) - - include = combined_include - - if isinstance(since, (int, float)): - since = timedelta(seconds=float(since)) + """ + Return the most recent completed event matching the filters, or None if not found. - cutoff: datetime | None = None - if since is not None: - if since < timedelta(0): - raise ValueError('since must be non-negative') - cutoff = datetime.now(UTC) - since + This is a convenience wrapper around find() for searching history only. - events = list(self.event_history.values()) - for event in reversed(events): - if cutoff is not None and event.event_created_at < cutoff: - break - - if event.event_completed_at is None: - continue + Args: + event_type: The event type string or model class to find + include: Filter function that must return True for the event to match + exclude: Filter function that must return False for the event to match + predicate: Deprecated alias for include + since: Only search events from the last N seconds (timedelta, float, or int) - if not self._event_matches_pattern(event, event_type): - continue + Returns: + The most recent matching event, or None if not found - if exclude(event): - continue + .. deprecated:: + Use find() instead for clearer semantics: + ``await bus.find(EventType, where=..., past=since, future=False)`` + """ + warnings.warn( + 'query() is deprecated, use find() instead. ' + 'Example: await bus.find(EventType, where=lambda e: ..., past=True, future=False)', + DeprecationWarning, + stacklevel=2, + ) + # Merge include/exclude/predicate into single where function + def where(event: BaseEvent[Any]) -> bool: + if predicate is not None and not predicate(event): # type: ignore[truthy-function] + return False if not include(event): - continue + return False + if exclude(event): + return False + return True - # if isinstance(event_type, type): - # return cast(event_type, event) - return event + # Convert since to past parameter for find() + past_param: bool | float + if since is None: + past_param = True # Search all history + elif isinstance(since, timedelta): + if since < timedelta(0): + raise ValueError('since must be non-negative') + past_param = since.total_seconds() + else: + if since < 0: + raise ValueError('since must be non-negative') + past_param = float(since) - return None + # Delegate to find() with future=False (no waiting) + return await self.find( + event_type, + where=where, + past=past_param, + future=False, + ) def event_is_child_of(self, event: BaseEvent[Any], ancestor: BaseEvent[Any]) -> bool: """ @@ -969,162 +1165,8 @@ def event_is_child_of(self, event: BaseEvent[Any], ancestor: BaseEvent[Any]) -> return False def event_is_parent_of(self, event: BaseEvent[Any], descendant: BaseEvent[Any]) -> bool: - """ - Check if event is an ancestor of descendant (parent, grandparent, etc.). - - This is the inverse of event_is_child_of. - - Args: - event: The potential ancestor event - descendant: The potential descendant event - - Returns: - True if event is an ancestor of descendant, False otherwise - """ return self.event_is_child_of(descendant, event) - @overload - async def find( - self, - event_type: type[T_ExpectedEvent], - where: Callable[[BaseEvent[Any] | T_ExpectedEvent], bool] = lambda _: True, - child_of: BaseEvent[Any] | None = None, - past: bool | float = True, - future: bool | float = True, - ) -> T_ExpectedEvent | None: ... - - @overload - async def find( - self, - event_type: PythonIdentifierStr, - where: Callable[[BaseEvent[Any]], bool] = lambda _: True, - child_of: BaseEvent[Any] | None = None, - past: bool | float = True, - future: bool | float = True, - ) -> BaseEvent[Any] | None: ... - - async def find( - self, - event_type: PythonIdentifierStr | type[T_ExpectedEvent], - where: Callable[[BaseEvent[Any]], bool] = lambda _: True, - child_of: BaseEvent[Any] | None = None, - past: bool | float = True, - future: bool | float = True, - ) -> BaseEvent[Any] | T_ExpectedEvent | None: - """ - Find an event matching criteria in history and/or future. - - This is a unified method that can search past event_history, wait for future - events, or both. Use this instead of separate query() and expect() calls. - - Args: - event_type: The event type string or model class to find - where: Predicate function for filtering (default: lambda _: True) - child_of: Only match events that are descendants of this parent event - past: Controls history search behavior: - - True: search all history - - False: skip history search - - float: search events from last N seconds only - future: Controls future wait behavior: - - True: wait forever for matching event - - False: don't wait for future events - - float: wait up to N seconds for matching event - - Returns: - Matching event or None if not found/timeout - - Examples: - # Search all history, wait up to 5s for future - event = await bus.find(EventType, past=True, future=5) - - # Search last 5s of history, wait forever - event = await bus.find(EventType, past=5, future=True) - - # Search last 5s of history, wait up to 5s - event = await bus.find(EventType, past=5, future=5) - - # Search all history instantly, don't wait (debouncing) - event = await bus.find(EventType, past=True, future=False) - - # Wait up to 5s for future only (like old expect) - event = await bus.find(EventType, past=False, future=5) - - # Find child event that may have already fired - nav_event = await bus.dispatch(NavigateToUrlEvent(...)) - new_tab = await bus.find(TabCreatedEvent, child_of=nav_event, past=True, future=5) - """ - # If neither past nor future, return None immediately - if past is False and future is False: - return None - - # Build combined predicate including child_of check - def matches(event: BaseEvent[Any]) -> bool: - if not where(event): - return False - if child_of is not None and not self.event_is_child_of(event, child_of): - return False - return True - - # Search past history if enabled - if past is not False: - # Calculate cutoff time if past is a float (time window in seconds) - cutoff: datetime | None = None - if past is not True: # past is a float/int specifying time window - cutoff = datetime.now(UTC) - timedelta(seconds=float(past)) - - events = list(self.event_history.values()) - for event in reversed(events): - # Only match completed events in history - if event.event_completed_at is None: - continue - # Skip events older than cutoff (dispatched before the time window) - if cutoff is not None and event.event_created_at < cutoff: - continue - if not self._event_matches_pattern(event, event_type): - continue - if matches(event): - return event - - # If not searching future, return None - if future is False: - return None - - # Wait for future events using expect-like pattern - future_result: asyncio.Future[BaseEvent[Any]] = asyncio.Future() - - def notify_find_handler(event: BaseEvent[Any]) -> None: - """Handler that resolves the future when a matching event is found""" - if not future_result.done() and matches(event): - future_result.set_result(event) - - # Add debugging info to handler name - current_frame = inspect.currentframe() - assert current_frame - notify_find_handler.__name__ = f'{self}.find({event_type}, past={past}, future={future})@{_log_pretty_path(current_frame.f_code.co_filename)}:{current_frame.f_lineno}' - - # Register temporary listener - self.on(event_type, notify_find_handler) - - # Ensure the temporary handler runs before user handlers - event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) - handlers_for_key = self.handlers.get(event_key) - if handlers_for_key and handlers_for_key[-1] is notify_find_handler: - handlers_for_key.insert(0, handlers_for_key.pop()) - - try: - # Wait forever if future is True, otherwise wait up to N seconds - if future is True: - return await future_result - else: - return await asyncio.wait_for(future_result, timeout=float(future)) - except asyncio.TimeoutError: - return None - finally: - # Clean up handler - event_key = event_type.__name__ if isinstance(event_type, type) else str(event_type) - if event_key in self.handlers and notify_find_handler in self.handlers[event_key]: - self.handlers[event_key].remove(notify_find_handler) - def _start(self) -> None: """Start the event bus if not already running""" if not self._is_running: @@ -1379,7 +1421,40 @@ async def _get_next_event(self, wait_for_timeout: float = 0.1) -> 'BaseEvent[Any async def step( self, event: 'BaseEvent[Any] | None' = None, timeout: float | None = None, wait_for_timeout: float = 0.1 ) -> 'BaseEvent[Any] | None': - """Process a single event from the queue""" + """ + Consume and process a single event from the queue (one iteration of the run loop). + + This is the high-level "consumer" method that: + 1. Dequeues the next event (or uses one passed in) + 2. Acquires the global processing lock + 3. Calls handle_event() to execute handlers + 4. Marks the queue task as done (only if event came from queue) + 5. Manages idle state signaling + + Use this method when manually driving the event loop (e.g., in tests). + For automatic processing, use dispatch() which queues events for the run loop. + + Args: + event: Optional event to process directly (bypasses queue if provided) + timeout: Handler execution timeout in seconds + wait_for_timeout: How long to wait for next event from queue (default: 0.1s) + + Returns: + The processed event, or None if queue was empty/shutdown + + Warning: + Passing an event directly (bypassing the queue) is for advanced use only, be aware if: + + - **Event not in queue**: Works fine, handlers execute normally. + - **Event already completed**: Handlers will run AGAIN, overwriting previous + results. No guard against double-processing. + - **Event in queue but not next**: Event processes immediately, but STAYS + in queue. The run loop will process it again later (double-processing). + + See Also: + dispatch: Queues an event for normal async processing by the bus's existing run loop (recommended) + handle_event: Lower-level method that executes handlers (called by step) + """ assert self._on_idle and self.event_queue, 'EventBus._start() must be called before step()' # Track if we got the event from the queue @@ -1400,7 +1475,7 @@ async def step( # Always acquire the global lock (it's re-entrant across tasks) async with _get_global_lock(): # Process the event - await self.process_event(event, timeout=timeout) + await self.handle_event(event, timeout=timeout) # Mark task as done only if we got it from the queue if from_queue: @@ -1409,8 +1484,45 @@ async def step( logger.debug(f'βœ… {self}.step({event}) COMPLETE') return event - async def process_event(self, event: BaseEvent[Any], timeout: float | None = None) -> None: - """Process a single event (assumes lock is already held)""" + async def handle_event(self, event: BaseEvent[Any], timeout: float | None = None) -> None: + """ + Execute all applicable handlers for an event (low-level, assumes lock is held). + + This is the core event handling method that: + 1. Finds all applicable handlers (type-specific + wildcard) + 2. Creates pending EventResult placeholders + 3. Executes handlers (serially or in parallel based on bus config) + 4. Marks the event as complete when all handlers finish + 5. Propagates completion status up the parent event chain + 6. Cleans up event history if over size limit + + IMPORTANT: This method assumes the global processing lock is already held. + For safe external use, call step() instead which handles locking. + + Args: + event: The event to handle + timeout: Handler execution timeout in seconds (defaults to event.event_timeout) + + Warning: + This is a low-level method with no safety guards. Behavior in edge cases: + + - **Event not in queue**: Works fine, handlers execute normally. This method + does not interact with the queue at all. + - **Event already completed**: Handlers run AGAIN, ``event_create_pending_results()`` + overwrites previous results. No guard against double-processing. + - **Event in queue but not next**: Works fine for this call, but event stays + in queue and will be processed again later by the run loop. + - **Another event being processed (lock held elsewhere)**: If called without + holding the lock, concurrent handler execution may cause race conditions. + If called from within a handler (lock is re-entrant), causes nested processing. + - **This exact event already being processed**: Recursive/re-entrant processing. + Handlers run again while already running, results overwritten mid-execution. + Likely to cause undefined behavior. + + See Also: + step: High-level method that acquires lock and calls handle_event + dispatch: Queues an event for async processing (recommended) + """ # Get applicable handlers applicable_handlers = self._get_applicable_handlers(event) @@ -1518,8 +1630,8 @@ async def _execute_handlers( applicable_handlers, eventbus=self, timeout=timeout or event.event_timeout ) for pending_result in pending_results.values(): - await self._middlewares_post_event_handler_snapshot_recorded( - event, pending_result, 'pending' + await self._middlewares_on_handler_state_change( + event, pending_result, EventStatus.PENDING ) # Execute all handlers in parallel @@ -1572,19 +1684,19 @@ async def execute_handler( {handler_id: handler}, eventbus=self, timeout=timeout or event.event_timeout ) for pending_result in new_results.values(): - await self._middlewares_post_event_handler_snapshot_recorded( - event, pending_result, 'pending' + await self._middlewares_on_handler_state_change( + event, pending_result, EventStatus.PENDING ) event_result = event.event_results[handler_id] event_result.update(status='started', timeout=timeout or event.event_timeout) - await self._middlewares_post_event_handler_snapshot_recorded( - event, event_result, 'started' + await self._middlewares_on_handler_state_change( + event, event_result, EventStatus.STARTED ) await self._maybe_record_event_started(event) - await self._middlewares_pre_event_handler_started(event, event_result) + await self._middlewares_on_handler_start(event, event_result) try: result_value = await event_result.execute( @@ -1602,22 +1714,22 @@ async def execute_handler( f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {result_type_name}' ) - await self._middlewares_post_event_handler_completed(event, event_result) - await self._middlewares_post_event_handler_snapshot_recorded( - event, event_result, 'completed' + await self._middlewares_on_handler_success(event, event_result) + await self._middlewares_on_handler_state_change( + event, event_result, EventStatus.COMPLETED ) return cast(T_EventResultType, result_value) except asyncio.CancelledError as exc: - await self._middlewares_post_event_handler_failed(event, event_result, exc) - await self._middlewares_post_event_handler_snapshot_recorded( - event, event_result, 'error' + await self._middlewares_on_handler_error(event, event_result, exc) + await self._middlewares_on_handler_state_change( + event, event_result, EventStatus.ERROR ) raise except Exception as exc: - await self._middlewares_post_event_handler_failed(event, event_result, exc) - await self._middlewares_post_event_handler_snapshot_recorded( - event, event_result, 'error' + await self._middlewares_on_handler_error(event, event_result, exc) + await self._middlewares_on_handler_state_change( + event, event_result, EventStatus.ERROR ) raise diff --git a/tests/test_eventbus.py b/tests/test_eventbus.py index bfb8d00..e32e40c 100644 --- a/tests/test_eventbus.py +++ b/tests/test_eventbus.py @@ -318,10 +318,14 @@ def static_method_handler(event: UserActionEvent) -> str: processor1 = EventProcessor('Processor1', 10) processor2 = EventProcessor('Processor2', 20) - # Register instance methods + # Register instance methods (suppress warning about same-named handlers from different instances) + import warnings + eventbus.on(UserActionEvent, processor1.sync_method_handler) eventbus.on(UserActionEvent, processor1.async_method_handler) - eventbus.on(UserActionEvent, processor2.sync_method_handler) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + eventbus.on(UserActionEvent, processor2.sync_method_handler) # Register class and static methods eventbus.on('UserActionEvent', EventProcessor.class_method_handler) @@ -876,10 +880,10 @@ class TrackingMiddleware(EventBusMiddleware): def __init__(self, call_log: list[tuple[str, str]]): self.call_log = call_log - async def pre_event_handler_started(self, eventbus: EventBus, event: BaseEvent, event_result): + async def on_handler_start(self, eventbus: EventBus, event: BaseEvent, event_result): self.call_log.append(('before', event_result.status)) - async def post_event_handler_completed( + async def on_handler_success( self, eventbus: EventBus, event: BaseEvent, event_result ): self.call_log.append(('after', event_result.status)) @@ -906,10 +910,10 @@ class ErrorMiddleware(EventBusMiddleware): def __init__(self, log: list[tuple[str, str]]): self.log = log - async def pre_event_handler_started(self, eventbus: EventBus, event: BaseEvent, event_result): + async def on_handler_start(self, eventbus: EventBus, event: BaseEvent, event_result): self.log.append(('before', event_result.status)) - async def post_event_handler_failed( + async def on_handler_error( self, eventbus: EventBus, event: BaseEvent, diff --git a/tests/test_find.py b/tests/test_find.py index 510e418..6c4e574 100644 --- a/tests/test_find.py +++ b/tests/test_find.py @@ -64,7 +64,7 @@ class TestEventIsChildOf: async def test_direct_child_returns_true(self): """event_is_child_of returns True for direct parent-child relationship.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: # Create parent-child relationship via dispatch inside handler @@ -91,7 +91,7 @@ async def parent_handler(event: ParentEvent) -> str: async def test_grandchild_returns_true(self): """event_is_child_of returns True for grandparent relationship.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: grandchild_ref: list[BaseEvent] = [] @@ -122,7 +122,7 @@ async def child_handler(event: ChildEvent) -> str: async def test_unrelated_events_returns_false(self): """event_is_child_of returns False for unrelated events.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'parent_done') @@ -138,7 +138,7 @@ async def test_unrelated_events_returns_false(self): async def test_same_event_returns_false(self): """event_is_child_of returns False when checking event against itself.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -152,7 +152,7 @@ async def test_same_event_returns_false(self): async def test_reversed_relationship_returns_false(self): """event_is_child_of returns False when parent/child are reversed.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: child_ref: list[BaseEvent] = [] @@ -182,7 +182,7 @@ class TestEventIsParentOf: async def test_direct_parent_returns_true(self): """event_is_parent_of returns True for direct parent-child relationship.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: child_ref: list[BaseEvent] = [] @@ -208,7 +208,7 @@ async def parent_handler(event: ParentEvent) -> str: async def test_grandparent_returns_true(self): """event_is_parent_of returns True for grandparent relationship.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: grandchild_ref: list[BaseEvent] = [] @@ -248,7 +248,7 @@ class TestFindPastOnly: async def test_returns_matching_event_from_history(self): """find(past=True, future=False) returns event from history.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -267,7 +267,7 @@ async def test_returns_matching_event_from_history(self): async def test_past_float_filters_by_time_window(self): """find(past=0.1) only returns events from last 0.1 seconds.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -296,7 +296,7 @@ async def test_past_float_filters_by_time_window(self): async def test_past_float_returns_none_when_all_events_too_old(self): """find(past=0.05) returns None if all events are older than 0.05 seconds.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -316,7 +316,7 @@ async def test_past_float_returns_none_when_all_events_too_old(self): async def test_returns_none_when_no_match(self): """find(past=True, future=False) returns None when no matching event.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: # No events dispatched @@ -329,7 +329,7 @@ async def test_returns_none_when_no_match(self): async def test_respects_where_filter(self): """find() applies where filter correctly.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -354,7 +354,7 @@ async def test_respects_where_filter(self): async def test_returns_most_recent_match(self): """find() returns most recent matching event from history.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -379,7 +379,7 @@ class TestFindFutureOnly: async def test_waits_for_future_event(self): """find(past=False, future=1) waits for event to be dispatched.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -404,7 +404,7 @@ async def dispatch_after_delay(): async def test_future_float_timeout(self): """find(future=0.01) times out quickly when no event.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: start = datetime.now(UTC) @@ -419,7 +419,7 @@ async def test_future_float_timeout(self): async def test_ignores_past_events(self): """find(past=False, future=...) ignores events already in history.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -441,7 +441,7 @@ class TestFindNeitherPastNorFuture: async def test_returns_none_immediately(self): """find(past=False, future=False) returns None immediately.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -466,7 +466,7 @@ class TestFindPastAndFuture: async def test_returns_past_event_immediately(self): """find(past=True, future=5) returns past event without waiting.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -488,7 +488,7 @@ async def test_returns_past_event_immediately(self): async def test_waits_for_future_when_no_past_match(self): """find(past=True, future=1) waits for future if no past match.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ChildEvent, lambda e: 'done') @@ -517,7 +517,7 @@ async def dispatch_after_delay(): async def test_past_and_future_independent_control(self): """past=0.05, future=0.05 uses different windows for each.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -541,7 +541,7 @@ async def test_past_and_future_independent_control(self): async def test_past_true_future_float(self): """past=True searches all history, future=0.1 waits up to 0.1s.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -561,7 +561,7 @@ async def test_past_true_future_float(self): async def test_past_float_future_true_would_wait_forever(self): """past=0.05 with old events + future=True - verify past window works.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -600,7 +600,7 @@ class TestFindWithChildOf: async def test_returns_child_of_specified_parent(self): """find(child_of=parent) returns event that is child of parent.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: child_ref: list[BaseEvent] = [] @@ -627,7 +627,7 @@ async def parent_handler(event: ParentEvent) -> str: async def test_returns_none_for_non_child(self): """find(child_of=parent) returns None if event is not a child.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'parent_done') @@ -648,7 +648,7 @@ async def test_returns_none_for_non_child(self): async def test_finds_grandchild(self): """find(child_of=grandparent) returns grandchild event.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: grandchild_ref: list[BaseEvent] = [] @@ -728,7 +728,7 @@ class TestExpectBackwardsCompatibility: async def test_expect_waits_for_future_event(self): """expect() still waits for future events (existing behavior).""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -750,7 +750,7 @@ async def dispatch_after_delay(): async def test_expect_with_include_filter(self): """expect() with include parameter still works.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -780,7 +780,7 @@ async def dispatch_events(): async def test_expect_with_exclude_filter(self): """expect() with exclude parameter still works.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -810,7 +810,7 @@ async def dispatch_events(): async def test_expect_with_past_true(self): """expect(past=True) finds already-dispatched events.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -829,7 +829,7 @@ async def test_expect_with_past_true(self): async def test_expect_with_past_float(self): """expect(past=5.0) searches last 5 seconds of history.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -848,7 +848,7 @@ async def test_expect_with_past_float(self): async def test_expect_with_child_of(self): """expect(child_of=parent) filters by parent relationship.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: child_ref: list[BaseEvent] = [] @@ -884,7 +884,7 @@ class TestDebouncingPattern: async def test_returns_existing_fresh_event(self): """Pattern returns existing event when fresh.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -908,7 +908,7 @@ async def test_returns_existing_fresh_event(self): async def test_dispatches_new_when_no_match(self): """Pattern dispatches new event when no matching event in history.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -930,7 +930,7 @@ async def test_dispatches_new_when_no_match(self): async def test_dispatches_new_when_stale(self): """Pattern dispatches new event when existing is stale.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -961,7 +961,7 @@ async def test_dispatches_new_when_stale(self): async def test_find_past_only_returns_immediately_without_waiting(self): """find(past=True, future=False) returns immediately, never waits.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -979,7 +979,7 @@ async def test_find_past_only_returns_immediately_without_waiting(self): async def test_find_past_float_returns_immediately_without_waiting(self): """find(past=5, future=False) returns immediately, never waits.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -997,7 +997,7 @@ async def test_find_past_float_returns_immediately_without_waiting(self): async def test_or_chain_without_waiting_finds_existing(self): """Or-chain pattern finds existing events without blocking.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -1025,7 +1025,7 @@ async def test_or_chain_without_waiting_finds_existing(self): async def test_or_chain_without_waiting_dispatches_when_no_match(self): """Or-chain pattern dispatches new event when no match, still fast.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -1051,7 +1051,7 @@ async def test_or_chain_without_waiting_dispatches_when_no_match(self): async def test_or_chain_multiple_sequential_lookups(self): """Multiple or-chain lookups work without blocking.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -1098,7 +1098,7 @@ async def test_or_chain_multiple_sequential_lookups(self): async def test_find_without_await_is_a_coroutine(self): """find() without await returns a coroutine that can be awaited.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -1130,7 +1130,7 @@ class TestRaceConditionFix: async def test_find_catches_already_fired_event(self): """find(past=True) catches event that fired before the call.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: tab_ref: list[BaseEvent] = [] @@ -1161,7 +1161,7 @@ async def navigate_handler(event: NavigateEvent) -> str: async def test_child_of_filters_to_correct_parent(self): """child_of correctly filters to events from the right parent.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: async def navigate_handler(event: NavigateEvent) -> str: @@ -1204,7 +1204,7 @@ class TestNewParameterCombinations: async def test_past_true_future_false_searches_all_history(self): """past=True, future=False searches all history instantly.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -1223,7 +1223,7 @@ async def test_past_true_future_false_searches_all_history(self): async def test_past_float_future_false_filters_by_age(self): """past=0.05, future=False only searches last 0.05 seconds.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -1241,7 +1241,7 @@ async def test_past_float_future_false_filters_by_age(self): async def test_past_false_future_float_waits_for_timeout(self): """past=False, future=0.05 waits up to 0.05 seconds.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -1258,7 +1258,7 @@ async def test_past_false_future_float_waits_for_timeout(self): async def test_past_true_future_true_searches_all_and_waits_forever(self): """past=True, future=True searches all history, would wait forever.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ParentEvent, lambda e: 'done') @@ -1281,7 +1281,7 @@ async def test_past_true_future_true_searches_all_and_waits_forever(self): async def test_find_with_where_and_past_float(self): """where filter combined with past=float works correctly.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: bus.on(ScreenshotEvent, lambda e: 'done') @@ -1315,7 +1315,7 @@ async def test_find_with_where_and_past_float(self): async def test_find_with_child_of_and_past_float(self): """child_of filter combined with past=float works correctly.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: child_ref: list[BaseEvent] = [] @@ -1346,7 +1346,7 @@ async def parent_handler(event: ParentEvent) -> str: async def test_find_with_all_parameters(self): """All parameters combined work correctly.""" - bus = EventBus(name='TestBus') + bus = EventBus() try: child_ref: list[BaseEvent] = [] From 9d6b782458bf881abf5d405559d2bc34114edb88 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 18:06:42 -0800 Subject: [PATCH 34/79] massivly simplify middleware interface --- bubus/middlewares.py | 59 +++----- bubus/models.py | 20 +-- bubus/service.py | 195 ++++++-------------------- tests/test_event_history_mirroring.py | 2 +- tests/test_eventbus.py | 34 ++--- 5 files changed, 76 insertions(+), 234 deletions(-) diff --git a/bubus/middlewares.py b/bubus/middlewares.py index a00b22c..c883d6d 100644 --- a/bubus/middlewares.py +++ b/bubus/middlewares.py @@ -34,61 +34,47 @@ def __init__(self, wal_path: Path | str): self.wal_path.parent.mkdir(parents=True, exist_ok=True) self._lock = threading.Lock() - async def on_event_complete(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: - if not self._once_per_event(event, 'wal_written'): + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + if status != EventStatus.COMPLETED: return - - if not event.event_is_complete(): - return - try: - await asyncio.to_thread(self._write_event, event) - except Exception as exc: # pragma: no cover - logging branch - logger.error( - '❌ %s Failed to save event %s to WAL file %s: %s %s', - eventbus, - event.event_id, - self.wal_path, - type(exc).__name__, - exc, - ) + event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] + await asyncio.to_thread(self._write_line, event_json + '\n') + except Exception as exc: # pragma: no cover + logger.error('❌ %s Failed to save event %s to WAL: %s', eventbus, event.event_id, exc) - def _write_event(self, event: BaseEvent[Any]) -> None: - event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] + def _write_line(self, line: str) -> None: with self._lock: with self.wal_path.open('a', encoding='utf-8') as fp: - fp.write(event_json + '\n') + fp.write(line) class LoggerEventBusMiddleware(EventBusMiddleware): - """Log completed events using the existing logging helpers and optionally mirror to a text file.""" + """Log completed events to stdout and optionally to a file.""" def __init__(self, log_path: Path | str | None = None): self.log_path = Path(log_path) if log_path is not None else None if self.log_path is not None: self.log_path.parent.mkdir(parents=True, exist_ok=True) - async def on_event_complete(self, eventbus: EventBus, event: BaseEvent[Any]) -> None: - if not self._once_per_event(event, 'logged'): - return - - if not event.event_is_complete(): + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + if status != EventStatus.COMPLETED: return summary = event.event_log_safe_summary() logger.info('βœ… %s completed event %s', eventbus, summary) - line = f'[{eventbus.name}] {summary}\n' - await asyncio.to_thread(self._append_line, line) + + if self.log_path is not None: + await asyncio.to_thread(self._write_line, line) + print(line.rstrip('\n'), flush=True) if logger.isEnabledFor(logging.DEBUG): log_eventbus_tree(eventbus) - def _append_line(self, line: str) -> None: - if self.log_path is not None: - with self.log_path.open('a', encoding='utf-8') as fp: - fp.write(line) - print(line.rstrip('\n'), flush=True) + def _write_line(self, line: str) -> None: + with self.log_path.open('a', encoding='utf-8') as fp: # type: ignore[union-attr] + fp.write(line) class SQLiteHistoryMirrorMiddleware(EventBusMiddleware): @@ -108,22 +94,19 @@ def __del__(self): except Exception: pass - async def on_event_state_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: - event_status = ( - EventStatus.ERROR if any(result.status == 'error' for result in event.event_results.values()) else event.event_status - ) + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: event_json = event.model_dump_json() await asyncio.to_thread( self._insert_event_snapshot, eventbus, event.event_id, event.event_type, - str(event_status), + str(event.event_status), str(status), event_json, ) - async def on_handler_state_change( + async def on_event_result_change( self, eventbus: EventBus, event: BaseEvent[Any], diff --git a/bubus/models.py b/bubus/models.py index 398f3e3..870fd79 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -43,8 +43,7 @@ class EventStatus(StrEnum): """ PENDING = 'pending' STARTED = 'started' - COMPLETED = 'completed' - ERROR = 'error' + COMPLETED = 'completed' # errored events are also considered completed def validate_event_name(s: str) -> str: @@ -286,8 +285,6 @@ def __str__(self) -> str: if self.event_status == 'pending' else 'βœ…' if self.event_status == 'completed' - else '❌' - if self.event_status == 'error' else 'πŸƒ' ) # AuthBus≫DataBusβ–Ά AuthLoginEvent#ab12 ⏳ @@ -466,21 +463,6 @@ def event_status(self) -> EventStatus: """Current status of this event in the lifecycle.""" return EventStatus.COMPLETED if self.event_completed_at else EventStatus.STARTED if self.event_started_at else EventStatus.PENDING - def event_is_complete(self) -> bool: - """Check if this event and all its handlers/children have finished processing. - - Returns True if: - - The completion signal is set (if it exists) - - All handlers have status 'completed' or 'error' - - All child events are recursively complete - """ - signal = self.event_completed_signal - if signal is not None and not signal.is_set(): - return False - if any(result.status not in ('completed', 'error') for result in self.event_results.values()): - return False - return self.event_are_all_children_complete() - @property def event_children(self) -> list['BaseEvent[Any]']: """Get all child events dispatched from within this event's handlers""" diff --git a/bubus/service.py b/bubus/service.py index 97ea32d..79b27a0 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -62,74 +62,26 @@ class QueueShutDown(Exception): class EventBusMiddleware: """Hookable lifecycle interface for observing or extending EventBus execution. - Override the hooks you need. All hooks are async and receive the EventBus instance. - Hooks: - on_handler_start: Called just before a handler begins execution - on_handler_success: Called after a handler completes successfully - on_handler_error: Called when a handler raises or is cancelled - on_event_state_change: Called on event state transitions (pending/started/completed/error) - on_handler_state_change: Called on handler state transitions - on_event_complete: Called after an event and all handlers have finished - """ - - def _once_per_event(self, event: BaseEvent[Any], key: str) -> bool: - """Returns True the first time called for this event/key combo, False after. - - Use this to ensure idempotent processing when a hook might be called multiple times: + on_event_change(eventbus, event, status): Called on event state transitions + on_event_result_change(eventbus, event, event_result, status): Called on EventResult state transitions - async def on_event_complete(self, eventbus, event): - if not self._once_per_event(event, 'logged'): - return - # ... do work only once ... - """ - attr = f'_middleware_{id(self)}_{key}' - if getattr(event, attr, False): - return False - setattr(event, attr, True) - return True - - async def on_handler_start( - self, eventbus: 'EventBus', event: BaseEvent[Any], event_result: EventResult[Any] - ) -> None: - """Called just before a handler begins execution.""" - return None - - async def on_handler_success( - self, eventbus: 'EventBus', event: BaseEvent[Any], event_result: EventResult[Any] - ) -> None: - """Called after a handler completes successfully.""" - return None - - async def on_handler_error( - self, - eventbus: 'EventBus', - event: BaseEvent[Any], - event_result: EventResult[Any], - error: BaseException, - ) -> None: - """Called when a handler raises or is cancelled.""" - return None + Status values: EventStatus.PENDING, STARTED, COMPLETED, ERROR + """ - async def on_event_state_change( + async def on_event_change( self, eventbus: 'EventBus', event: BaseEvent[Any], status: EventStatus ) -> None: """Called on event state transitions (pending, started, completed, error).""" - return None - async def on_handler_state_change( + async def on_event_result_change( self, eventbus: 'EventBus', event: BaseEvent[Any], event_result: EventResult[Any], status: EventStatus, ) -> None: - """Called on handler state transitions (pending, started, completed, error).""" - return None - - async def on_event_complete(self, eventbus: 'EventBus', event: BaseEvent[Any]) -> None: - """Called after an event and all of its handlers have finished.""" - return None + """Called on EventResult state transitions (pending, started, completed, error).""" def _is_middleware_class(candidate: object) -> TypeGuard[type['EventBusMiddleware']]: @@ -467,87 +419,19 @@ async def _call_middleware_hook( if inspect.isawaitable(result): await result - # Middleware fan-out helpers ------------------------------------------- # - async def _middlewares_on_event_state_change( - self, event: BaseEvent[Any], status: EventStatus - ) -> None: + # Middleware fan-out ---------------------------------------------------- # + async def _emit_event_change(self, event: BaseEvent[Any], status: EventStatus) -> None: for middleware in self._middlewares: - await self._call_middleware_hook( - middleware, 'on_event_state_change', self, event, status - ) + await self._call_middleware_hook(middleware, 'on_event_change', self, event, status) - async def _middlewares_on_handler_state_change( + async def _emit_event_result_change( self, event: BaseEvent[Any], event_result: EventResult[Any], status: EventStatus ) -> None: for middleware in self._middlewares: await self._call_middleware_hook( - middleware, - 'on_handler_state_change', - self, - event, - event_result, - status, - ) - - async def _maybe_record_event_started(self, event: BaseEvent[Any]) -> None: - if getattr(event, '_history_started_logged', False): - return - setattr(event, '_history_started_logged', True) - await self._middlewares_on_event_state_change(event, EventStatus.STARTED) - - async def _middlewares_on_handler_start( - self, event: BaseEvent[Any], event_result: EventResult[Any] - ) -> None: - for middleware in self._middlewares: - await self._call_middleware_hook( - middleware, 'on_handler_start', self, event, event_result + middleware, 'on_event_result_change', self, event, event_result, status ) - async def _middlewares_on_handler_success( - self, event: BaseEvent[Any], event_result: EventResult[Any] - ) -> None: - for middleware in self._middlewares: - await self._call_middleware_hook( - middleware, 'on_handler_success', self, event, event_result - ) - - async def _middlewares_on_handler_error( - self, event: BaseEvent[Any], event_result: EventResult[Any], error: BaseException - ) -> None: - for middleware in self._middlewares: - await self._call_middleware_hook( - middleware, 'on_handler_error', self, event, event_result, error - ) - - async def _middlewares_on_event_complete(self, event: BaseEvent[Any]) -> None: - for middleware in self._middlewares: - await self._call_middleware_hook(middleware, 'on_event_complete', self, event) - - async def _dispatch_after_event_hooks(self, event: BaseEvent[Any]) -> None: - if getattr(event, '_after_event_hooks_run', False): - return - - event_completed = False - if event.event_completed_signal is not None and event.event_completed_signal.is_set(): - event_completed = True - elif event.event_results and all(result.status in ('completed', 'error') for result in event.event_results.values()): - event_completed = True - - if not event_completed: - return - - if not getattr(event, '_history_completed_logged', False): - setattr(event, '_history_completed_logged', True) - final_status = ( - EventStatus.ERROR - if any(result.status == 'error' for result in event.event_results.values()) - else EventStatus.COMPLETED - ) - await self._middlewares_on_event_state_change(event, final_status) - - setattr(event, '_after_event_hooks_run', True) - await self._middlewares_on_event_complete(event) - @property def events_pending(self) -> list[BaseEvent[Any]]: """Get events that haven't started processing yet (does not include events that have not even finished dispatching yet in self.event_queue)""" @@ -757,7 +641,7 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: self.event_history[event.event_id] = event loop = asyncio.get_running_loop() loop.create_task( - self._middlewares_on_event_state_change(event, EventStatus.PENDING) + self._emit_event_change(event, EventStatus.PENDING) ) logger.info( f'πŸ—£οΈ {self}.dispatch({event.event_type}) ➑️ {event.event_type}#{event.event_id[-4:]} (#{self.event_queue.qsize()} {event.event_status})' @@ -1534,10 +1418,12 @@ async def handle_event(self, event: BaseEvent[Any], timeout: float | None = None # Execute handlers await self._execute_handlers(event, handlers=applicable_handlers, timeout=timeout) - # Mark event as complete if all handlers are done + # Mark event as complete and emit change if it just completed + was_complete = event.event_completed_signal and event.event_completed_signal.is_set() event.event_mark_complete_if_all_handlers_completed() - - await self._dispatch_after_event_hooks(event) + just_completed = not was_complete and event.event_completed_signal and event.event_completed_signal.is_set() + if just_completed: + await self._emit_event_change(event, EventStatus.COMPLETED) # After processing this event, check if any parent events can now be marked complete # We do this by walking up the parent chain @@ -1561,11 +1447,12 @@ async def handle_event(self, event: BaseEvent[Any], timeout: float | None = None break # Check if parent can be marked complete - if parent_event.event_completed_signal and not parent_event.event_completed_signal.is_set(): + was_complete = parent_event.event_completed_signal and parent_event.event_completed_signal.is_set() + if not was_complete: parent_event.event_mark_complete_if_all_handlers_completed() - - if parent_bus: - await parent_bus._dispatch_after_event_hooks(parent_event) + just_completed = not was_complete and parent_event.event_completed_signal and parent_event.event_completed_signal.is_set() + if parent_bus and just_completed: + await parent_bus._emit_event_change(parent_event, EventStatus.COMPLETED) # Move up the chain current = parent_event @@ -1623,14 +1510,13 @@ async def _execute_handlers( """Execute all handlers for an event in parallel""" applicable_handlers = handlers if (handlers is not None) else self._get_applicable_handlers(event) if not applicable_handlers: - event.event_mark_complete_if_all_handlers_completed() # mark event completed immediately if it has no handlers - return + return # handle_event will mark complete pending_results = event.event_create_pending_results( applicable_handlers, eventbus=self, timeout=timeout or event.event_timeout ) for pending_result in pending_results.values(): - await self._middlewares_on_handler_state_change( + await self._emit_event_result_change( event, pending_result, EventStatus.PENDING ) @@ -1684,19 +1570,21 @@ async def execute_handler( {handler_id: handler}, eventbus=self, timeout=timeout or event.event_timeout ) for pending_result in new_results.values(): - await self._middlewares_on_handler_state_change( + await self._emit_event_result_change( event, pending_result, EventStatus.PENDING ) event_result = event.event_results[handler_id] + # Check if this is the first handler to start (before updating status) + is_first_handler = not any(r.started_at for r in event.event_results.values()) + event_result.update(status='started', timeout=timeout or event.event_timeout) - await self._middlewares_on_handler_state_change( - event, event_result, EventStatus.STARTED - ) - await self._maybe_record_event_started(event) + await self._emit_event_result_change(event, event_result, EventStatus.STARTED) - await self._middlewares_on_handler_start(event, event_result) + # Emit event STARTED once (when first handler starts) + if is_first_handler: + await self._emit_event_change(event, EventStatus.STARTED) try: result_value = await event_result.execute( @@ -1714,22 +1602,19 @@ async def execute_handler( f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {result_type_name}' ) - await self._middlewares_on_handler_success(event, event_result) - await self._middlewares_on_handler_state_change( + await self._emit_event_result_change( event, event_result, EventStatus.COMPLETED ) return cast(T_EventResultType, result_value) - except asyncio.CancelledError as exc: - await self._middlewares_on_handler_error(event, event_result, exc) - await self._middlewares_on_handler_state_change( - event, event_result, EventStatus.ERROR + except asyncio.CancelledError: + await self._emit_event_result_change( + event, event_result, EventStatus.COMPLETED ) raise - except Exception as exc: - await self._middlewares_on_handler_error(event, event_result, exc) - await self._middlewares_on_handler_state_change( - event, event_result, EventStatus.ERROR + except Exception: + await self._emit_event_result_change( + event, event_result, EventStatus.COMPLETED ) raise diff --git a/tests/test_event_history_mirroring.py b/tests/test_event_history_mirroring.py index 80bb2d6..ac2bbef 100644 --- a/tests/test_event_history_mirroring.py +++ b/tests/test_event_history_mirroring.py @@ -103,7 +103,7 @@ async def test_sqlite_mirror_matches_inmemory_error(tmp_path: Path) -> None: conn = sqlite3.connect(db_path) phases = conn.execute('SELECT DISTINCT phase FROM events_log').fetchall() conn.close() - assert {phase for (phase,) in phases} >= {'pending', 'started', 'error'} + assert {phase for (phase,) in phases} >= {'pending', 'started', 'completed'} def _worker_dispatch(db_path: str, worker_id: int) -> None: diff --git a/tests/test_eventbus.py b/tests/test_eventbus.py index e32e40c..10453e3 100644 --- a/tests/test_eventbus.py +++ b/tests/test_eventbus.py @@ -880,13 +880,11 @@ class TrackingMiddleware(EventBusMiddleware): def __init__(self, call_log: list[tuple[str, str]]): self.call_log = call_log - async def on_handler_start(self, eventbus: EventBus, event: BaseEvent, event_result): - self.call_log.append(('before', event_result.status)) - - async def on_handler_success( - self, eventbus: EventBus, event: BaseEvent, event_result - ): - self.call_log.append(('after', event_result.status)) + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + if status == 'started': + self.call_log.append(('before', event_result.status)) + elif status == 'completed': + self.call_log.append(('after', event_result.status)) bus = EventBus(middlewares=[TrackingMiddleware(calls)]) bus.on('UserActionEvent', lambda event: 'ok') @@ -910,17 +908,11 @@ class ErrorMiddleware(EventBusMiddleware): def __init__(self, log: list[tuple[str, str]]): self.log = log - async def on_handler_start(self, eventbus: EventBus, event: BaseEvent, event_result): - self.log.append(('before', event_result.status)) - - async def on_handler_error( - self, - eventbus: EventBus, - event: BaseEvent, - event_result, - error: BaseException, - ): - self.log.append(('error', type(error).__name__)) + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + if status == 'started': + self.log.append(('before', event_result.status)) + elif status == 'completed' and event_result.error: + self.log.append(('error', type(event_result.error).__name__)) async def failing_handler(event: BaseEvent) -> None: raise ValueError('boom') @@ -1035,11 +1027,11 @@ async def failing_handler(event: BaseEvent) -> None: events = conn.execute('SELECT phase, event_status FROM events_log ORDER BY id').fetchall() conn.close() - assert [phase for phase, *_ in result_rows] == ['pending', 'started', 'error'] + assert [phase for phase, *_ in result_rows] == ['pending', 'started', 'completed'] assert [status for _, status, *_ in result_rows] == ['pending', 'started', 'error'] assert 'RuntimeError' in result_rows[-1][2] - assert [phase for phase, _ in events] == ['pending', 'started', 'error'] - assert [status for _, status in events] == ['pending', 'started', 'error'] + assert [phase for phase, _ in events] == ['pending', 'started', 'completed'] + assert [status for _, status in events] == ['pending', 'started', 'completed'] finally: await bus.stop() From f6abe6d395f9246d49969bf69c694fa27695464d Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 18:17:03 -0800 Subject: [PATCH 35/79] fix middlewares type --- bubus/service.py | 77 ++++++++++++------------------------------------ 1 file changed, 19 insertions(+), 58 deletions(-) diff --git a/bubus/service.py b/bubus/service.py index 79b27a0..076124d 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -10,7 +10,7 @@ from contextvars import ContextVar from datetime import UTC, datetime, timedelta from pathlib import Path -from typing import Any, Literal, TypeGuard, TypeVar, cast, overload +from typing import Any, Literal, TypeVar, cast, overload from uuid_extensions import uuid7str # pyright: ignore[reportMissingImports, reportUnknownVariableType] @@ -84,10 +84,6 @@ async def on_event_result_change( """Called on EventResult state transitions (pending, started, completed, error).""" -def _is_middleware_class(candidate: object) -> TypeGuard[type['EventBusMiddleware']]: - return isinstance(candidate, type) and issubclass(candidate, EventBusMiddleware) - - class CleanShutdownQueue(asyncio.Queue[QueueEntryType]): """asyncio.Queue subclass that handles shutdown cleanly without warnings.""" @@ -314,7 +310,7 @@ def __init__( name: PythonIdentifierStr | None = None, parallel_handlers: bool = False, max_history_size: int | None = 50, # Keep only 50 events in history - middlewares: Sequence[EventBusMiddleware | type[EventBusMiddleware]] | None = None, + middlewares: Sequence[EventBusMiddleware] | None = None, ): self.id = uuid7str() self.name = name or f'{self.__class__.__name__}_{self.id[-8:]}' @@ -354,8 +350,7 @@ def __init__( self.handlers = defaultdict(list) self.parallel_handlers = parallel_handlers self._on_idle = None - self._middlewares: list[EventBusMiddleware] = [] - self.middlewares = list(middlewares or []) + self.middlewares: list[EventBusMiddleware] = list(middlewares or []) # Memory leak prevention settings self.max_history_size = max_history_size @@ -388,49 +383,15 @@ def __str__(self) -> str: def __repr__(self) -> str: return str(self) - @property - def middlewares(self) -> list[EventBusMiddleware]: - return getattr(self, '_middlewares', []) - - @middlewares.setter - def middlewares(self, value: Sequence[EventBusMiddleware | type[EventBusMiddleware]]) -> None: - instances: list[EventBusMiddleware] = [] - for middleware in value: - if isinstance(middleware, EventBusMiddleware): - instances.append(middleware) - elif _is_middleware_class(middleware): - instances.append(middleware()) - else: - raise TypeError( - f'Invalid middleware {middleware!r}. Expected EventBusMiddleware instance or subclass.' - ) - self._middlewares = instances + async def _on_event_change(self, event: BaseEvent[Any], status: EventStatus) -> None: + for middleware in self.middlewares: + await middleware.on_event_change(self, event, status) - async def _call_middleware_hook( - self, - middleware: EventBusMiddleware, - method_name: str, - *args: Any, - ) -> None: - method = getattr(middleware, method_name, None) - if method is None: - return - result = method(*args) - if inspect.isawaitable(result): - await result - - # Middleware fan-out ---------------------------------------------------- # - async def _emit_event_change(self, event: BaseEvent[Any], status: EventStatus) -> None: - for middleware in self._middlewares: - await self._call_middleware_hook(middleware, 'on_event_change', self, event, status) - - async def _emit_event_result_change( + async def _on_event_result_change( self, event: BaseEvent[Any], event_result: EventResult[Any], status: EventStatus ) -> None: - for middleware in self._middlewares: - await self._call_middleware_hook( - middleware, 'on_event_result_change', self, event, event_result, status - ) + for middleware in self.middlewares: + await middleware.on_event_result_change(self, event, event_result, status) @property def events_pending(self) -> list[BaseEvent[Any]]: @@ -641,7 +602,7 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: self.event_history[event.event_id] = event loop = asyncio.get_running_loop() loop.create_task( - self._emit_event_change(event, EventStatus.PENDING) + self._on_event_change(event, EventStatus.PENDING) ) logger.info( f'πŸ—£οΈ {self}.dispatch({event.event_type}) ➑️ {event.event_type}#{event.event_id[-4:]} (#{self.event_queue.qsize()} {event.event_status})' @@ -1423,7 +1384,7 @@ async def handle_event(self, event: BaseEvent[Any], timeout: float | None = None event.event_mark_complete_if_all_handlers_completed() just_completed = not was_complete and event.event_completed_signal and event.event_completed_signal.is_set() if just_completed: - await self._emit_event_change(event, EventStatus.COMPLETED) + await self._on_event_change(event, EventStatus.COMPLETED) # After processing this event, check if any parent events can now be marked complete # We do this by walking up the parent chain @@ -1452,7 +1413,7 @@ async def handle_event(self, event: BaseEvent[Any], timeout: float | None = None parent_event.event_mark_complete_if_all_handlers_completed() just_completed = not was_complete and parent_event.event_completed_signal and parent_event.event_completed_signal.is_set() if parent_bus and just_completed: - await parent_bus._emit_event_change(parent_event, EventStatus.COMPLETED) + await parent_bus._on_event_change(parent_event, EventStatus.COMPLETED) # Move up the chain current = parent_event @@ -1516,7 +1477,7 @@ async def _execute_handlers( applicable_handlers, eventbus=self, timeout=timeout or event.event_timeout ) for pending_result in pending_results.values(): - await self._emit_event_result_change( + await self._on_event_result_change( event, pending_result, EventStatus.PENDING ) @@ -1570,7 +1531,7 @@ async def execute_handler( {handler_id: handler}, eventbus=self, timeout=timeout or event.event_timeout ) for pending_result in new_results.values(): - await self._emit_event_result_change( + await self._on_event_result_change( event, pending_result, EventStatus.PENDING ) @@ -1580,11 +1541,11 @@ async def execute_handler( is_first_handler = not any(r.started_at for r in event.event_results.values()) event_result.update(status='started', timeout=timeout or event.event_timeout) - await self._emit_event_result_change(event, event_result, EventStatus.STARTED) + await self._on_event_result_change(event, event_result, EventStatus.STARTED) # Emit event STARTED once (when first handler starts) if is_first_handler: - await self._emit_event_change(event, EventStatus.STARTED) + await self._on_event_change(event, EventStatus.STARTED) try: result_value = await event_result.execute( @@ -1602,18 +1563,18 @@ async def execute_handler( f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {result_type_name}' ) - await self._emit_event_result_change( + await self._on_event_result_change( event, event_result, EventStatus.COMPLETED ) return cast(T_EventResultType, result_value) except asyncio.CancelledError: - await self._emit_event_result_change( + await self._on_event_result_change( event, event_result, EventStatus.COMPLETED ) raise except Exception: - await self._emit_event_result_change( + await self._on_event_result_change( event, event_result, EventStatus.COMPLETED ) raise From 4e291cfd2b013fc8400673655277890583e29fd8 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 8 Dec 2025 18:20:37 -0800 Subject: [PATCH 36/79] bump version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 81c1ae1..17b5c48 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "bubus" description = "Advanced Pydantic-powered event bus with async support" authors = [{ name = "Nick Sweeting" }] -version = "1.7.1" +version = "1.7.2" readme = "README.md" requires-python = ">=3.11,<4.0" classifiers = [ From 140fb7a20641e8a969028d4a2dfbdbc0f005242d Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Tue, 9 Dec 2025 11:30:43 -0800 Subject: [PATCH 37/79] Revise event handling examples in README Updated event handling examples and removed tree traversal helpers. --- README.md | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 31774cc..0de9965 100644 --- a/README.md +++ b/README.md @@ -311,28 +311,14 @@ When you dispatch an event that triggers child events, use `child_of` to find sp # Dispatch a parent event that triggers child events nav_event = await bus.dispatch(NavigateToUrlEvent(url="https://example.com")) -# Find a child event (may have already fired, or wait for it) -new_tab = await bus.find(TabCreatedEvent, child_of=nav_event, future=5) +# Find a child event (already fired while NavigateToUrlEvent was being handled) +new_tab = await bus.find(TabCreatedEvent, child_of=nav_event, past=5) if new_tab: print(f"New tab created: {new_tab.tab_id}") ``` This solves race conditions where child events fire before you start waiting for them. -#### Tree Traversal Helpers - -Check parent-child relationships between events: - -```python -# Check if event is a descendant of another event -if bus.event_is_child_of(child_event, parent_event): - print("child_event is a descendant of parent_event") - -# Check if event is an ancestor of another event -if bus.event_is_parent_of(parent_event, child_event): - print("parent_event is an ancestor of child_event") -``` - > [!IMPORTANT] > `find()` resolves when the event is first *dispatched* to the `EventBus`, not when it completes. Use `await event` to wait for handlers to finish. > If no match is found (or future timeout elapses), `find()` returns `None`. @@ -346,11 +332,11 @@ Avoid re-running expensive work by reusing recent events. The `find()` method ma ```python # Simple debouncing: reuse event from last 10 seconds, or dispatch new event = ( - await bus.find(ScreenshotEvent, past=10, future=False) # Check last 10s of history (instant) + bus.find(ScreenshotEvent, past=10, future=False) # Check last 10s of history (instant) or await bus.dispatch(ScreenshotEvent()) ) -# More advanced: check history, wait briefly for in-flight, then dispatch +# Advanced: check history, wait briefly for new event to appear, fallback to dispatch new event event = ( await bus.find(SyncEvent, past=True, future=False) # Check all history (instant) or await bus.find(SyncEvent, past=False, future=5) # Wait up to 5s for in-flight From fb68fcb01de16978a8215a0e785b0b2946f23ede Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 4 Feb 2026 17:31:39 -0800 Subject: [PATCH 38/79] add bubus-ts implementation --- .gitignore | 1 + bubus-ts/eslint.config.js | 22 + bubus-ts/eslint.config.mjs | 21 + bubus-ts/examples/log_tree_demo.ts | 100 ++ bubus-ts/package.json | 40 + bubus-ts/pnpm-lock.yaml | 1237 +++++++++++++++++ bubus-ts/prettier.config.js | 8 + bubus-ts/src/base_event.ts | 286 ++++ bubus-ts/src/event_bus.ts | 1019 ++++++++++++++ bubus-ts/src/event_result.ts | 54 + bubus-ts/src/index.ts | 11 + bubus-ts/src/types.ts | 17 + bubus-ts/tests/comprehensive_patterns.test.ts | 598 ++++++++ bubus-ts/tests/debounce.test.ts | 51 + bubus-ts/tests/event_results.test.ts | 73 + bubus-ts/tests/fifo.test.ts | 41 + bubus-ts/tests/find.test.ts | 131 ++ bubus-ts/tests/forwarding.test.ts | 123 ++ bubus-ts/tests/parent_child.test.ts | 64 + bubus-ts/tests/performance.test.ts | 36 + bubus-ts/tests/timeout.test.ts | 43 + bubus-ts/tsconfig.base.json | 14 + bubus-ts/tsconfig.json | 18 + 23 files changed, 4008 insertions(+) create mode 100644 bubus-ts/eslint.config.js create mode 100644 bubus-ts/eslint.config.mjs create mode 100644 bubus-ts/examples/log_tree_demo.ts create mode 100644 bubus-ts/package.json create mode 100644 bubus-ts/pnpm-lock.yaml create mode 100644 bubus-ts/prettier.config.js create mode 100644 bubus-ts/src/base_event.ts create mode 100644 bubus-ts/src/event_bus.ts create mode 100644 bubus-ts/src/event_result.ts create mode 100644 bubus-ts/src/index.ts create mode 100644 bubus-ts/src/types.ts create mode 100644 bubus-ts/tests/comprehensive_patterns.test.ts create mode 100644 bubus-ts/tests/debounce.test.ts create mode 100644 bubus-ts/tests/event_results.test.ts create mode 100644 bubus-ts/tests/fifo.test.ts create mode 100644 bubus-ts/tests/find.test.ts create mode 100644 bubus-ts/tests/forwarding.test.ts create mode 100644 bubus-ts/tests/parent_child.test.ts create mode 100644 bubus-ts/tests/performance.test.ts create mode 100644 bubus-ts/tests/timeout.test.ts create mode 100644 bubus-ts/tsconfig.base.json create mode 100644 bubus-ts/tsconfig.json diff --git a/.gitignore b/.gitignore index 30015e4..8960285 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ CLAUDE.local.md # Build files dist/ +node_modules/ # Coverage files .coverage diff --git a/bubus-ts/eslint.config.js b/bubus-ts/eslint.config.js new file mode 100644 index 0000000..3d89e0b --- /dev/null +++ b/bubus-ts/eslint.config.js @@ -0,0 +1,22 @@ +import ts_parser from "@typescript-eslint/parser"; +import ts_eslint_plugin from "@typescript-eslint/eslint-plugin"; + +export default [ + { + files: ["**/*.ts"], + languageOptions: { + parser: ts_parser, + parserOptions: { + sourceType: "module", + ecmaVersion: "latest" + } + }, + plugins: { + "@typescript-eslint": ts_eslint_plugin + }, + rules: { + "no-unused-vars": "off", + "@typescript-eslint/no-unused-vars": ["error", { "argsIgnorePattern": "^_" }] + } + } +]; diff --git a/bubus-ts/eslint.config.mjs b/bubus-ts/eslint.config.mjs new file mode 100644 index 0000000..75978ee --- /dev/null +++ b/bubus-ts/eslint.config.mjs @@ -0,0 +1,21 @@ +import globals from "globals"; +import pluginJs from "@eslint/js"; +import tseslint from "typescript-eslint"; + +/** @type {import('eslint').Linter.Config[]} */ +export default [ + { + files: ["**/*.{js,cjs,mjs,ts}"], + languageOptions: { globals: globals.node }, + }, + { + ignores: [ + "**/dist/**", + "**/node_modules/**", + "**/*.config.mjs", + "**/*.json", + ], + }, + pluginJs.configs.recommended, + ...tseslint.configs.recommended, +]; diff --git a/bubus-ts/examples/log_tree_demo.ts b/bubus-ts/examples/log_tree_demo.ts new file mode 100644 index 0000000..9897478 --- /dev/null +++ b/bubus-ts/examples/log_tree_demo.ts @@ -0,0 +1,100 @@ +import { z } from "zod"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const RootEvent = BaseEvent.extend( + "RootEvent", + { url: z.string() }, + { event_result_schema: z.string(), event_result_type: "string" } +); + +const ChildEvent = BaseEvent.extend( + "ChildEvent", + { tab_id: z.string() }, + { event_result_schema: z.string(), event_result_type: "string" } +); + +const GrandchildEvent = BaseEvent.extend( + "GrandchildEvent", + { status: z.string() }, + { event_result_schema: z.string(), event_result_type: "string" } +); + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + +async function main(): Promise { + const bus_a = new EventBus("BusA"); + const bus_b = new EventBus("BusB"); + + async function forward_to_bus_b(event: InstanceType): Promise { + await delay(20); + bus_b.dispatch(event); + return "forwarded_to_bus_b"; + } + + bus_a.on("*", forward_to_bus_b); + + async function root_fast_handler(event: InstanceType): Promise { + await delay(10); + const child = event.bus?.emit( + ChildEvent({ tab_id: "tab-123", event_timeout: 0.05 }) + ); + if (child) { + await child.done(); + } + return "root_fast_handler_ok"; + } + + async function root_slow_handler(): Promise { + await delay(120); + return "root_slow_handler_timeout"; + } + + bus_a.on(RootEvent, root_fast_handler); + bus_a.on(RootEvent, root_slow_handler); + + async function child_slow_handler(event: InstanceType): Promise { + await delay(200); + return "child_slow_handler_done"; + } + + async function child_fast_handler(event: InstanceType): Promise { + await delay(10); + event.bus?.emit( + GrandchildEvent({ status: "ok", event_timeout: 0.05 }) + ); + return "child_handler_ok"; + } + + async function grandchild_fast_handler(): Promise { + await delay(5); + return "grandchild_fast_handler_ok"; + } + + async function grandchild_slow_handler(): Promise { + await delay(80); + return "grandchild_slow_handler_timeout"; + } + + bus_b.on(ChildEvent, child_slow_handler); + bus_b.on(ChildEvent, child_fast_handler); + bus_b.on(GrandchildEvent, grandchild_fast_handler); + bus_b.on(GrandchildEvent, grandchild_slow_handler); + + const root_event = bus_a.dispatch( + RootEvent({ url: "https://example.com", event_timeout: 0.05 }) + ); + + await root_event.done(); + + console.log("\n=== BusA logTree ==="); + console.log(bus_a.logTree()); + + console.log("\n=== BusB logTree ==="); + console.log(bus_b.logTree()); +} + +await main(); diff --git a/bubus-ts/package.json b/bubus-ts/package.json new file mode 100644 index 0000000..e229ce8 --- /dev/null +++ b/bubus-ts/package.json @@ -0,0 +1,40 @@ +{ + "name": "bubus-ts", + "version": "1.0.0", + "description": "Event bus library for browsers and ESM Node.js", + "type": "module", + "main": "./dist/esm/index.js", + "module": "./dist/esm/index.js", + "types": "./dist/types/index.d.ts", + "files": [ + "dist/esm", + "dist/types" + ], + "scripts": { + "build": "pnpm run build:esm && pnpm run build:types", + "build:esm": "esbuild src/index.ts --bundle --format=esm --platform=neutral --target=es2022 --outdir=dist/esm", + "build:types": "tsc -p tsconfig.json --emitDeclarationOnly", + "typecheck": "tsc -p tsconfig.json --noEmit", + "lint": "eslint .", + "format": "prettier --write .", + "format:check": "prettier --check .", + "test": "node --test --import tsx tests/**/*.test.ts" + }, + "keywords": [], + "author": "", + "license": "ISC", + "packageManager": "pnpm@10.23.0", + "dependencies": { + "uuid": "^11.1.0", + "zod": "^4.3.6" + }, + "devDependencies": { + "@typescript-eslint/eslint-plugin": "^8.46.0", + "@typescript-eslint/parser": "^8.46.0", + "esbuild": "^0.27.2", + "eslint": "^9.39.2", + "prettier": "^3.8.1", + "tsx": "^4.20.6", + "typescript": "^5.9.3" + } +} diff --git a/bubus-ts/pnpm-lock.yaml b/bubus-ts/pnpm-lock.yaml new file mode 100644 index 0000000..698b911 --- /dev/null +++ b/bubus-ts/pnpm-lock.yaml @@ -0,0 +1,1237 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + uuid: + specifier: ^11.1.0 + version: 11.1.0 + zod: + specifier: ^4.3.6 + version: 4.3.6 + devDependencies: + '@typescript-eslint/eslint-plugin': + specifier: ^8.46.0 + version: 8.54.0(@typescript-eslint/parser@8.54.0(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/parser': + specifier: ^8.46.0 + version: 8.54.0(eslint@9.39.2)(typescript@5.9.3) + esbuild: + specifier: ^0.27.2 + version: 0.27.2 + eslint: + specifier: ^9.39.2 + version: 9.39.2 + prettier: + specifier: ^3.8.1 + version: 3.8.1 + tsx: + specifier: ^4.20.6 + version: 4.21.0 + typescript: + specifier: ^5.9.3 + version: 5.9.3 + +packages: + + '@esbuild/aix-ppc64@0.27.2': + resolution: {integrity: sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.27.2': + resolution: {integrity: sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.27.2': + resolution: {integrity: sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.27.2': + resolution: {integrity: sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.27.2': + resolution: {integrity: sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.27.2': + resolution: {integrity: sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.27.2': + resolution: {integrity: sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.27.2': + resolution: {integrity: sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.27.2': + resolution: {integrity: sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.27.2': + resolution: {integrity: sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.27.2': + resolution: {integrity: sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.27.2': + resolution: {integrity: sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.27.2': + resolution: {integrity: sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.27.2': + resolution: {integrity: sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.27.2': + resolution: {integrity: sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.27.2': + resolution: {integrity: sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.27.2': + resolution: {integrity: sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.27.2': + resolution: {integrity: sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.27.2': + resolution: {integrity: sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.27.2': + resolution: {integrity: sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.27.2': + resolution: {integrity: sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.27.2': + resolution: {integrity: sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.27.2': + resolution: {integrity: sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.27.2': + resolution: {integrity: sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.27.2': + resolution: {integrity: sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.27.2': + resolution: {integrity: sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.9.1': + resolution: {integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.2': + resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/config-array@0.21.1': + resolution: {integrity: sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/config-helpers@0.4.2': + resolution: {integrity: sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/core@0.17.0': + resolution: {integrity: sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/eslintrc@3.3.3': + resolution: {integrity: sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/js@9.39.2': + resolution: {integrity: sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/object-schema@2.1.7': + resolution: {integrity: sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/plugin-kit@0.4.1': + resolution: {integrity: sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@humanfs/core@0.19.1': + resolution: {integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==} + engines: {node: '>=18.18.0'} + + '@humanfs/node@0.16.7': + resolution: {integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==} + engines: {node: '>=18.18.0'} + + '@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/retry@0.4.3': + resolution: {integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==} + engines: {node: '>=18.18'} + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + + '@typescript-eslint/eslint-plugin@8.54.0': + resolution: {integrity: sha512-hAAP5io/7csFStuOmR782YmTthKBJ9ND3WVL60hcOjvtGFb+HJxH4O5huAcmcZ9v9G8P+JETiZ/G1B8MALnWZQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + '@typescript-eslint/parser': ^8.54.0 + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/parser@8.54.0': + resolution: {integrity: sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/project-service@8.54.0': + resolution: {integrity: sha512-YPf+rvJ1s7MyiWM4uTRhE4DvBXrEV+d8oC3P9Y2eT7S+HBS0clybdMIPnhiATi9vZOYDc7OQ1L/i6ga6NFYK/g==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/scope-manager@8.54.0': + resolution: {integrity: sha512-27rYVQku26j/PbHYcVfRPonmOlVI6gihHtXFbTdB5sb6qA0wdAQAbyXFVarQ5t4HRojIz64IV90YtsjQSSGlQg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/tsconfig-utils@8.54.0': + resolution: {integrity: sha512-dRgOyT2hPk/JwxNMZDsIXDgyl9axdJI3ogZ2XWhBPsnZUv+hPesa5iuhdYt2gzwA9t8RE5ytOJ6xB0moV0Ujvw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/type-utils@8.54.0': + resolution: {integrity: sha512-hiLguxJWHjjwL6xMBwD903ciAwd7DmK30Y9Axs/etOkftC3ZNN9K44IuRD/EB08amu+Zw6W37x9RecLkOo3pMA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/types@8.54.0': + resolution: {integrity: sha512-PDUI9R1BVjqu7AUDsRBbKMtwmjWcn4J3le+5LpcFgWULN3LvHC5rkc9gCVxbrsrGmO1jfPybN5s6h4Jy+OnkAA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/typescript-estree@8.54.0': + resolution: {integrity: sha512-BUwcskRaPvTk6fzVWgDPdUndLjB87KYDrN5EYGetnktoeAvPtO4ONHlAZDnj5VFnUANg0Sjm7j4usBlnoVMHwA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/utils@8.54.0': + resolution: {integrity: sha512-9Cnda8GS57AQakvRyG0PTejJNlA2xhvyNtEVIMlDWOOeEyBkYWhGPnfrIAnqxLMTSTo6q8g12XVjjev5l1NvMA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/visitor-keys@8.54.0': + resolution: {integrity: sha512-VFlhGSl4opC0bprJiItPQ1RfUhGDIBokcPwaFH4yiBCaNPeld/9VeXbiPO1cLyorQi1G1vL+ecBk1x8o1axORA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.15.0: + resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==} + engines: {node: '>=0.4.0'} + hasBin: true + + ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + brace-expansion@1.1.12: + resolution: {integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==} + + brace-expansion@2.0.2: + resolution: {integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==} + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + esbuild@0.27.2: + resolution: {integrity: sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==} + engines: {node: '>=18'} + hasBin: true + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + eslint-scope@8.4.0: + resolution: {integrity: sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-visitor-keys@4.2.1: + resolution: {integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + eslint@9.39.2: + resolution: {integrity: sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + hasBin: true + peerDependencies: + jiti: '*' + peerDependenciesMeta: + jiti: + optional: true + + espree@10.4.0: + resolution: {integrity: sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + esquery@1.7.0: + resolution: {integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + + file-entry-cache@8.0.0: + resolution: {integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==} + engines: {node: '>=16.0.0'} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@4.0.1: + resolution: {integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==} + engines: {node: '>=16'} + + flatted@3.3.3: + resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + get-tsconfig@4.13.1: + resolution: {integrity: sha512-EoY1N2xCn44xU6750Sx7OjOIT59FkmstNc3X6y5xpz7D5cBtZRe/3pSlTkDJgqsOk3WwZPkWfonhhUJfttQo3w==} + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + globals@14.0.0: + resolution: {integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==} + engines: {node: '>=18'} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + engines: {node: '>= 4'} + + ignore@7.0.5: + resolution: {integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==} + engines: {node: '>= 4'} + + import-fresh@3.3.1: + resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} + engines: {node: '>=6'} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + js-yaml@4.1.1: + resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + engines: {node: '>=16 || 14 >=14.17'} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + optionator@0.9.4: + resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + engines: {node: '>=12'} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prettier@3.8.1: + resolution: {integrity: sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==} + engines: {node: '>=14'} + hasBin: true + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + resolve-pkg-maps@1.0.0: + resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} + + semver@7.7.3: + resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} + engines: {node: '>=10'} + hasBin: true + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + + ts-api-utils@2.4.0: + resolution: {integrity: sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==} + engines: {node: '>=18.12'} + peerDependencies: + typescript: '>=4.8.4' + + tsx@4.21.0: + resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==} + engines: {node: '>=18.0.0'} + hasBin: true + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + typescript@5.9.3: + resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} + engines: {node: '>=14.17'} + hasBin: true + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + uuid@11.1.0: + resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + hasBin: true + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + zod@4.3.6: + resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==} + +snapshots: + + '@esbuild/aix-ppc64@0.27.2': + optional: true + + '@esbuild/android-arm64@0.27.2': + optional: true + + '@esbuild/android-arm@0.27.2': + optional: true + + '@esbuild/android-x64@0.27.2': + optional: true + + '@esbuild/darwin-arm64@0.27.2': + optional: true + + '@esbuild/darwin-x64@0.27.2': + optional: true + + '@esbuild/freebsd-arm64@0.27.2': + optional: true + + '@esbuild/freebsd-x64@0.27.2': + optional: true + + '@esbuild/linux-arm64@0.27.2': + optional: true + + '@esbuild/linux-arm@0.27.2': + optional: true + + '@esbuild/linux-ia32@0.27.2': + optional: true + + '@esbuild/linux-loong64@0.27.2': + optional: true + + '@esbuild/linux-mips64el@0.27.2': + optional: true + + '@esbuild/linux-ppc64@0.27.2': + optional: true + + '@esbuild/linux-riscv64@0.27.2': + optional: true + + '@esbuild/linux-s390x@0.27.2': + optional: true + + '@esbuild/linux-x64@0.27.2': + optional: true + + '@esbuild/netbsd-arm64@0.27.2': + optional: true + + '@esbuild/netbsd-x64@0.27.2': + optional: true + + '@esbuild/openbsd-arm64@0.27.2': + optional: true + + '@esbuild/openbsd-x64@0.27.2': + optional: true + + '@esbuild/openharmony-arm64@0.27.2': + optional: true + + '@esbuild/sunos-x64@0.27.2': + optional: true + + '@esbuild/win32-arm64@0.27.2': + optional: true + + '@esbuild/win32-ia32@0.27.2': + optional: true + + '@esbuild/win32-x64@0.27.2': + optional: true + + '@eslint-community/eslint-utils@4.9.1(eslint@9.39.2)': + dependencies: + eslint: 9.39.2 + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.2': {} + + '@eslint/config-array@0.21.1': + dependencies: + '@eslint/object-schema': 2.1.7 + debug: 4.4.3 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@eslint/config-helpers@0.4.2': + dependencies: + '@eslint/core': 0.17.0 + + '@eslint/core@0.17.0': + dependencies: + '@types/json-schema': 7.0.15 + + '@eslint/eslintrc@3.3.3': + dependencies: + ajv: 6.12.6 + debug: 4.4.3 + espree: 10.4.0 + globals: 14.0.0 + ignore: 5.3.2 + import-fresh: 3.3.1 + js-yaml: 4.1.1 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@9.39.2': {} + + '@eslint/object-schema@2.1.7': {} + + '@eslint/plugin-kit@0.4.1': + dependencies: + '@eslint/core': 0.17.0 + levn: 0.4.1 + + '@humanfs/core@0.19.1': {} + + '@humanfs/node@0.16.7': + dependencies: + '@humanfs/core': 0.19.1 + '@humanwhocodes/retry': 0.4.3 + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/retry@0.4.3': {} + + '@types/estree@1.0.8': {} + + '@types/json-schema@7.0.15': {} + + '@typescript-eslint/eslint-plugin@8.54.0(@typescript-eslint/parser@8.54.0(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@eslint-community/regexpp': 4.12.2 + '@typescript-eslint/parser': 8.54.0(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.54.0 + '@typescript-eslint/type-utils': 8.54.0(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/utils': 8.54.0(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.54.0 + eslint: 9.39.2 + ignore: 7.0.5 + natural-compare: 1.4.0 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@8.54.0(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@typescript-eslint/scope-manager': 8.54.0 + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/typescript-estree': 8.54.0(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.54.0 + debug: 4.4.3 + eslint: 9.39.2 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/project-service@8.54.0(typescript@5.9.3)': + dependencies: + '@typescript-eslint/tsconfig-utils': 8.54.0(typescript@5.9.3) + '@typescript-eslint/types': 8.54.0 + debug: 4.4.3 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@8.54.0': + dependencies: + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/visitor-keys': 8.54.0 + + '@typescript-eslint/tsconfig-utils@8.54.0(typescript@5.9.3)': + dependencies: + typescript: 5.9.3 + + '@typescript-eslint/type-utils@8.54.0(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/typescript-estree': 8.54.0(typescript@5.9.3) + '@typescript-eslint/utils': 8.54.0(eslint@9.39.2)(typescript@5.9.3) + debug: 4.4.3 + eslint: 9.39.2 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@8.54.0': {} + + '@typescript-eslint/typescript-estree@8.54.0(typescript@5.9.3)': + dependencies: + '@typescript-eslint/project-service': 8.54.0(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.54.0(typescript@5.9.3) + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/visitor-keys': 8.54.0 + debug: 4.4.3 + minimatch: 9.0.5 + semver: 7.7.3 + tinyglobby: 0.2.15 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@8.54.0(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2) + '@typescript-eslint/scope-manager': 8.54.0 + '@typescript-eslint/types': 8.54.0 + '@typescript-eslint/typescript-estree': 8.54.0(typescript@5.9.3) + eslint: 9.39.2 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/visitor-keys@8.54.0': + dependencies: + '@typescript-eslint/types': 8.54.0 + eslint-visitor-keys: 4.2.1 + + acorn-jsx@5.3.2(acorn@8.15.0): + dependencies: + acorn: 8.15.0 + + acorn@8.15.0: {} + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + argparse@2.0.1: {} + + balanced-match@1.0.2: {} + + brace-expansion@1.1.12: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.2: + dependencies: + balanced-match: 1.0.2 + + callsites@3.1.0: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + concat-map@0.0.1: {} + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + deep-is@0.1.4: {} + + esbuild@0.27.2: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.2 + '@esbuild/android-arm': 0.27.2 + '@esbuild/android-arm64': 0.27.2 + '@esbuild/android-x64': 0.27.2 + '@esbuild/darwin-arm64': 0.27.2 + '@esbuild/darwin-x64': 0.27.2 + '@esbuild/freebsd-arm64': 0.27.2 + '@esbuild/freebsd-x64': 0.27.2 + '@esbuild/linux-arm': 0.27.2 + '@esbuild/linux-arm64': 0.27.2 + '@esbuild/linux-ia32': 0.27.2 + '@esbuild/linux-loong64': 0.27.2 + '@esbuild/linux-mips64el': 0.27.2 + '@esbuild/linux-ppc64': 0.27.2 + '@esbuild/linux-riscv64': 0.27.2 + '@esbuild/linux-s390x': 0.27.2 + '@esbuild/linux-x64': 0.27.2 + '@esbuild/netbsd-arm64': 0.27.2 + '@esbuild/netbsd-x64': 0.27.2 + '@esbuild/openbsd-arm64': 0.27.2 + '@esbuild/openbsd-x64': 0.27.2 + '@esbuild/openharmony-arm64': 0.27.2 + '@esbuild/sunos-x64': 0.27.2 + '@esbuild/win32-arm64': 0.27.2 + '@esbuild/win32-ia32': 0.27.2 + '@esbuild/win32-x64': 0.27.2 + + escape-string-regexp@4.0.0: {} + + eslint-scope@8.4.0: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint-visitor-keys@4.2.1: {} + + eslint@9.39.2: + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2) + '@eslint-community/regexpp': 4.12.2 + '@eslint/config-array': 0.21.1 + '@eslint/config-helpers': 0.4.2 + '@eslint/core': 0.17.0 + '@eslint/eslintrc': 3.3.3 + '@eslint/js': 9.39.2 + '@eslint/plugin-kit': 0.4.1 + '@humanfs/node': 0.16.7 + '@humanwhocodes/module-importer': 1.0.1 + '@humanwhocodes/retry': 0.4.3 + '@types/estree': 1.0.8 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.6 + debug: 4.4.3 + escape-string-regexp: 4.0.0 + eslint-scope: 8.4.0 + eslint-visitor-keys: 4.2.1 + espree: 10.4.0 + esquery: 1.7.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 8.0.0 + find-up: 5.0.0 + glob-parent: 6.0.2 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + json-stable-stringify-without-jsonify: 1.0.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.4 + transitivePeerDependencies: + - supports-color + + espree@10.4.0: + dependencies: + acorn: 8.15.0 + acorn-jsx: 5.3.2(acorn@8.15.0) + eslint-visitor-keys: 4.2.1 + + esquery@1.7.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + esutils@2.0.3: {} + + fast-deep-equal@3.1.3: {} + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + + file-entry-cache@8.0.0: + dependencies: + flat-cache: 4.0.1 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@4.0.1: + dependencies: + flatted: 3.3.3 + keyv: 4.5.4 + + flatted@3.3.3: {} + + fsevents@2.3.3: + optional: true + + get-tsconfig@4.13.1: + dependencies: + resolve-pkg-maps: 1.0.0 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + globals@14.0.0: {} + + has-flag@4.0.0: {} + + ignore@5.3.2: {} + + ignore@7.0.5: {} + + import-fresh@3.3.1: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + imurmurhash@0.1.4: {} + + is-extglob@2.1.1: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + isexe@2.0.0: {} + + js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + + json-buffer@3.0.1: {} + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash.merge@4.6.2: {} + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.12 + + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.2 + + ms@2.1.3: {} + + natural-compare@1.4.0: {} + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + path-exists@4.0.0: {} + + path-key@3.1.1: {} + + picomatch@4.0.3: {} + + prelude-ls@1.2.1: {} + + prettier@3.8.1: {} + + punycode@2.3.1: {} + + resolve-from@4.0.0: {} + + resolve-pkg-maps@1.0.0: {} + + semver@7.7.3: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + strip-json-comments@3.1.1: {} + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + ts-api-utils@2.4.0(typescript@5.9.3): + dependencies: + typescript: 5.9.3 + + tsx@4.21.0: + dependencies: + esbuild: 0.27.2 + get-tsconfig: 4.13.1 + optionalDependencies: + fsevents: 2.3.3 + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + typescript@5.9.3: {} + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + uuid@11.1.0: {} + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + word-wrap@1.2.5: {} + + yocto-queue@0.1.0: {} + + zod@4.3.6: {} diff --git a/bubus-ts/prettier.config.js b/bubus-ts/prettier.config.js new file mode 100644 index 0000000..f68c694 --- /dev/null +++ b/bubus-ts/prettier.config.js @@ -0,0 +1,8 @@ +const config = { + semi: false, + singleQuote: true, + trailingComma: "es5", + printWidth: 140 +}; + +export default config; diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts new file mode 100644 index 0000000..d1095e9 --- /dev/null +++ b/bubus-ts/src/base_event.ts @@ -0,0 +1,286 @@ +import { z } from "zod"; +import { v7 as uuidv7 } from "uuid"; + +import type { EventBus } from "./event_bus.js"; +import { EventResult } from "./event_result.js"; + + +export const BaseEventSchema = z + .object({ + event_id: z.string().uuid(), + event_created_at: z.string().datetime(), + event_type: z.string(), + event_timeout: z.number().positive().nullable(), + event_parent_id: z.string().uuid().optional(), + event_path: z.array(z.string()).optional() + }) + .passthrough(); + +export type BaseEventData = z.infer; +type BaseEventFields = Pick< + BaseEventData, + "event_id" | "event_created_at" | "event_type" | "event_timeout" | "event_parent_id" +>; + +export type BaseEventInit> = TFields & + Partial; + +type BaseEventSchemaShape = typeof BaseEventSchema.shape; + +export type EventSchema = z.ZodObject< + BaseEventSchemaShape & TShape +>; + +type EventInput = z.input>; +export type EventInit = Omit, keyof BaseEventFields> & + Partial; + +export type EventFactory = { + (data: EventInit): BaseEvent & z.infer>; + new (data: EventInit): BaseEvent & z.infer>; + schema: EventSchema; + event_type?: string; + event_result_schema?: z.ZodTypeAny; + event_result_type?: string; +}; + +export type EventExtendOptions = { + event_result_schema?: z.ZodTypeAny; + event_result_type?: string; +}; + +export class BaseEvent { + event_id: string; + event_created_at: string; + event_type: string; + event_timeout: number | null; + event_parent_id?: string; + event_path: string[]; + event_processed_path: string[]; + event_factory?: Function; + event_result_schema?: z.ZodTypeAny; + event_result_type?: string; + event_results: Map; + event_children: BaseEvent[]; + event_emitted_by_handler_id?: string; + event_pending_buses: number; + event_status: "pending" | "started" | "completed"; + event_created_at_ms: number; + event_started_at?: string; + event_completed_at?: string; + event_errors: unknown[]; + event_key_symbol?: symbol; + bus?: EventBus; + _original_event?: BaseEvent; + + static schema = BaseEventSchema; + static event_type?: string; + + _done_promise: Promise | null; + _done_resolve: ((event: this) => void) | null; + _done_reject: ((reason: unknown) => void) | null; + + constructor(data: BaseEventInit> = {}) { + const ctor = this.constructor as typeof BaseEvent & { + factory?: Function; + event_result_schema?: z.ZodTypeAny; + event_result_type?: string; + }; + const event_type = data.event_type ?? ctor.event_type ?? ctor.name; + const event_id = data.event_id ?? uuidv7(); + const event_created_at = + data.event_created_at ?? new Date().toISOString(); + const event_timeout = + data.event_timeout === undefined ? BaseEvent.defaultTimeout() : data.event_timeout; + + const base_data = { + ...data, + event_id, + event_created_at, + event_type, + event_timeout + }; + + const schema = ctor.schema ?? BaseEventSchema; + const parsed = schema.parse(base_data) as BaseEventData & Record; + + Object.assign(this, parsed); + + this.event_path = Array.isArray((parsed as { event_path?: string[] }).event_path) + ? ([...(parsed as { event_path?: string[] }).event_path] as string[]) + : []; + this.event_processed_path = []; + this.event_pending_buses = 0; + this.event_status = "pending"; + this.event_created_at_ms = Date.parse(this.event_created_at); + this.event_errors = []; + this.event_factory = ctor.factory; + this.event_result_schema = ctor.event_result_schema; + this.event_result_type = ctor.event_result_type; + this.event_results = new Map(); + this.event_children = []; + + this._done_promise = null; + this._done_resolve = null; + this._done_reject = null; + } + + static defaultTimeout(): number { + return 300; + } + + static extend( + shape: TShape, + options?: EventExtendOptions + ): EventFactory; + static extend( + event_type: string, + shape: TShape, + options?: EventExtendOptions + ): EventFactory; + static extend( + arg1: string | TShape, + arg2?: TShape | EventExtendOptions, + arg3?: EventExtendOptions + ): EventFactory { + return extendEvent( + arg1 as string | TShape, + arg2 as TShape | EventExtendOptions | undefined, + arg3 + ); + } + + static parse(this: T, data: unknown): InstanceType { + const schema = this.schema ?? BaseEventSchema; + const parsed = schema.parse(data); + return new this(parsed) as InstanceType; + } + + toJSON(): BaseEventData { + return { + event_id: this.event_id, + event_created_at: this.event_created_at, + event_type: this.event_type, + event_timeout: this.event_timeout, + event_parent_id: this.event_parent_id, + event_path: this.event_path + }; + } + + get type(): string { + return this.event_type; + } + + done(): Promise { + if (!this.bus) { + return Promise.reject(new Error("event has no bus attached")); + } + const runner_bus = this.bus as { + _runImmediately: (event: BaseEvent) => Promise; + isInsideHandler: () => boolean; + }; + if (this.event_status === "completed") { + return Promise.resolve(this); + } + if (runner_bus.isInsideHandler()) { + return runner_bus._runImmediately(this) as Promise; + } + return this.waitForCompletion(); + } + + waitForCompletion(): Promise { + this.ensureDonePromise(); + return this._done_promise as Promise; + } + + markStarted(): void { + if (this.event_status !== "pending") { + return; + } + this.event_status = "started"; + this.event_started_at = new Date().toISOString(); + } + + markCompleted(): void { + if (this.event_status === "completed") { + return; + } + this.event_status = "completed"; + this.event_completed_at = new Date().toISOString(); + this.ensureDonePromise(); + if (this._done_resolve) { + this._done_resolve(this as this); + } + } + + markFailed(error: unknown): void { + this.event_errors.push(error); + } + + cancelPendingChildProcessing(reason: unknown): void { + for (const child of this.event_children) { + for (const result of child.event_results.values()) { + if (result.status === "pending") { + result.markError(reason); + } + } + child.cancelPendingChildProcessing(reason); + } + } + + ensureDonePromise(): void { + if (this._done_promise) { + return; + } + this._done_promise = new Promise((resolve, reject) => { + this._done_resolve = resolve; + this._done_reject = reject; + }); + } +} + +export function extendEvent( + shape: TShape +): EventFactory; +export function extendEvent( + event_type: string, + shape: TShape, + options?: EventExtendOptions +): EventFactory; +export function extendEvent( + arg1: string | TShape, + arg2?: TShape | EventExtendOptions, + arg3?: EventExtendOptions +): EventFactory { + const event_type = typeof arg1 === "string" ? arg1 : undefined; + const shape = (typeof arg1 === "string" ? arg2 : arg1) as TShape; + const options = (typeof arg1 === "string" ? arg3 : arg2) as EventExtendOptions | undefined; + + const full_schema = BaseEventSchema.extend(shape); + + class ExtendedEvent extends BaseEvent { + static schema = full_schema; + static event_type = event_type; + static factory?: Function; + static event_result_schema = options?.event_result_schema; + static event_result_type = options?.event_result_type; + + constructor(data: EventInit) { + super(data as BaseEventInit>); + } + } + + function EventFactory(data: EventInit): BaseEvent & z.infer> { + return new ExtendedEvent(data); + } + + EventFactory.schema = full_schema; + EventFactory.event_type = event_type; + EventFactory.event_result_schema = options?.event_result_schema; + EventFactory.event_result_type = options?.event_result_type; + EventFactory.prototype = ExtendedEvent.prototype; + (EventFactory as unknown as { class: typeof ExtendedEvent }).class = ExtendedEvent; + (ExtendedEvent as unknown as { factory?: Function }).factory = EventFactory; + + return EventFactory as EventFactory; +} diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts new file mode 100644 index 0000000..1389a82 --- /dev/null +++ b/bubus-ts/src/event_bus.ts @@ -0,0 +1,1019 @@ +import { BaseEvent } from "./base_event.js"; +import { EventResult } from "./event_result.js"; +import { v7 as uuidv7 } from "uuid"; + + +export class EventHandlerTimeoutError extends Error { + event_type: string; + handler_name: string; + timeout_seconds: number; + + constructor( + message: string, + params: { event_type: string; handler_name: string; timeout_seconds: number } + ) { + super(message); + this.name = "EventHandlerTimeoutError"; + this.event_type = params.event_type; + this.handler_name = params.handler_name; + this.timeout_seconds = params.timeout_seconds; + } +} + +export class EventHandlerCancelledError extends Error { + event_type: string; + handler_name: string; + parent_error: Error; + + constructor( + message: string, + params: { event_type: string; handler_name: string; parent_error: Error } + ) { + super(message); + this.name = "EventHandlerCancelledError"; + this.event_type = params.event_type; + this.handler_name = params.handler_name; + this.parent_error = params.parent_error; + } +} + +const with_resolvers = () => { + if (typeof Promise.withResolvers === "function") { + return Promise.withResolvers(); + } + + let resolve!: (value: T | PromiseLike) => void; + let reject!: (reason?: unknown) => void; + const promise = new Promise((resolve_fn, reject_fn) => { + resolve = resolve_fn; + reject = reject_fn; + }); + + return { promise, resolve, reject }; +}; +import type { + EventClass, + EventHandler, + EventKey, + FindOptions +} from "./types.js"; + +type FindWaiter = { + event_key: EventKey; + matches: (event: BaseEvent) => boolean; + resolve: (event: BaseEvent) => void; + timeout_id?: ReturnType; +}; + +type EventBusOptions = { + max_history_size?: number | null; +}; + +export class EventBus { + static instances: Set = new Set(); + + name: string; + max_history_size: number | null; + handlers_by_key: Map>; + event_history: BaseEvent[]; + event_history_by_id: Map; + pending_queue: BaseEvent[]; + is_running: boolean; + idle_waiters: Array<() => void>; + find_waiters: Set; + handler_stack: EventResult[]; + handler_file_paths: Map; + run_now_depth: number; + run_now_waiters: Array<() => void>; + inside_handler_depth: number; + + constructor(name: string = "EventBus", options: EventBusOptions = {}) { + this.name = name; + this.max_history_size = + options.max_history_size === undefined ? 100 : options.max_history_size; + this.handlers_by_key = new Map(); + this.event_history = []; + this.event_history_by_id = new Map(); + this.pending_queue = []; + this.is_running = false; + this.idle_waiters = []; + this.find_waiters = new Set(); + this.handler_stack = []; + this.handler_file_paths = new Map(); + this.run_now_depth = 0; + this.run_now_waiters = []; + this.inside_handler_depth = 0; + + EventBus.instances.add(this); + + this.dispatch = this.dispatch.bind(this); + this.emit = this.emit.bind(this); + } + + on(event_key: EventKey | "*", handler: EventHandler): void { + const handler_set = this.handlers_by_key.get(event_key) ?? new Set(); + handler_set.add(handler as EventHandler); + this.handlers_by_key.set(event_key, handler_set); + + if (!this.handler_file_paths.has(handler as EventHandler)) { + const file_path = this.inferHandlerFilePath(); + if (file_path) { + this.handler_file_paths.set(handler as EventHandler, file_path); + } + } + } + + off(event_key: EventKey | "*", handler: EventHandler): void { + const handler_set = this.handlers_by_key.get(event_key); + if (!handler_set) { + return; + } + handler_set.delete(handler as EventHandler); + } + + dispatch(event: T, event_key?: EventKey): T { + const original_event = event._original_event ?? event; + if (!Array.isArray(original_event.event_path)) { + original_event.event_path = []; + } + + if (typeof event_key === "symbol") { + original_event.event_key_symbol = event_key; + } + + if (original_event.event_path.includes(this.name) || this.eventHasVisited(original_event)) { + return this._getBusScopedEvent(original_event) as T; + } + + if (!original_event.event_path.includes(this.name)) { + original_event.event_path.push(this.name); + } + + const current_handler = this.handler_stack[this.handler_stack.length - 1]; + if (current_handler) { + const parent_event = this.event_history_by_id.get(current_handler.event_id); + if (parent_event && !original_event.event_parent_id) { + original_event.event_parent_id = parent_event.event_id; + this.recordChildEvent(parent_event.event_id, original_event); + } + } + + this.event_history.push(original_event); + this.event_history_by_id.set(original_event.event_id, original_event); + this.trimHistory(); + + original_event.event_pending_buses += 1; + this.pending_queue.push(original_event); + this.startRunloop(); + + return this._getBusScopedEvent(original_event) as T; + } + + emit(event: T, event_key?: EventKey): T { + return this.dispatch(event, event_key); + } + + find(event_key: EventKey, options?: FindOptions): Promise; + find( + event_key: EventKey, + where: (event: T) => boolean, + options?: FindOptions + ): Promise; + async find( + event_key: EventKey, + where_or_options: ((event: T) => boolean) | FindOptions = {}, + maybe_options: FindOptions = {} + ): Promise { + const where = typeof where_or_options === "function" ? where_or_options : (() => true); + const options = typeof where_or_options === "function" ? maybe_options : where_or_options; + + return this.findInternal(event_key, where, options); + } + + private async findInternal( + event_key: EventKey, + where: (event: T) => boolean, + options: FindOptions + ): Promise { + const past = options.past ?? true; + const future = options.future ?? true; + const child_of = options.child_of ?? null; + + if (past === false && future === false) { + return null; + } + + const matches = (event: BaseEvent): boolean => { + if (!this.eventMatchesKey(event, event_key)) { + return false; + } + if (!where(event as T)) { + return false; + } + if (child_of && !this.eventIsChildOf(event, child_of)) { + return false; + } + return true; + }; + + if (past !== false) { + const now_ms = Date.now(); + const cutoff_ms = + past === true ? null : now_ms - Math.max(0, Number(past)) * 1000; + + for (let i = this.event_history.length - 1; i >= 0; i -= 1) { + const event = this.event_history[i]; + if (event.event_status !== "completed") { + continue; + } + if (cutoff_ms !== null && event.event_created_at_ms < cutoff_ms) { + continue; + } + if (matches(event)) { + return event as T; + } + } + } + + if (future === false) { + return null; + } + + return new Promise((resolve, reject) => { + const waiter: FindWaiter = { + event_key, + matches, + resolve: (event) => resolve(event as T) + }; + + if (future !== true) { + const timeout_ms = Math.max(0, Number(future)) * 1000; + waiter.timeout_id = setTimeout(() => { + this.find_waiters.delete(waiter); + resolve(null); + }, timeout_ms); + } + + this.find_waiters.add(waiter); + }); + } + + async _runImmediately(event: T): Promise { + const original_event = event._original_event ?? event; + if (original_event.event_status === "completed") { + return event; + } + if (original_event.event_status === "started") { + await this.runImmediatelyAcrossBuses(original_event); + return event; + } + + const index = this.pending_queue.indexOf(original_event); + if (index >= 0) { + this.pending_queue.splice(index, 1); + } + + await this.runImmediatelyAcrossBuses(original_event); + return event; + } + + async waitUntilIdle(): Promise { + if (!this.is_running && this.pending_queue.length === 0) { + return; + } + return new Promise((resolve) => { + this.idle_waiters.push(resolve); + }); + } + + eventIsChildOf(event: BaseEvent, ancestor: BaseEvent): boolean { + if (event.event_id === ancestor.event_id) { + return false; + } + + let current_parent_id = event.event_parent_id; + while (current_parent_id) { + if (current_parent_id === ancestor.event_id) { + return true; + } + const parent = this.event_history_by_id.get(current_parent_id); + if (!parent) { + return false; + } + current_parent_id = parent.event_parent_id; + } + return false; + } + + eventIsParentOf(event: BaseEvent, descendant: BaseEvent): boolean { + return this.eventIsChildOf(descendant, event); + } + + recordChildEvent(parent_event_id: string, child_event: BaseEvent): void { + const original_child = child_event._original_event ?? child_event; + const parent_event = this.event_history_by_id.get(parent_event_id); + if (parent_event) { + parent_event.event_children.push(original_child); + } + + const current_result = this.handler_stack[this.handler_stack.length - 1]; + if (current_result) { + current_result.event_children.push(original_child); + original_child.event_emitted_by_handler_id = current_result.handler_id; + } + } + + logTree(): string { + const parent_to_children = new Map(); + + const add_child = (parent_id: string | null, child: BaseEvent): void => { + const existing = parent_to_children.get(parent_id) ?? []; + existing.push(child); + parent_to_children.set(parent_id, existing); + }; + + for (const event of this.event_history) { + add_child(event.event_parent_id ?? null, event); + } + + for (const children of parent_to_children.values()) { + children.sort((a, b) => a.event_created_at_ms - b.event_created_at_ms); + } + + const root_events: BaseEvent[] = []; + const seen = new Set(); + + for (const event of this.event_history) { + const parent_id = event.event_parent_id; + if (!parent_id || parent_id === event.event_id || !this.event_history_by_id.has(parent_id)) { + if (!seen.has(event.event_id)) { + root_events.push(event); + seen.add(event.event_id); + } + } + } + + if (root_events.length === 0) { + return "(No events in history)"; + } + + const lines: string[] = []; + lines.push(`πŸ“Š Event History Tree for ${this.name}`); + lines.push("=".repeat(80)); + + root_events.sort((a, b) => a.event_created_at_ms - b.event_created_at_ms); + const visited = new Set(); + root_events.forEach((event, index) => { + lines.push( + this.buildTreeLine( + event, + "", + index === root_events.length - 1, + parent_to_children, + visited + ) + ); + }); + + lines.push("=".repeat(80)); + + return lines.join("\n"); + } + + isInsideHandler(): boolean { + return this.inside_handler_depth > 0; + } + + private async runImmediatelyAcrossBuses(event: BaseEvent): Promise { + const buses = this.getBusesForImmediateRun(event); + if (buses.length === 0) { + await event.waitForCompletion(); + return; + } + + for (const bus of buses) { + bus.run_now_depth += 1; + } + + try { + for (const bus of buses) { + const index = bus.pending_queue.indexOf(event); + if (index >= 0) { + bus.pending_queue.splice(index, 1); + } + if (!event.event_processed_path.includes(bus.name)) { + await bus.processEvent(event); + } + } + + if (event.event_status !== "completed") { + await event.waitForCompletion(); + } + } finally { + for (const bus of buses) { + bus.run_now_depth = Math.max(0, bus.run_now_depth - 1); + bus.releaseRunNowWaiters(); + } + } + } + + private getBusesForImmediateRun(event: BaseEvent): EventBus[] { + const ordered: EventBus[] = []; + const seen = new Set(); + + const event_path = Array.isArray(event.event_path) ? event.event_path : []; + for (const name of event_path) { + for (const bus of EventBus.instances) { + if (bus.name !== name) { + continue; + } + if (!bus.event_history_by_id.has(event.event_id)) { + continue; + } + if (event.event_processed_path.includes(bus.name)) { + continue; + } + if (!seen.has(bus)) { + ordered.push(bus); + seen.add(bus); + } + } + } + + if (!seen.has(this) && this.event_history_by_id.has(event.event_id)) { + ordered.push(this); + } + + return ordered; + } + + private releaseRunNowWaiters(): void { + if (this.run_now_depth !== 0 || this.run_now_waiters.length === 0) { + return; + } + const waiters = this.run_now_waiters; + this.run_now_waiters = []; + for (const resolve of waiters) { + resolve(); + } + } + + + private startRunloop(): void { + if (this.is_running) { + return; + } + this.is_running = true; + setTimeout(() => { + setTimeout(() => { + void this.runloop(); + }, 0); + }, 0); + } + + private async runloop(): Promise { + while (this.pending_queue.length > 0) { + await Promise.resolve(); + if (this.run_now_depth > 0) { + await new Promise((resolve) => { + this.run_now_waiters.push(resolve); + }); + continue; + } + const next_event = this.pending_queue.shift(); + if (!next_event) { + continue; + } + if (this.eventHasVisited(next_event)) { + continue; + } + await this.processEvent(next_event); + await Promise.resolve(); + } + this.is_running = false; + const idle_waiters = this.idle_waiters; + this.idle_waiters = []; + for (const resolve of idle_waiters) { + resolve(); + } + } + + private async processEvent(event: BaseEvent): Promise { + if (this.eventHasVisited(event)) { + return; + } + if (!Array.isArray(event.event_processed_path)) { + event.event_processed_path = []; + } + if (!event.event_processed_path.includes(this.name)) { + event.event_processed_path.push(this.name); + } + event.markStarted(); + this.notifyFinders(event); + + const handlers = this.collectHandlers(event); + const handler_results = handlers.map((handler) => { + const handler_name = handler.name || "anonymous"; + const handler_id = uuidv7(); + const result = new EventResult({ + event_id: event.event_id, + handler_id, + handler_name, + handler_file_path: this.handler_file_paths.get(handler) ?? undefined, + eventbus_name: this.name + }); + event.event_results.set(handler_id, result); + return { handler, result }; + }); + + const handler_event = this._getBusScopedEvent(event); + + for (const { handler, result } of handler_results) { + if (result.status === "error" && result.error instanceof EventHandlerCancelledError) { + continue; + } + this.inside_handler_depth += 1; + this.handler_stack.push(result); + + try { + result.markStarted(); + const handler_result = await this.runHandlerWithTimeout(event, handler, handler_event); + if (event.event_result_schema) { + const parsed = event.event_result_schema.safeParse(handler_result); + if (parsed.success) { + result.markCompleted(parsed.data); + } else { + const error = new Error( + `handler result did not match event_result_schema: ${parsed.error.message}` + ); + result.markError(error); + event.markFailed(error); + } + } else { + result.markCompleted(handler_result); + } + } catch (error) { + if (error instanceof EventHandlerTimeoutError) { + result.markError(error); + event.markFailed(error); + const cancelled_error = new EventHandlerCancelledError( + `Cancelled pending handler due to parent timeout: ${error.message}`, + { + event_type: event.event_type, + handler_name: result.handler_name, + parent_error: error + } + ); + event.cancelPendingChildProcessing(cancelled_error); + } else { + result.markError(error); + event.markFailed(error); + } + } finally { + this.handler_stack.pop(); + this.inside_handler_depth = Math.max(0, this.inside_handler_depth - 1); + } + } + + event.event_pending_buses -= 1; + if (event.event_pending_buses <= 0) { + event.event_pending_buses = 0; + event.markCompleted(); + } + } + + + + private async runHandlerWithTimeout( + event: BaseEvent, + handler: EventHandler, + handler_event: BaseEvent = event + ): Promise { + if (event.event_timeout === null) { + return handler(handler_event); + } + + const timeout_seconds = event.event_timeout; + const timeout_ms = timeout_seconds * 1000; + + const { promise, resolve, reject } = with_resolvers(); + let settled = false; + + const timer = setTimeout(() => { + if (settled) { + return; + } + settled = true; + reject( + new EventHandlerTimeoutError( + `handler ${handler.name || "anonymous"} timed out after ${timeout_seconds}s`, + { + event_type: event.event_type, + handler_name: handler.name || "anonymous", + timeout_seconds + } + ) + ); + }, timeout_ms); + + Promise.resolve() + .then(() => handler(handler_event)) + .then((value) => { + if (settled) { + return; + } + settled = true; + clearTimeout(timer); + resolve(value); + }) + .catch((error) => { + if (settled) { + return; + } + settled = true; + clearTimeout(timer); + reject(error); + }); + + return promise; + } + + private eventHasVisited(event: BaseEvent): boolean { + return ( + Array.isArray(event.event_processed_path) && + event.event_processed_path.includes(this.name) + ); + } + + _getBusScopedEvent(event: T): T { + const original_event = event._original_event ?? event; + const bus = this; + const scoped = new Proxy(original_event, { + get(target, prop, receiver) { + if (prop === "bus") { + return bus; + } + if (prop === "_original_event") { + return target; + } + return Reflect.get(target, prop, receiver); + }, + set(target, prop, value) { + if (prop === "bus") { + return true; + } + return Reflect.set(target, prop, value, target); + }, + has(target, prop) { + if (prop === "bus") { + return true; + } + if (prop === "_original_event") { + return true; + } + return Reflect.has(target, prop); + } + }); + + return scoped as T; + } + + private buildTreeLine( + event: BaseEvent, + indent: string, + is_last: boolean, + parent_to_children: Map, + visited: Set + ): string { + const connector = is_last ? "└── " : "β”œβ”€β”€ "; + const status_icon = + event.event_status === "completed" + ? "βœ…" + : event.event_status === "started" + ? "πŸƒ" + : "⏳"; + + const created_at = this.formatTimestamp(event.event_created_at); + let timing = `[${created_at}`; + if (event.event_completed_at) { + const created_ms = Date.parse(event.event_created_at); + const completed_ms = Date.parse(event.event_completed_at); + if (!Number.isNaN(created_ms) && !Number.isNaN(completed_ms)) { + const duration = (completed_ms - created_ms) / 1000; + timing += ` (${duration.toFixed(3)}s)`; + } + } + timing += "]"; + + const line = `${indent}${connector}${status_icon} ${event.event_type}#${event.event_id.slice(-4)} ${timing}`; + + if (visited.has(event.event_id)) { + return line; + } + visited.add(event.event_id); + + const extension = is_last ? " " : "β”‚ "; + const new_indent = indent + extension; + + const result_items: Array<{ type: "result"; result: EventResult } | { type: "child"; child: BaseEvent }> = + []; + const printed_child_ids = new Set(); + + const results = Array.from(event.event_results.values()).sort((a, b) => { + const a_time = a.started_at ? Date.parse(a.started_at) : 0; + const b_time = b.started_at ? Date.parse(b.started_at) : 0; + return a_time - b_time; + }); + + results.forEach((result) => { + result_items.push({ type: "result", result }); + result.event_children.forEach((child) => { + printed_child_ids.add(child.event_id); + }); + }); + + const children = parent_to_children.get(event.event_id) ?? []; + children.forEach((child) => { + if (!printed_child_ids.has(child.event_id) && !child.event_emitted_by_handler_id) { + result_items.push({ type: "child", child }); + } + }); + + if (result_items.length === 0) { + return line; + } + + const child_lines: string[] = []; + result_items.forEach((item, index) => { + const is_last_item = index === result_items.length - 1; + if (item.type === "result") { + child_lines.push( + this.buildResultLine( + item.result, + new_indent, + is_last_item, + parent_to_children, + visited + ) + ); + } else { + child_lines.push( + this.buildTreeLine( + item.child, + new_indent, + is_last_item, + parent_to_children, + visited + ) + ); + } + }); + + return [line, ...child_lines].join("\n"); + } + + private buildResultLine( + result: EventResult, + indent: string, + is_last: boolean, + parent_to_children: Map, + visited: Set + ): string { + const connector = is_last ? "└── " : "β”œβ”€β”€ "; + const status_icon = + result.status === "completed" + ? "βœ…" + : result.status === "error" + ? "❌" + : result.status === "started" + ? "πŸƒ" + : "⏳"; + + const handler_label = + result.handler_name && result.handler_name !== "anonymous" + ? result.handler_name + : result.handler_file_path + ? result.handler_file_path + : "anonymous"; + const handler_display = `${result.eventbus_name}.${handler_label}#${result.handler_id.slice(-4)}`; + let line = `${indent}${connector}${status_icon} ${handler_display}`; + + if (result.started_at) { + line += ` [${this.formatTimestamp(result.started_at)}`; + if (result.completed_at) { + const started_ms = Date.parse(result.started_at); + const completed_ms = Date.parse(result.completed_at); + if (!Number.isNaN(started_ms) && !Number.isNaN(completed_ms)) { + const duration = (completed_ms - started_ms) / 1000; + line += ` (${duration.toFixed(3)}s)`; + } + } + line += "]"; + } + + if (result.status === "error" && result.error) { + if (result.error instanceof EventHandlerTimeoutError) { + line += ` ⏱️ Timeout: ${result.error.message}`; + } else if (result.error instanceof EventHandlerCancelledError) { + line += ` 🚫 Cancelled: ${result.error.message}`; + } else { + const error_name = result.error instanceof Error ? result.error.name : "Error"; + const error_message = result.error instanceof Error ? result.error.message : String(result.error); + line += ` ☠️ ${error_name}: ${error_message}`; + } + } else if (result.status === "completed") { + line += ` β†’ ${this.formatResultValue(result.result)}`; + } + + const extension = is_last ? " " : "β”‚ "; + const new_indent = indent + extension; + + if (result.event_children.length === 0) { + return line; + } + + const child_lines: string[] = []; + const direct_children = result.event_children; + const parent_children = parent_to_children.get(result.event_id) ?? []; + const emitted_children = parent_children.filter( + (child) => child.event_emitted_by_handler_id === result.handler_id + ); + const combined_children = [...direct_children, ...emitted_children]; + const children_to_print = combined_children.filter( + (child) => !visited.has(child.event_id) + ); + + children_to_print.forEach((child, index) => { + child_lines.push( + this.buildTreeLine( + child, + new_indent, + index === children_to_print.length - 1, + parent_to_children, + visited + ) + ); + }); + + return [line, ...child_lines].join("\n"); + } + + private formatTimestamp(value?: string): string { + if (!value) { + return "N/A"; + } + const date = new Date(value); + if (Number.isNaN(date.getTime())) { + return "N/A"; + } + return date.toISOString().slice(11, 23); + } + + private inferHandlerFilePath(): string | null { + const stack = new Error().stack; + if (!stack) { + return null; + } + const lines = stack.split("\n").map((line) => line.trim()); + for (const line of lines) { + if (!line || line.startsWith("Error")) { + continue; + } + if ( + line.includes("event_bus.ts") || + line.includes("node:internal") || + line.includes("/node_modules/") + ) { + continue; + } + const match = line.match(/\(?(.+?:\d+:\d+)\)?$/); + if (match && match[1]) { + return match[1]; + } + } + return null; + } + + private formatResultValue(value: unknown): string { + if (value === null || value === undefined) { + return "None"; + } + if (value instanceof BaseEvent) { + return `Event(${value.event_type}#${value.event_id.slice(-4)})`; + } + if (typeof value === "string") { + return JSON.stringify(value); + } + if (typeof value === "number" || typeof value === "boolean") { + return String(value); + } + if (Array.isArray(value)) { + return `list(${value.length} items)`; + } + if (typeof value === "object") { + return `dict(${Object.keys(value as Record).length} items)`; + } + return `${typeof value}(...)`; + } + + private notifyFinders(event: BaseEvent): void { + for (const waiter of Array.from(this.find_waiters)) { + if (!this.eventMatchesKey(event, waiter.event_key)) { + continue; + } + if (!waiter.matches(event)) { + continue; + } + if (waiter.timeout_id) { + clearTimeout(waiter.timeout_id); + } + this.find_waiters.delete(waiter); + waiter.resolve(event); + } + } + + private collectHandlers(event: BaseEvent): EventHandler[] { + const handlers: EventHandler[] = []; + + const string_handlers = this.handlers_by_key.get(event.event_type); + if (string_handlers) { + handlers.push(...string_handlers); + } + + const class_handlers = this.handlers_by_key.get(event.constructor as EventClass); + if (class_handlers) { + handlers.push(...class_handlers); + } + + if (event.event_factory) { + const factory_handlers = this.handlers_by_key.get(event.event_factory as EventKey); + if (factory_handlers) { + handlers.push(...factory_handlers); + } + } + + if (event.event_key_symbol) { + const symbol_handlers = this.handlers_by_key.get(event.event_key_symbol); + if (symbol_handlers) { + handlers.push(...symbol_handlers); + } + } + + const wildcard_handlers = this.handlers_by_key.get("*"); + if (wildcard_handlers) { + handlers.push(...wildcard_handlers); + } + + return handlers; + } + + private eventMatchesKey(event: BaseEvent, event_key: EventKey): boolean { + if (event_key === "*") { + return true; + } + if (typeof event_key === "string") { + return event.event_type === event_key; + } + if (typeof event_key === "symbol") { + return event.event_key_symbol === event_key; + } + if (event.event_factory && event_key === event.event_factory) { + return true; + } + const ctor = event.constructor as EventClass & { factory?: Function }; + if (ctor.factory && event_key === ctor.factory) { + return true; + } + return event.constructor === event_key; + } + + private trimHistory(): void { + if (this.max_history_size === null) { + return; + } + if (this.event_history.length <= this.max_history_size) { + return; + } + + let remaining_overage = this.event_history.length - this.max_history_size; + + for (let i = 0; i < this.event_history.length && remaining_overage > 0; i += 1) { + const event = this.event_history[i]; + if (event.event_status !== "completed") { + continue; + } + this.event_history_by_id.delete(event.event_id); + this.event_history.splice(i, 1); + i -= 1; + remaining_overage -= 1; + } + + while (remaining_overage > 0 && this.event_history.length > 0) { + const event = this.event_history.shift(); + if (event) { + this.event_history_by_id.delete(event.event_id); + } + remaining_overage -= 1; + } + } +} diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts new file mode 100644 index 0000000..cc74016 --- /dev/null +++ b/bubus-ts/src/event_result.ts @@ -0,0 +1,54 @@ +import { v7 as uuidv7 } from "uuid"; + +import type { BaseEvent } from "./base_event.js"; + +export type EventResultStatus = "pending" | "started" | "completed" | "error"; + +export class EventResult { + id: string; + status: EventResultStatus; + event_id: string; + handler_id: string; + handler_name: string; + handler_file_path?: string; + eventbus_name: string; + started_at?: string; + completed_at?: string; + result?: unknown; + error?: unknown; + event_children: BaseEvent[]; + + constructor(params: { + event_id: string; + handler_id: string; + handler_name: string; + handler_file_path?: string; + eventbus_name: string; + }) { + this.id = uuidv7(); + this.status = "pending"; + this.event_id = params.event_id; + this.handler_id = params.handler_id; + this.handler_name = params.handler_name; + this.handler_file_path = params.handler_file_path; + this.eventbus_name = params.eventbus_name; + this.event_children = []; + } + + markStarted(): void { + this.status = "started"; + this.started_at = new Date().toISOString(); + } + + markCompleted(result: unknown): void { + this.status = "completed"; + this.result = result; + this.completed_at = new Date().toISOString(); + } + + markError(error: unknown): void { + this.status = "error"; + this.error = error; + this.completed_at = new Date().toISOString(); + } +} diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts new file mode 100644 index 0000000..969cdd6 --- /dev/null +++ b/bubus-ts/src/index.ts @@ -0,0 +1,11 @@ +export { BaseEvent, BaseEventSchema, extendEvent } from "./base_event.js"; +export { EventResult } from "./event_result.js"; +export { EventBus, EventHandlerTimeoutError, EventHandlerCancelledError } from "./event_bus.js"; +export type { + EventClass, + EventHandler, + EventKey, + EventStatus, + FindOptions, + FindWindow +} from "./types.js"; diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts new file mode 100644 index 0000000..f065d04 --- /dev/null +++ b/bubus-ts/src/types.ts @@ -0,0 +1,17 @@ +import type { BaseEvent } from "./base_event.js"; + +export type EventStatus = "pending" | "started" | "completed"; + +export type EventClass = new (...args: any[]) => T; + +export type EventKey = string | symbol | EventClass; + +export type EventHandler = (event: T) => void | Promise; + +export type FindWindow = boolean | number; + +export type FindOptions = { + past?: FindWindow; + future?: FindWindow; + child_of?: BaseEvent | null; +}; diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts new file mode 100644 index 0000000..f311702 --- /dev/null +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -0,0 +1,598 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const ParentEvent = BaseEvent.extend("ParentEvent", {}); +const ChildEvent = BaseEvent.extend("ChildEvent", {}); +const ImmediateChildEvent = BaseEvent.extend("ImmediateChildEvent", {}); +const QueuedChildEvent = BaseEvent.extend("QueuedChildEvent", {}); + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + +test("comprehensive patterns: forwarding, async/sync dispatch, parent tracking", async () => { + const bus_1 = new EventBus("bus1"); + const bus_2 = new EventBus("bus2"); + + const results: Array<[number, string]> = []; + const execution_counter = { count: 0 }; + + const child_bus2_event_handler = (event: BaseEvent): string => { + execution_counter.count += 1; + const seq = execution_counter.count; + const event_type_short = event.event_type.replace(/Event$/, ""); + results.push([seq, `bus2_handler_${event_type_short}`]); + return "forwarded bus result"; + }; + + bus_2.on("*", child_bus2_event_handler); + bus_1.on("*", bus_2.dispatch); + + const parent_bus1_handler = async (event: BaseEvent): Promise => { + execution_counter.count += 1; + const seq = execution_counter.count; + results.push([seq, "parent_start"]); + + const child_event_async = event.bus?.emit(QueuedChildEvent({}))!; + assert.notEqual(child_event_async.event_status, "completed"); + + const child_event_sync = await event.bus?.emit(ImmediateChildEvent({})).done()!; + assert.equal(child_event_sync.event_status, "completed"); + + assert.ok(child_event_sync.event_path.includes("bus2")); + assert.ok( + Array.from(child_event_sync.event_results.values()).some((result) => + result.handler_name.includes("dispatch") + ) + ); + + assert.equal(child_event_async.event_parent_id, event.event_id); + assert.equal(child_event_sync.event_parent_id, event.event_id); + + execution_counter.count += 1; + const end_seq = execution_counter.count; + results.push([end_seq, "parent_end"]); + return "parent_done"; + }; + + bus_1.on(ParentEvent, parent_bus1_handler); + + const parent_event = bus_1.dispatch(ParentEvent({})); + await parent_event.done(); + await bus_1.waitUntilIdle(); + await bus_2.waitUntilIdle(); + + const event_children = bus_1.event_history.filter( + (event) => + event.event_type === "ImmediateChildEvent" || event.event_type === "QueuedChildEvent" + ); + assert.ok(event_children.length > 0); + assert.ok( + event_children.every((event) => event.event_parent_id === parent_event.event_id) + ); + + const sorted_results = results.slice().sort((a, b) => a[0] - b[0]); + const execution_order = sorted_results.map((item) => item[1]); + + assert.equal(execution_order[0], "parent_start"); + assert.ok(execution_order.includes("bus2_handler_ImmediateChild")); + + if (execution_order.includes("parent_end")) { + const parent_end_idx = execution_order.indexOf("parent_end"); + assert.ok(parent_end_idx > 1); + } + + assert.equal( + execution_order.filter((value) => value === "bus2_handler_ImmediateChild").length, + 1 + ); + assert.equal( + execution_order.filter((value) => value === "bus2_handler_QueuedChild").length, + 1 + ); + assert.equal( + execution_order.filter((value) => value === "bus2_handler_Parent").length, + 1 + ); +}); + +test("race condition stress", async () => { + const bus_1 = new EventBus("bus1"); + const bus_2 = new EventBus("bus2"); + + const results: string[] = []; + + const child_handler = async (event: BaseEvent): Promise => { + const bus_name = event.event_path[event.event_path.length - 1] ?? "unknown"; + results.push(`child_${bus_name}`); + await delay(1); + return `child_done_${bus_name}`; + }; + + const parent_handler = async (event: BaseEvent): Promise => { + const children: BaseEvent[] = []; + + for (let i = 0; i < 3; i += 1) { + children.push(event.bus?.emit(QueuedChildEvent({}))!); + } + + for (let i = 0; i < 3; i += 1) { + const child = await event.bus?.emit(ImmediateChildEvent({})).done()!; + assert.equal(child.event_status, "completed"); + children.push(child); + } + + assert.ok(children.every((child) => child.event_parent_id === event.event_id)); + return "parent_done"; + }; + + const bad_handler = (_bad: BaseEvent): void => {}; + + bus_1.on("*", bus_2.dispatch); + bus_1.on(QueuedChildEvent, child_handler); + bus_1.on(ImmediateChildEvent, child_handler); + bus_2.on(QueuedChildEvent, child_handler); + bus_2.on(ImmediateChildEvent, child_handler); + bus_1.on(BaseEvent, parent_handler); + bus_1.on(BaseEvent, bad_handler); + + for (let run = 0; run < 5; run += 1) { + results.length = 0; + + const event = bus_1.dispatch(new BaseEvent({})); + await event.done(); + await bus_1.waitUntilIdle(); + await bus_2.waitUntilIdle(); + + assert.equal( + results.filter((value) => value === "child_bus1").length, + 6, + `Run ${run}: Expected 6 child_bus1, got ${results.filter((value) => value === "child_bus1").length}` + ); + assert.equal( + results.filter((value) => value === "child_bus2").length, + 6, + `Run ${run}: Expected 6 child_bus2, got ${results.filter((value) => value === "child_bus2").length}` + ); + } +}); + +test("awaited child jumps queue without overshoot", async () => { + const bus = new EventBus("TestBus", { max_history_size: 100 }); + const execution_order: string[] = []; + + const Event1 = BaseEvent.extend("Event1", {}); + const Event2 = BaseEvent.extend("Event2", {}); + const Event3 = BaseEvent.extend("Event3", {}); + const LocalChildEvent = BaseEvent.extend("ChildEvent", {}); + + const event1_handler = async (_event: BaseEvent): Promise => { + execution_order.push("Event1_start"); + const child = _event.bus?.emit(LocalChildEvent({}))!; + execution_order.push("Child_dispatched"); + await child.done(); + execution_order.push("Child_await_returned"); + execution_order.push("Event1_end"); + return "event1_done"; + }; + + const event2_handler = async (): Promise => { + execution_order.push("Event2_start"); + execution_order.push("Event2_end"); + return "event2_done"; + }; + + const event3_handler = async (): Promise => { + execution_order.push("Event3_start"); + execution_order.push("Event3_end"); + return "event3_done"; + }; + + const child_handler = async (): Promise => { + execution_order.push("Child_start"); + execution_order.push("Child_end"); + return "child_done"; + }; + + bus.on(Event1, event1_handler); + bus.on(Event2, event2_handler); + bus.on(Event3, event3_handler); + bus.on(LocalChildEvent, child_handler); + + const event_1 = bus.dispatch(Event1({})); + const event_2 = bus.dispatch(Event2({})); + const event_3 = bus.dispatch(Event3({})); + + await delay(0); + + await event_1.done(); + + assert.ok(execution_order.includes("Child_start")); + assert.ok(execution_order.includes("Child_end")); + const child_start_idx = execution_order.indexOf("Child_start"); + const child_end_idx = execution_order.indexOf("Child_end"); + const event1_end_idx = execution_order.indexOf("Event1_end"); + assert.ok(child_start_idx < event1_end_idx); + assert.ok(child_end_idx < event1_end_idx); + + assert.ok(!execution_order.includes("Event2_start")); + assert.ok(!execution_order.includes("Event3_start")); + + assert.equal(event_2.event_status, "pending"); + assert.equal(event_3.event_status, "pending"); + + await bus.waitUntilIdle(); + + const event2_start_idx = execution_order.indexOf("Event2_start"); + const event3_start_idx = execution_order.indexOf("Event3_start"); + assert.ok(event2_start_idx < event3_start_idx); + + assert.equal(event_2.event_status, "completed"); + assert.equal(event_3.event_status, "completed"); + + const history_list = bus.event_history; + const child_event = history_list.find((event) => event.event_type === "ChildEvent"); + const event2_from_history = history_list.find((event) => event.event_type === "Event2"); + const event3_from_history = history_list.find((event) => event.event_type === "Event3"); + + assert.ok(child_event?.event_started_at); + assert.ok(event2_from_history?.event_started_at); + assert.ok(event3_from_history?.event_started_at); + + assert.ok(child_event!.event_started_at! < event2_from_history!.event_started_at!); + assert.ok(child_event!.event_started_at! < event3_from_history!.event_started_at!); +}); + +test("dispatch multiple, await one skips others until after handler completes", async () => { + const bus = new EventBus("MultiDispatchBus", { max_history_size: 100 }); + const execution_order: string[] = []; + + const Event1 = BaseEvent.extend("Event1", {}); + const Event2 = BaseEvent.extend("Event2", {}); + const Event3 = BaseEvent.extend("Event3", {}); + const ChildA = BaseEvent.extend("ChildA", {}); + const ChildB = BaseEvent.extend("ChildB", {}); + const ChildC = BaseEvent.extend("ChildC", {}); + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push("Event1_start"); + + const child_a = event.bus?.emit(ChildA({}))!; + execution_order.push("ChildA_dispatched"); + + const child_b = event.bus?.emit(ChildB({}))!; + execution_order.push("ChildB_dispatched"); + + const child_c = event.bus?.emit(ChildC({}))!; + execution_order.push("ChildC_dispatched"); + + await child_b.done(); + execution_order.push("ChildB_await_returned"); + + execution_order.push("Event1_end"); + return "event1_done"; + }; + + const event2_handler = async (): Promise => { + execution_order.push("Event2_start"); + execution_order.push("Event2_end"); + return "event2_done"; + }; + + const event3_handler = async (): Promise => { + execution_order.push("Event3_start"); + execution_order.push("Event3_end"); + return "event3_done"; + }; + + const child_a_handler = async (): Promise => { + execution_order.push("ChildA_start"); + execution_order.push("ChildA_end"); + return "child_a_done"; + }; + + const child_b_handler = async (): Promise => { + execution_order.push("ChildB_start"); + execution_order.push("ChildB_end"); + return "child_b_done"; + }; + + const child_c_handler = async (): Promise => { + execution_order.push("ChildC_start"); + execution_order.push("ChildC_end"); + return "child_c_done"; + }; + + bus.on(Event1, event1_handler); + bus.on(Event2, event2_handler); + bus.on(Event3, event3_handler); + bus.on(ChildA, child_a_handler); + bus.on(ChildB, child_b_handler); + bus.on(ChildC, child_c_handler); + + const event_1 = bus.dispatch(Event1({})); + bus.dispatch(Event2({})); + bus.dispatch(Event3({})); + + await event_1.done(); + + assert.ok(execution_order.includes("ChildB_start")); + assert.ok(execution_order.includes("ChildB_end")); + + const child_b_end_idx = execution_order.indexOf("ChildB_end"); + const event1_end_idx = execution_order.indexOf("Event1_end"); + assert.ok(child_b_end_idx < event1_end_idx); + + if (execution_order.includes("ChildA_start")) { + const child_a_start_idx = execution_order.indexOf("ChildA_start"); + assert.ok(child_a_start_idx > event1_end_idx); + } + if (execution_order.includes("ChildC_start")) { + const child_c_start_idx = execution_order.indexOf("ChildC_start"); + assert.ok(child_c_start_idx > event1_end_idx); + } + if (execution_order.includes("Event2_start")) { + const event2_start_idx = execution_order.indexOf("Event2_start"); + assert.ok(event2_start_idx > event1_end_idx); + } + if (execution_order.includes("Event3_start")) { + const event3_start_idx = execution_order.indexOf("Event3_start"); + assert.ok(event3_start_idx > event1_end_idx); + } + + await bus.waitUntilIdle(); + + const event2_start_idx = execution_order.indexOf("Event2_start"); + const event3_start_idx = execution_order.indexOf("Event3_start"); + const child_a_start_idx = execution_order.indexOf("ChildA_start"); + const child_c_start_idx = execution_order.indexOf("ChildC_start"); + + assert.ok(event2_start_idx < event3_start_idx); + assert.ok(event3_start_idx < child_a_start_idx); + assert.ok(child_a_start_idx < child_c_start_idx); +}); + +test("multi-bus queues are independent when awaiting child", async () => { + const bus_1 = new EventBus("Bus1", { max_history_size: 100 }); + const bus_2 = new EventBus("Bus2", { max_history_size: 100 }); + const execution_order: string[] = []; + + const Event1 = BaseEvent.extend("Event1", {}); + const Event2 = BaseEvent.extend("Event2", {}); + const Event3 = BaseEvent.extend("Event3", {}); + const Event4 = BaseEvent.extend("Event4", {}); + const LocalChildEvent = BaseEvent.extend("ChildEvent", {}); + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push("Bus1_Event1_start"); + const child = event.bus?.emit(LocalChildEvent({}))!; + execution_order.push("Child_dispatched_to_Bus1"); + await child.done(); + execution_order.push("Child_await_returned"); + execution_order.push("Bus1_Event1_end"); + return "event1_done"; + }; + + const event2_handler = async (): Promise => { + execution_order.push("Bus1_Event2_start"); + execution_order.push("Bus1_Event2_end"); + return "event2_done"; + }; + + const event3_handler = async (): Promise => { + execution_order.push("Bus2_Event3_start"); + execution_order.push("Bus2_Event3_end"); + return "event3_done"; + }; + + const event4_handler = async (): Promise => { + execution_order.push("Bus2_Event4_start"); + execution_order.push("Bus2_Event4_end"); + return "event4_done"; + }; + + const child_handler = async (): Promise => { + execution_order.push("Child_start"); + execution_order.push("Child_end"); + return "child_done"; + }; + + bus_1.on(Event1, event1_handler); + bus_1.on(Event2, event2_handler); + bus_1.on(LocalChildEvent, child_handler); + + bus_2.on(Event3, event3_handler); + bus_2.on(Event4, event4_handler); + + const event_1 = bus_1.dispatch(Event1({})); + bus_1.dispatch(Event2({})); + bus_2.dispatch(Event3({})); + bus_2.dispatch(Event4({})); + + await delay(0); + + await event_1.done(); + + assert.ok(execution_order.includes("Child_start")); + assert.ok(execution_order.includes("Child_end")); + + const child_end_idx = execution_order.indexOf("Child_end"); + const event1_end_idx = execution_order.indexOf("Bus1_Event1_end"); + assert.ok(child_end_idx < event1_end_idx); + + assert.ok(!execution_order.includes("Bus1_Event2_start")); + assert.ok(!execution_order.includes("Bus2_Event3_start")); + assert.ok(!execution_order.includes("Bus2_Event4_start")); + + await bus_1.waitUntilIdle(); + await bus_2.waitUntilIdle(); + + assert.ok(execution_order.includes("Bus1_Event2_start")); + assert.ok(execution_order.includes("Bus2_Event3_start")); + assert.ok(execution_order.includes("Bus2_Event4_start")); +}); + +test("awaiting an already completed event is a no-op", async () => { + const bus = new EventBus("AlreadyCompletedBus", { max_history_size: 100 }); + const execution_order: string[] = []; + + const Event1 = BaseEvent.extend("Event1", {}); + const Event2 = BaseEvent.extend("Event2", {}); + + const event1_handler = async (): Promise => { + execution_order.push("Event1_start"); + execution_order.push("Event1_end"); + return "event1_done"; + }; + + const event2_handler = async (): Promise => { + execution_order.push("Event2_start"); + execution_order.push("Event2_end"); + return "event2_done"; + }; + + bus.on(Event1, event1_handler); + bus.on(Event2, event2_handler); + + const event_1 = await bus.dispatch(Event1({})).done(); + assert.equal(event_1.event_status, "completed"); + + const event_2 = bus.dispatch(Event2({})); + + await event_1.done(); + + assert.equal(event_2.event_status, "pending"); + + await bus.waitUntilIdle(); +}); + +test("multiple awaits on same event", async () => { + const bus = new EventBus("MultiAwaitBus", { max_history_size: 100 }); + const execution_order: string[] = []; + const await_results: string[] = []; + + const Event1 = BaseEvent.extend("Event1", {}); + const Event2 = BaseEvent.extend("Event2", {}); + const LocalChildEvent = BaseEvent.extend("ChildEvent", {}); + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push("Event1_start"); + + const child = event.bus?.emit(LocalChildEvent({}))!; + + const await_child = async (name: string): Promise => { + await child.done(); + await_results.push(`${name}_completed`); + }; + + await Promise.all([await_child("await1"), await_child("await2")]); + execution_order.push("Both_awaits_completed"); + execution_order.push("Event1_end"); + return "event1_done"; + }; + + const event2_handler = async (): Promise => { + execution_order.push("Event2_start"); + execution_order.push("Event2_end"); + return "event2_done"; + }; + + const child_handler = async (): Promise => { + execution_order.push("Child_start"); + await delay(10); + execution_order.push("Child_end"); + return "child_done"; + }; + + bus.on(Event1, event1_handler); + bus.on(Event2, event2_handler); + bus.on(LocalChildEvent, child_handler); + + const event_1 = bus.dispatch(Event1({})); + bus.dispatch(Event2({})); + + await event_1.done(); + + assert.equal(await_results.length, 2); + assert.ok(await_results.includes("await1_completed")); + assert.ok(await_results.includes("await2_completed")); + + assert.ok(execution_order.includes("Child_start")); + assert.ok(execution_order.includes("Child_end")); + const child_end_idx = execution_order.indexOf("Child_end"); + const event1_end_idx = execution_order.indexOf("Event1_end"); + assert.ok(child_end_idx < event1_end_idx); + + assert.ok(!execution_order.includes("Event2_start")); + + await bus.waitUntilIdle(); +}); + +test("deeply nested awaited children", async () => { + const bus = new EventBus("DeepNestedBus", { max_history_size: 100 }); + const execution_order: string[] = []; + + const Event1 = BaseEvent.extend("Event1", {}); + const Event2 = BaseEvent.extend("Event2", {}); + const Child1 = BaseEvent.extend("Child1", {}); + const Child2 = BaseEvent.extend("Child2", {}); + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push("Event1_start"); + const child1 = event.bus?.emit(Child1({}))!; + await child1.done(); + execution_order.push("Event1_end"); + return "event1_done"; + }; + + const child1_handler = async (event: BaseEvent): Promise => { + execution_order.push("Child1_start"); + const child2 = event.bus?.emit(Child2({}))!; + await child2.done(); + execution_order.push("Child1_end"); + return "child1_done"; + }; + + const child2_handler = async (): Promise => { + execution_order.push("Child2_start"); + execution_order.push("Child2_end"); + return "child2_done"; + }; + + const event2_handler = async (): Promise => { + execution_order.push("Event2_start"); + execution_order.push("Event2_end"); + return "event2_done"; + }; + + bus.on(Event1, event1_handler); + bus.on(Child1, child1_handler); + bus.on(Child2, child2_handler); + bus.on(Event2, event2_handler); + + const event_1 = bus.dispatch(Event1({})); + bus.dispatch(Event2({})); + + await event_1.done(); + + assert.ok(execution_order.includes("Child1_start")); + assert.ok(execution_order.includes("Child1_end")); + assert.ok(execution_order.includes("Child2_start")); + assert.ok(execution_order.includes("Child2_end")); + + const child2_end_idx = execution_order.indexOf("Child2_end"); + const child1_end_idx = execution_order.indexOf("Child1_end"); + const event1_end_idx = execution_order.indexOf("Event1_end"); + assert.ok(child2_end_idx < child1_end_idx); + assert.ok(child1_end_idx < event1_end_idx); + + assert.ok(!execution_order.includes("Event2_start")); + + await bus.waitUntilIdle(); + + const event2_start_idx = execution_order.indexOf("Event2_start"); + assert.ok(event2_start_idx > event1_end_idx); +}); diff --git a/bubus-ts/tests/debounce.test.ts b/bubus-ts/tests/debounce.test.ts new file mode 100644 index 0000000..c7275d6 --- /dev/null +++ b/bubus-ts/tests/debounce.test.ts @@ -0,0 +1,51 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { z } from "zod"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const ParentEvent = BaseEvent.extend("ParentEvent", {}); + +const ScreenshotEvent = BaseEvent.extend("ScreenshotEvent", { target_id: z.string() }); + +const SyncEvent = BaseEvent.extend("SyncEvent", {}); + +test("simple debounce uses recent history or dispatches new", async () => { + const bus = new EventBus("DebounceBus"); + + const parent_event = bus.dispatch(ParentEvent({})); + await parent_event.done(); + + const child_event = parent_event.bus?.emit(ScreenshotEvent({ target_id: "tab-1" })); + assert.ok(child_event); + await child_event.done(); + + const reused_event = + (await bus.find(ScreenshotEvent, { + past: 10, + future: false, + child_of: parent_event + })) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "fallback" })).done()); + + assert.equal(reused_event.event_id, child_event.event_id); + assert.equal(reused_event.event_parent_id, parent_event.event_id); +}); + +test("advanced debounce prefers history, then waits for future, then dispatches", async () => { + const bus = new EventBus("AdvancedDebounceBus"); + + const pending_event = bus.find(SyncEvent, { past: false, future: 0.5 }); + + setTimeout(() => { + bus.dispatch(SyncEvent({})); + }, 50); + + const resolved_event = + (await bus.find(SyncEvent, { past: true, future: false })) ?? + (await pending_event) ?? + (await bus.dispatch(SyncEvent({})).done()); + + assert.ok(resolved_event); + assert.equal(resolved_event.event_type, "SyncEvent"); +}); diff --git a/bubus-ts/tests/event_results.test.ts b/bubus-ts/tests/event_results.test.ts new file mode 100644 index 0000000..40577b9 --- /dev/null +++ b/bubus-ts/tests/event_results.test.ts @@ -0,0 +1,73 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { z } from "zod"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const StringResultEvent = BaseEvent.extend( + "StringResultEvent", + {}, + { event_result_schema: z.string(), event_result_type: "string" } +); + +const ObjectResultEvent = BaseEvent.extend( + "ObjectResultEvent", + {}, + { event_result_schema: z.object({ value: z.string(), count: z.number() }) } +); + +const NoResultSchemaEvent = BaseEvent.extend("NoResultSchemaEvent", {}); + +test("event results capture handler return values", async () => { + const bus = new EventBus("ResultCaptureBus"); + + bus.on(StringResultEvent, () => "ok"); + + const event = bus.dispatch(StringResultEvent({})); + await event.done(); + + assert.equal(event.event_results.size, 1); + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.equal(result.result, "ok"); +}); + +test("event_result_schema validates handler results", async () => { + const bus = new EventBus("ResultSchemaBus"); + + bus.on(ObjectResultEvent, () => ({ value: "hello", count: 2 })); + + const event = bus.dispatch(ObjectResultEvent({})); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.deepEqual(result.result, { value: "hello", count: 2 }); +}); + +test("invalid result marks handler error", async () => { + const bus = new EventBus("ResultSchemaErrorBus"); + + bus.on(ObjectResultEvent, () => ({ value: "bad", count: "nope" } as unknown)); + + const event = bus.dispatch(ObjectResultEvent({})); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "error"); + assert.ok(result.error instanceof Error); +}); + +test("event with no result schema stores raw values", async () => { + const bus = new EventBus("NoSchemaBus"); + + bus.on(NoResultSchemaEvent, () => ({ raw: true })); + + const event = bus.dispatch(NoResultSchemaEvent({})); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.deepEqual(result.result, { raw: true }); +}); diff --git a/bubus-ts/tests/fifo.test.ts b/bubus-ts/tests/fifo.test.ts new file mode 100644 index 0000000..5efede7 --- /dev/null +++ b/bubus-ts/tests/fifo.test.ts @@ -0,0 +1,41 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { z } from "zod"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const OrderEvent = BaseEvent.extend("OrderEvent", { order: z.number() }); + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + +test("events are processed in FIFO order", async () => { + const bus = new EventBus("FifoBus"); + + const processed_orders: number[] = []; + const handler_start_times: number[] = []; + + bus.on(OrderEvent, async (event) => { + handler_start_times.push(Date.now()); + if (event.order % 2 === 0) { + await delay(30); + } else { + await delay(5); + } + processed_orders.push(event.order); + }); + + for (let i = 0; i < 10; i += 1) { + bus.dispatch(OrderEvent({ order: i })); + } + + await bus.waitUntilIdle(); + + assert.deepEqual(processed_orders, Array.from({ length: 10 }, (_, i) => i)); + for (let i = 1; i < handler_start_times.length; i += 1) { + assert.ok(handler_start_times[i] >= handler_start_times[i - 1]); + } +}); diff --git a/bubus-ts/tests/find.test.ts b/bubus-ts/tests/find.test.ts new file mode 100644 index 0000000..da73a75 --- /dev/null +++ b/bubus-ts/tests/find.test.ts @@ -0,0 +1,131 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { z } from "zod"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const ParentEvent = BaseEvent.extend("ParentEvent", {}); +const ChildEvent = BaseEvent.extend("ChildEvent", {}); +const UnrelatedEvent = BaseEvent.extend("UnrelatedEvent", {}); +const ScreenshotEvent = BaseEvent.extend("ScreenshotEvent", { target_id: z.string() }); + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + +test("find past returns most recent completed event", async () => { + const bus = new EventBus("FindPastBus"); + + const first_event = bus.dispatch(ParentEvent({})); + await first_event.done(); + await delay(20); + const second_event = bus.dispatch(ParentEvent({})); + await second_event.done(); + + const found_event = await bus.find(ParentEvent, { past: true, future: false }); + assert.ok(found_event); + assert.equal(found_event.event_id, second_event.event_id); +}); + +test("find past window filters by time", async () => { + const bus = new EventBus("FindWindowBus"); + + const old_event = bus.dispatch(ParentEvent({})); + await old_event.done(); + await delay(120); + const new_event = bus.dispatch(ParentEvent({})); + await new_event.done(); + + const found_event = await bus.find(ParentEvent, { past: 0.1, future: false }); + assert.ok(found_event); + assert.equal(found_event.event_id, new_event.event_id); +}); + +test("find past returns null when all events are too old", async () => { + const bus = new EventBus("FindTooOldBus"); + + const old_event = bus.dispatch(ParentEvent({})); + await old_event.done(); + await delay(120); + + const found_event = await bus.find(ParentEvent, { past: 0.05, future: false }); + assert.equal(found_event, null); +}); + +test("find future waits for event", async () => { + const bus = new EventBus("FindFutureBus"); + + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }); + + setTimeout(() => { + bus.dispatch(ParentEvent({})); + }, 50); + + const found_event = await find_promise; + assert.ok(found_event); + assert.equal(found_event.event_type, "ParentEvent"); +}); + +test("find future times out when no event arrives", async () => { + const bus = new EventBus("FindFutureTimeoutBus"); + + const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }); + assert.equal(found_event, null); +}); + +test("find respects where filter", async () => { + const bus = new EventBus("FindWhereBus"); + + const event_a = bus.dispatch(ScreenshotEvent({ target_id: "tab-a" })); + const event_b = bus.dispatch(ScreenshotEvent({ target_id: "tab-b" })); + await event_a.done(); + await event_b.done(); + + const found_event = await bus.find( + ScreenshotEvent, + (event) => event.target_id === "tab-b", + { past: true, future: false } + ); + + assert.ok(found_event); + assert.equal(found_event.event_id, event_b.event_id); +}); + +test("find child_of returns child event", async () => { + const bus = new EventBus("FindChildBus"); + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({})); + }); + + const parent_event = bus.dispatch(ParentEvent({})); + await bus.waitUntilIdle(); + + const child_event = await bus.find(ChildEvent, { + past: true, + future: false, + child_of: parent_event + }); + + assert.ok(child_event); + assert.equal(child_event.event_parent_id, parent_event.event_id); +}); + +test("find child_of returns null for non-child", async () => { + const bus = new EventBus("FindNonChildBus"); + + const parent_event = bus.dispatch(ParentEvent({})); + const unrelated_event = bus.dispatch(UnrelatedEvent({})); + await parent_event.done(); + await unrelated_event.done(); + + const found_event = await bus.find(UnrelatedEvent, { + past: true, + future: false, + child_of: parent_event + }); + + assert.equal(found_event, null); +}); diff --git a/bubus-ts/tests/forwarding.test.ts b/bubus-ts/tests/forwarding.test.ts new file mode 100644 index 0000000..b03884d --- /dev/null +++ b/bubus-ts/tests/forwarding.test.ts @@ -0,0 +1,123 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { z } from "zod"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const PingEvent = BaseEvent.extend("PingEvent", { value: z.number() }); + +test("events forward between buses without duplication", async () => { + const bus_a = new EventBus("BusA"); + const bus_b = new EventBus("BusB"); + const bus_c = new EventBus("BusC"); + + const seen_a: string[] = []; + const seen_b: string[] = []; + const seen_c: string[] = []; + + bus_a.on(PingEvent, (event) => { + seen_a.push(event.event_id); + }); + + bus_b.on(PingEvent, (event) => { + seen_b.push(event.event_id); + }); + + bus_c.on(PingEvent, (event) => { + seen_c.push(event.event_id); + }); + + bus_a.on("*", bus_b.dispatch); + bus_b.on("*", bus_c.dispatch); + + const event = bus_a.dispatch(PingEvent({ value: 1 })); + + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + await bus_c.waitUntilIdle(); + + assert.equal(seen_a.length, 1); + assert.equal(seen_b.length, 1); + assert.equal(seen_c.length, 1); + + assert.equal(seen_a[0], event.event_id); + assert.equal(seen_b[0], event.event_id); + assert.equal(seen_c[0], event.event_id); + + assert.deepEqual(event.event_path, ["BusA", "BusB", "BusC"]); +}); + +test("await event.done waits for handlers on forwarded buses", async () => { + const bus_a = new EventBus("BusA"); + const bus_b = new EventBus("BusB"); + const bus_c = new EventBus("BusC"); + + const completion_log: string[] = []; + + const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + + bus_a.on(PingEvent, async () => { + await delay(10); + completion_log.push("A"); + }); + + bus_b.on(PingEvent, async () => { + await delay(30); + completion_log.push("B"); + }); + + bus_c.on(PingEvent, async () => { + await delay(50); + completion_log.push("C"); + }); + + bus_a.on("*", bus_b.dispatch); + bus_b.on("*", bus_c.dispatch); + + const event = bus_a.dispatch(PingEvent({ value: 2 })); + + await event.done(); + + assert.deepEqual(completion_log.sort(), ["A", "B", "C"]); + assert.equal(event.event_pending_buses, 0); +}); + +test("await event.done waits when forwarding handler is async-delayed", async () => { + const bus_a = new EventBus("BusA"); + const bus_b = new EventBus("BusB"); + + const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + + let bus_a_done = false; + let bus_b_done = false; + + bus_a.on(PingEvent, async () => { + await delay(20); + bus_a_done = true; + }); + + bus_b.on(PingEvent, async () => { + await delay(10); + bus_b_done = true; + }); + + bus_a.on("*", async (event) => { + await delay(30); + bus_b.dispatch(event); + }); + + const event = bus_a.dispatch(PingEvent({ value: 3 })); + await event.done(); + + assert.equal(bus_a_done, true); + assert.equal(bus_b_done, true); + assert.equal(event.event_pending_buses, 0); + assert.deepEqual(event.event_path, ["BusA", "BusB"]); +}); diff --git a/bubus-ts/tests/parent_child.test.ts b/bubus-ts/tests/parent_child.test.ts new file mode 100644 index 0000000..f95b700 --- /dev/null +++ b/bubus-ts/tests/parent_child.test.ts @@ -0,0 +1,64 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const ParentEvent = BaseEvent.extend("ParentEvent", {}); +const ChildEvent = BaseEvent.extend("ChildEvent", {}); +const GrandchildEvent = BaseEvent.extend("GrandchildEvent", {}); +const UnrelatedEvent = BaseEvent.extend("UnrelatedEvent", {}); + +test("eventIsChildOf and eventIsParentOf work for direct children", async () => { + const bus = new EventBus("ParentChildBus"); + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({})); + }); + + const parent_event = bus.dispatch(ParentEvent({})); + await bus.waitUntilIdle(); + + const child_event = bus.event_history.find((event) => event.event_type === "ChildEvent"); + assert.ok(child_event); + + assert.equal(child_event.event_parent_id, parent_event.event_id); + assert.equal(bus.eventIsChildOf(child_event, parent_event), true); + assert.equal(bus.eventIsParentOf(parent_event, child_event), true); +}); + +test("eventIsChildOf works for grandchildren", async () => { + const bus = new EventBus("GrandchildBus"); + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({})); + }); + + bus.on(ChildEvent, (event) => { + event.bus?.emit(GrandchildEvent({})); + }); + + const parent_event = bus.dispatch(ParentEvent({})); + await bus.waitUntilIdle(); + + const child_event = bus.event_history.find((event) => event.event_type === "ChildEvent"); + const grandchild_event = bus.event_history.find((event) => event.event_type === "GrandchildEvent"); + + assert.ok(child_event); + assert.ok(grandchild_event); + + assert.equal(bus.eventIsChildOf(child_event, parent_event), true); + assert.equal(bus.eventIsChildOf(grandchild_event, parent_event), true); + assert.equal(bus.eventIsParentOf(parent_event, grandchild_event), true); +}); + +test("eventIsChildOf returns false for unrelated events", async () => { + const bus = new EventBus("UnrelatedBus"); + + const parent_event = bus.dispatch(ParentEvent({})); + const unrelated_event = bus.dispatch(UnrelatedEvent({})); + await parent_event.done(); + await unrelated_event.done(); + + assert.equal(bus.eventIsChildOf(unrelated_event, parent_event), false); + assert.equal(bus.eventIsParentOf(parent_event, unrelated_event), false); +}); diff --git a/bubus-ts/tests/performance.test.ts b/bubus-ts/tests/performance.test.ts new file mode 100644 index 0000000..8901986 --- /dev/null +++ b/bubus-ts/tests/performance.test.ts @@ -0,0 +1,36 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const SimpleEvent = BaseEvent.extend("SimpleEvent", {}); + +test( + "processes 20k events within reasonable time", + { timeout: 120_000 }, + async () => { + const bus = new EventBus("PerfBus", { max_history_size: 1000 }); + + let processed_count = 0; + bus.on(SimpleEvent, () => { + processed_count += 1; + }); + + const total_events = 20_000; + const start = Date.now(); + + const pending: Array> = []; + for (let i = 0; i < total_events; i += 1) { + pending.push(bus.dispatch(SimpleEvent({}))); + } + + await Promise.all(pending.map((event) => event.done())); + await bus.waitUntilIdle(); + + const duration_ms = Date.now() - start; + + assert.equal(processed_count, total_events); + assert.ok(duration_ms < 120_000, `Processing took ${duration_ms}ms`); + assert.ok(bus.event_history.length <= bus.max_history_size); + } +); diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts new file mode 100644 index 0000000..3a1ed45 --- /dev/null +++ b/bubus-ts/tests/timeout.test.ts @@ -0,0 +1,43 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { BaseEvent, EventBus, EventHandlerTimeoutError } from "../src/index.js"; + +const TimeoutEvent = BaseEvent.extend("TimeoutEvent", {}); + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + +test("handler timeout marks EventResult as error", async () => { + const bus = new EventBus("TimeoutBus"); + + bus.on(TimeoutEvent, async () => { + await delay(50); + return "slow"; + }); + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "error"); + assert.ok(result.error instanceof EventHandlerTimeoutError); +}); + +test("handler completes within timeout", async () => { + const bus = new EventBus("TimeoutOkBus"); + + bus.on(TimeoutEvent, async () => { + await delay(5); + return "fast"; + }); + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.equal(result.result, "fast"); +}); diff --git a/bubus-ts/tsconfig.base.json b/bubus-ts/tsconfig.base.json new file mode 100644 index 0000000..4694aa9 --- /dev/null +++ b/bubus-ts/tsconfig.base.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES2022", + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "noImplicitAny": true, + "module": "ESNext", + "moduleResolution": "node", + "sourceMap": true, + "inlineSources": true, + "declaration": true, + "skipLibCheck": true + } +} diff --git a/bubus-ts/tsconfig.json b/bubus-ts/tsconfig.json new file mode 100644 index 0000000..9071125 --- /dev/null +++ b/bubus-ts/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "target": "ES2022", + "lib": ["ES2022", "DOM"], + "module": "ESNext", + "moduleResolution": "Bundler", + "strict": true, + "skipLibCheck": true, + "noEmitOnError": true, + "declaration": true, + "emitDeclarationOnly": false, + "outDir": "dist/types", + "rootDir": "src", + "forceConsistentCasingInFileNames": true, + "useDefineForClassFields": true + }, + "include": ["src"] +} From 5230ba0cf76a98308c77920e6c8c1f80104b978e Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 4 Feb 2026 17:35:28 -0800 Subject: [PATCH 39/79] use monotonic timestamps and fix bus proxy --- bubus-ts/src/base_event.ts | 12 ++++++++++-- bubus-ts/src/event_bus.ts | 28 +++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index d1095e9..3141668 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -50,6 +50,7 @@ export type EventExtendOptions = { }; export class BaseEvent { + static _last_timestamp_ms = 0; event_id: string; event_created_at: string; event_type: string; @@ -129,6 +130,13 @@ export class BaseEvent { return 300; } + static nextIsoTimestamp(): string { + const now_ms = Date.now(); + const next_ms = Math.max(now_ms, BaseEvent._last_timestamp_ms + 1); + BaseEvent._last_timestamp_ms = next_ms; + return new Date(next_ms).toISOString(); + } + static extend( shape: TShape, options?: EventExtendOptions @@ -198,7 +206,7 @@ export class BaseEvent { return; } this.event_status = "started"; - this.event_started_at = new Date().toISOString(); + this.event_started_at = BaseEvent.nextIsoTimestamp(); } markCompleted(): void { @@ -206,7 +214,7 @@ export class BaseEvent { return; } this.event_status = "completed"; - this.event_completed_at = new Date().toISOString(); + this.event_completed_at = BaseEvent.nextIsoTimestamp(); this.ensureDonePromise(); if (this._done_resolve) { this._done_resolve(this as this); diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index 1389a82..ec668a9 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -133,6 +133,9 @@ export class EventBus { dispatch(event: T, event_key?: EventKey): T { const original_event = event._original_event ?? event; + if (!original_event.bus) { + original_event.bus = this; + } if (!Array.isArray(original_event.event_path)) { original_event.event_path = []; } @@ -648,10 +651,33 @@ export class EventBus { _getBusScopedEvent(event: T): T { const original_event = event._original_event ?? event; const bus = this; + const parent_event_id = original_event.event_id; + const bus_proxy = new Proxy(bus, { + get(target, prop, receiver) { + if (prop === "dispatch" || prop === "emit") { + return (child_event: BaseEvent, event_key?: EventKey) => { + const original_child = child_event._original_event ?? child_event; + if (!original_child.event_parent_id) { + original_child.event_parent_id = parent_event_id; + } + const current_handler = bus.handler_stack[bus.handler_stack.length - 1]; + if (!current_handler || current_handler.event_id !== parent_event_id) { + bus.recordChildEvent(parent_event_id, original_child); + } + const dispatcher = Reflect.get(target, prop, receiver) as ( + event: BaseEvent, + event_key?: EventKey + ) => BaseEvent; + return dispatcher.call(target, original_child, event_key); + }; + } + return Reflect.get(target, prop, receiver); + } + }); const scoped = new Proxy(original_event, { get(target, prop, receiver) { if (prop === "bus") { - return bus; + return bus_proxy; } if (prop === "_original_event") { return target; From b5cda39f6b0602f9142e6981abe47e5bbf351877 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 4 Feb 2026 17:36:08 -0800 Subject: [PATCH 40/79] add ts readme --- bubus-ts/README.md | 81 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 bubus-ts/README.md diff --git a/bubus-ts/README.md b/bubus-ts/README.md new file mode 100644 index 0000000..bf1f97a --- /dev/null +++ b/bubus-ts/README.md @@ -0,0 +1,81 @@ +# bubus-ts: Python vs JS Differences (and the tricky parts) + +This README only covers the differences between the Python implementation and this TypeScript port, plus the +gotchas we uncovered while matching behavior. It intentionally does **not** re-document the full TS API surface. + +## Key Differences vs Python + +### 1) Awaiting events: `event.done()` instead of `await event` +- Python: `await event` waits for handlers and can jump the queue when awaited inside a handler. +- TS: use `await event.done()` for the same behavior. +- Outside a handler, `done()` just waits for completion (it does not jump the queue). +- Inside a handler, `done()` triggers immediate processing (queue jump) on **all buses** where the event is queued. + +### 2) Cross-bus queue jump (forwarding) +- Python uses a global re-entrant lock to let awaited events process immediately on every bus where they appear. +- TS does **not** use AsyncLocalStorage or a global lock (browser support). +- Instead, `EventBus.instances` + `run_now_depth` pauses each runloop and processes the same event immediately across buses. + +### 3) `event.bus` is a BusScopedEvent view +- In Python, `event.event_bus` is dynamic (contextvars). +- In TS, `event.bus` is provided by a **BusScopedEvent** (a Proxy over the original event). +- That proxy injects a bus-bound `emit/dispatch` to ensure correct parent/child tracking. + +### 4) Monotonic timestamps +- JS `Date.now()` is not strictly monotonic at millisecond granularity. +- To keep FIFO tests stable, we generate strictly increasing ISO timestamps via `BaseEvent.nextIsoTimestamp()`. + +### 5) No middleware, no WAL, no SQLite mirrors +- Those Python features were intentionally dropped for the JS version. + +## Gotchas and Design Choices (What surprised us) + +### A) Why we keep a handler stack (context without AsyncLocalStorage) +We need to know **which handler is currently executing** to correctly assign: +- `event_parent_id` +- `event_emitted_by_handler_id` +- and to attach child events under the correct handler in the tree. + +Looking at `EventResult.status` alone is not enough because multiple handlers can be `started` at the same time +(nested awaits). The stack gives us deterministic, correct parentage without AsyncLocalStorage. + +### B) Why `run_now_depth` exists +When an event is awaited inside a handler, the event must **jump the queue**. If the runloop continues normally, +it could process unrelated events ("overshoot"), breaking FIFO guarantees. + +`run_now_depth` pauses the runloop while we run the awaited event immediately. Once the queue-jump completes, +the runloop resumes in FIFO order. This matches the Python behavior. + +### C) BusScopedEvent: why it exists and how it works +Forwarding exposes a subtle bug: if you pass the **same event object** to another bus, a naive implementation +can mutate `event.bus` mid-handler and break parent-child tracking. + +To prevent that: +- Handlers always receive a **BusScopedEvent** (Proxy of the original event). +- Its `bus` property is a proxy over the real `EventBus`. +- That proxy intercepts `emit/dispatch` to set `event_parent_id` and attach children to the correct handler. +- The original event object is still the canonical one stored in history. + +### D) Cross-bus immediate processing (forwarding + awaiting) +When you `await event.done()` inside a handler: +- the system finds all buses that have this event queued (using `EventBus.instances` + `event_path`) +- pauses their runloops +- processes the event immediately on each bus +- then resumes the runloops + +This gives the same "awaited events jump the queue" semantics as Python, but without a global lock. + +### E) Why `event.bus` is required for `done()` +`done()` is the signal to run an event immediately when called inside a handler. Without a bus, we can't +perform the queue jump, so `done()` throws if no bus is attached. + +## Summary +The core contract is preserved: +- FIFO order +- child event tracking +- forwarding +- await-inside-handler queue jump + +But the **implementation details are different** because JS needs browser compatibility and lacks Python’s +contextvars + asyncio primitives. The stack, runloop pause, and BusScopedEvent proxy are the key differences +that make the behavior match in practice. From b04db570c694d9969f4cfd8fa410f1c03959ff59 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 4 Feb 2026 17:37:31 -0800 Subject: [PATCH 41/79] remove unused configs --- bubus-ts/eslint.config.mjs | 21 --------------------- bubus-ts/tsconfig.base.json | 14 -------------- 2 files changed, 35 deletions(-) delete mode 100644 bubus-ts/eslint.config.mjs delete mode 100644 bubus-ts/tsconfig.base.json diff --git a/bubus-ts/eslint.config.mjs b/bubus-ts/eslint.config.mjs deleted file mode 100644 index 75978ee..0000000 --- a/bubus-ts/eslint.config.mjs +++ /dev/null @@ -1,21 +0,0 @@ -import globals from "globals"; -import pluginJs from "@eslint/js"; -import tseslint from "typescript-eslint"; - -/** @type {import('eslint').Linter.Config[]} */ -export default [ - { - files: ["**/*.{js,cjs,mjs,ts}"], - languageOptions: { globals: globals.node }, - }, - { - ignores: [ - "**/dist/**", - "**/node_modules/**", - "**/*.config.mjs", - "**/*.json", - ], - }, - pluginJs.configs.recommended, - ...tseslint.configs.recommended, -]; diff --git a/bubus-ts/tsconfig.base.json b/bubus-ts/tsconfig.base.json deleted file mode 100644 index 4694aa9..0000000 --- a/bubus-ts/tsconfig.base.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "compilerOptions": { - "target": "ES2022", - "esModuleInterop": true, - "allowSyntheticDefaultImports": true, - "noImplicitAny": true, - "module": "ESNext", - "moduleResolution": "node", - "sourceMap": true, - "inlineSources": true, - "declaration": true, - "skipLibCheck": true - } -} From 500ed8e1c0088b22b0d858dfe46112853cb7630b Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 4 Feb 2026 17:41:25 -0800 Subject: [PATCH 42/79] fix eslint --- bubus-ts/examples/log_tree_demo.ts | 2 +- bubus-ts/src/event_bus.ts | 2 +- bubus-ts/src/types.ts | 2 +- bubus-ts/tests/comprehensive_patterns.test.ts | 5 ++--- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/bubus-ts/examples/log_tree_demo.ts b/bubus-ts/examples/log_tree_demo.ts index 9897478..0d22c3f 100644 --- a/bubus-ts/examples/log_tree_demo.ts +++ b/bubus-ts/examples/log_tree_demo.ts @@ -56,7 +56,7 @@ async function main(): Promise { bus_a.on(RootEvent, root_fast_handler); bus_a.on(RootEvent, root_slow_handler); - async function child_slow_handler(event: InstanceType): Promise { + async function child_slow_handler(_event: InstanceType): Promise { await delay(200); return "child_slow_handler_done"; } diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index ec668a9..aea09e6 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -242,7 +242,7 @@ export class EventBus { return null; } - return new Promise((resolve, reject) => { + return new Promise((resolve, _reject) => { const waiter: FindWaiter = { event_key, matches, diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts index f065d04..d7abad6 100644 --- a/bubus-ts/src/types.ts +++ b/bubus-ts/src/types.ts @@ -10,7 +10,7 @@ export type EventHandler = (event: T) => void | export type FindWindow = boolean | number; -export type FindOptions = { +export type FindOptions = { past?: FindWindow; future?: FindWindow; child_of?: BaseEvent | null; diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index f311702..70dca74 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -4,7 +4,6 @@ import { test } from "node:test"; import { BaseEvent, EventBus } from "../src/index.js"; const ParentEvent = BaseEvent.extend("ParentEvent", {}); -const ChildEvent = BaseEvent.extend("ChildEvent", {}); const ImmediateChildEvent = BaseEvent.extend("ImmediateChildEvent", {}); const QueuedChildEvent = BaseEvent.extend("QueuedChildEvent", {}); @@ -260,13 +259,13 @@ test("dispatch multiple, await one skips others until after handler completes", const event1_handler = async (event: BaseEvent): Promise => { execution_order.push("Event1_start"); - const child_a = event.bus?.emit(ChildA({}))!; + event.bus?.emit(ChildA({})); execution_order.push("ChildA_dispatched"); const child_b = event.bus?.emit(ChildB({}))!; execution_order.push("ChildB_dispatched"); - const child_c = event.bus?.emit(ChildC({}))!; + event.bus?.emit(ChildC({})); execution_order.push("ChildC_dispatched"); await child_b.done(); From 6b0cf8c38c78d9b4e1a2d0e9585a56b617830a89 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Wed, 4 Feb 2026 17:58:41 -0800 Subject: [PATCH 43/79] Fix log tree behavior and child completion --- bubus-ts/examples/log_tree_demo.ts | 20 ++- bubus-ts/src/base_event.ts | 26 ++++ bubus-ts/src/event_bus.ts | 124 +++++++++++++--- bubus-ts/tests/log_tree.test.ts | 231 +++++++++++++++++++++++++++++ 4 files changed, 370 insertions(+), 31 deletions(-) create mode 100644 bubus-ts/tests/log_tree.test.ts diff --git a/bubus-ts/examples/log_tree_demo.ts b/bubus-ts/examples/log_tree_demo.ts index 0d22c3f..95cc845 100644 --- a/bubus-ts/examples/log_tree_demo.ts +++ b/bubus-ts/examples/log_tree_demo.ts @@ -40,7 +40,7 @@ async function main(): Promise { async function root_fast_handler(event: InstanceType): Promise { await delay(10); const child = event.bus?.emit( - ChildEvent({ tab_id: "tab-123", event_timeout: 0.05 }) + ChildEvent({ tab_id: "tab-123", event_timeout: 0.1 }) ); if (child) { await child.done(); @@ -48,8 +48,11 @@ async function main(): Promise { return "root_fast_handler_ok"; } - async function root_slow_handler(): Promise { - await delay(120); + async function root_slow_handler(event: InstanceType): Promise { + event.bus?.emit( + ChildEvent({ tab_id: "tab-timeout", event_timeout: 0.1 }) + ); + await delay(400); return "root_slow_handler_timeout"; } @@ -57,15 +60,18 @@ async function main(): Promise { bus_a.on(RootEvent, root_slow_handler); async function child_slow_handler(_event: InstanceType): Promise { - await delay(200); + await delay(150); return "child_slow_handler_done"; } async function child_fast_handler(event: InstanceType): Promise { await delay(10); - event.bus?.emit( + const grandchild = event.bus?.emit( GrandchildEvent({ status: "ok", event_timeout: 0.05 }) ); + if (grandchild) { + await grandchild.done(); + } return "child_handler_ok"; } @@ -75,7 +81,7 @@ async function main(): Promise { } async function grandchild_slow_handler(): Promise { - await delay(80); + await delay(60); return "grandchild_slow_handler_timeout"; } @@ -85,7 +91,7 @@ async function main(): Promise { bus_b.on(GrandchildEvent, grandchild_slow_handler); const root_event = bus_a.dispatch( - RootEvent({ url: "https://example.com", event_timeout: 0.05 }) + RootEvent({ url: "https://example.com", event_timeout: 0.25 }) ); await root_event.done(); diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index 3141668..d32608a 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -236,6 +236,32 @@ export class BaseEvent { } } + eventAreAllChildrenComplete(visited: Set = new Set()): boolean { + if (visited.has(this.event_id)) { + return true; + } + visited.add(this.event_id); + for (const child of this.event_children) { + if (child.event_status !== "completed") { + return false; + } + if (!child.eventAreAllChildrenComplete(visited)) { + return false; + } + } + return true; + } + + tryFinalizeCompletion(): void { + if (this.event_pending_buses > 0) { + return; + } + if (!this.eventAreAllChildrenComplete()) { + return; + } + this.markCompleted(); + } + ensureDonePromise(): void { if (this._done_promise) { return; diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index aea09e6..c417e01 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -71,6 +71,15 @@ type EventBusOptions = { export class EventBus { static instances: Set = new Set(); + static findEventById(event_id: string): BaseEvent | null { + for (const bus of EventBus.instances) { + const event = bus.event_history_by_id.get(event_id); + if (event) { + return event; + } + } + return null; + } name: string; max_history_size: number | null; @@ -83,6 +92,7 @@ export class EventBus { find_waiters: Set; handler_stack: EventResult[]; handler_file_paths: Map; + handler_ids: Map; run_now_depth: number; run_now_waiters: Array<() => void>; inside_handler_depth: number; @@ -100,6 +110,7 @@ export class EventBus { this.find_waiters = new Set(); this.handler_stack = []; this.handler_file_paths = new Map(); + this.handler_ids = new Map(); this.run_now_depth = 0; this.run_now_waiters = []; this.inside_handler_depth = 0; @@ -131,6 +142,16 @@ export class EventBus { handler_set.delete(handler as EventHandler); } + private getHandlerId(handler: EventHandler): string { + const existing = this.handler_ids.get(handler); + if (existing) { + return existing; + } + const handler_id = uuidv7(); + this.handler_ids.set(handler, handler_id); + return handler_id; + } + dispatch(event: T, event_key?: EventKey): T { const original_event = event._original_event ?? event; if (!original_event.bus) { @@ -155,9 +176,13 @@ export class EventBus { const current_handler = this.handler_stack[this.handler_stack.length - 1]; if (current_handler) { const parent_event = this.event_history_by_id.get(current_handler.event_id); - if (parent_event && !original_event.event_parent_id) { - original_event.event_parent_id = parent_event.event_id; - this.recordChildEvent(parent_event.event_id, original_event); + if (parent_event) { + if (!original_event.event_parent_id) { + original_event.event_parent_id = parent_event.event_id; + } + if (original_event.event_parent_id === parent_event.event_id) { + this.recordChildEvent(parent_event.event_id, original_event); + } } } @@ -165,6 +190,8 @@ export class EventBus { this.event_history_by_id.set(original_event.event_id, original_event); this.trimHistory(); + this.createPendingHandlerResults(original_event); + original_event.event_pending_buses += 1; this.pending_queue.push(original_event); this.startRunloop(); @@ -316,12 +343,16 @@ export class EventBus { const original_child = child_event._original_event ?? child_event; const parent_event = this.event_history_by_id.get(parent_event_id); if (parent_event) { - parent_event.event_children.push(original_child); + if (!parent_event.event_children.some((child) => child.event_id === original_child.event_id)) { + parent_event.event_children.push(original_child); + } } const current_result = this.handler_stack[this.handler_stack.length - 1]; if (current_result) { - current_result.event_children.push(original_child); + if (!current_result.event_children.some((child) => child.event_id === original_child.event_id)) { + current_result.event_children.push(original_child); + } original_child.event_emitted_by_handler_id = current_result.handler_id; } } @@ -517,15 +548,20 @@ export class EventBus { const handlers = this.collectHandlers(event); const handler_results = handlers.map((handler) => { const handler_name = handler.name || "anonymous"; - const handler_id = uuidv7(); - const result = new EventResult({ - event_id: event.event_id, - handler_id, - handler_name, - handler_file_path: this.handler_file_paths.get(handler) ?? undefined, - eventbus_name: this.name - }); - event.event_results.set(handler_id, result); + const handler_id = this.getHandlerId(handler); + const existing = event.event_results.get(handler_id); + const result = + existing ?? + new EventResult({ + event_id: event.event_id, + handler_id, + handler_name, + handler_file_path: this.handler_file_paths.get(handler) ?? undefined, + eventbus_name: this.name + }); + if (!existing) { + event.event_results.set(handler_id, result); + } return { handler, result }; }); @@ -578,10 +614,10 @@ export class EventBus { } } - event.event_pending_buses -= 1; - if (event.event_pending_buses <= 0) { - event.event_pending_buses = 0; - event.markCompleted(); + event.event_pending_buses = Math.max(0, event.event_pending_buses - 1); + event.tryFinalizeCompletion(); + if (event.event_status === "completed") { + this.notifyParentsFor(event); } } @@ -648,6 +684,23 @@ export class EventBus { ); } + private notifyParentsFor(event: BaseEvent): void { + const visited = new Set(); + let parent_id = event.event_parent_id; + while (parent_id && !visited.has(parent_id)) { + visited.add(parent_id); + const parent = EventBus.findEventById(parent_id); + if (!parent) { + break; + } + parent.tryFinalizeCompletion(); + if (parent.event_status !== "completed") { + break; + } + parent_id = parent.event_parent_id; + } + } + _getBusScopedEvent(event: T): T { const original_event = event._original_event ?? event; const bus = this; @@ -660,10 +713,6 @@ export class EventBus { if (!original_child.event_parent_id) { original_child.event_parent_id = parent_event_id; } - const current_handler = bus.handler_stack[bus.handler_stack.length - 1]; - if (!current_handler || current_handler.event_id !== parent_event_id) { - bus.recordChildEvent(parent_event_id, original_child); - } const dispatcher = Reflect.get(target, prop, receiver) as ( event: BaseEvent, event_key?: EventKey @@ -864,8 +913,16 @@ export class EventBus { const emitted_children = parent_children.filter( (child) => child.event_emitted_by_handler_id === result.handler_id ); - const combined_children = [...direct_children, ...emitted_children]; - const children_to_print = combined_children.filter( + const children_by_id = new Map(); + direct_children.forEach((child) => { + children_by_id.set(child.event_id, child); + }); + emitted_children.forEach((child) => { + if (!children_by_id.has(child.event_id)) { + children_by_id.set(child.event_id, child); + } + }); + const children_to_print = Array.from(children_by_id.values()).filter( (child) => !visited.has(child.event_id) ); @@ -958,6 +1015,25 @@ export class EventBus { } } + private createPendingHandlerResults(event: BaseEvent): void { + const handlers = this.collectHandlers(event); + handlers.forEach((handler) => { + const handler_id = this.getHandlerId(handler); + if (event.event_results.has(handler_id)) { + return; + } + const handler_name = handler.name || "anonymous"; + const result = new EventResult({ + event_id: event.event_id, + handler_id, + handler_name, + handler_file_path: this.handler_file_paths.get(handler) ?? undefined, + eventbus_name: this.name + }); + event.event_results.set(handler_id, result); + }); + } + private collectHandlers(event: BaseEvent): EventHandler[] { const handlers: EventHandler[] = []; diff --git a/bubus-ts/tests/log_tree.test.ts b/bubus-ts/tests/log_tree.test.ts new file mode 100644 index 0000000..51f0e34 --- /dev/null +++ b/bubus-ts/tests/log_tree.test.ts @@ -0,0 +1,231 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { z } from "zod"; + +import { BaseEvent, EventBus, EventResult } from "../src/index.js"; + +const RootEvent = BaseEvent.extend("RootEvent", { data: z.string().optional() }); +const ChildEvent = BaseEvent.extend("ChildEvent", { value: z.number().optional() }); +const GrandchildEvent = BaseEvent.extend("GrandchildEvent", { nested: z.record(z.number()).optional() }); + +class ValueError extends Error { + constructor(message: string) { + super(message); + this.name = "ValueError"; + } +} + +test("logTree: single event", () => { + const bus = new EventBus("SingleBus"); + + const event = RootEvent({ data: "test" }); + event.event_status = "completed"; + event.event_completed_at = event.event_created_at; + + bus.event_history.push(event); + bus.event_history_by_id.set(event.event_id, event); + + const output = bus.logTree(); + + assert.ok(output.includes("└── βœ… RootEvent#")); + assert.ok(output.includes("[") && output.includes("]")); +}); + +test("logTree: with handler results", () => { + const bus = new EventBus("HandlerBus"); + + const event = RootEvent({ data: "test" }); + event.event_status = "completed"; + event.event_completed_at = event.event_created_at; + + const handler_id = "handler-1"; + const result = new EventResult({ + event_id: event.event_id, + handler_id, + handler_name: "test_handler", + eventbus_name: "HandlerBus" + }); + result.markStarted(); + result.markCompleted("status: success"); + event.event_results.set(handler_id, result); + + bus.event_history.push(event); + bus.event_history_by_id.set(event.event_id, event); + + const output = bus.logTree(); + + assert.ok(output.includes("└── βœ… RootEvent#")); + assert.ok(output.includes("HandlerBus.test_handler#")); + assert.ok(output.includes("\"status: success\"")); +}); + +test("logTree: with handler errors", () => { + const bus = new EventBus("ErrorBus"); + + const event = RootEvent({ data: "test" }); + event.event_status = "completed"; + event.event_completed_at = event.event_created_at; + + const handler_id = "handler-2"; + const result = new EventResult({ + event_id: event.event_id, + handler_id, + handler_name: "error_handler", + eventbus_name: "ErrorBus" + }); + result.markStarted(); + result.markError(new ValueError("Test error message")); + event.event_results.set(handler_id, result); + + bus.event_history.push(event); + bus.event_history_by_id.set(event.event_id, event); + + const output = bus.logTree(); + + assert.ok(output.includes("ErrorBus.error_handler#")); + assert.ok(output.includes("ValueError: Test error message")); +}); + +test("logTree: complex nested", () => { + const bus = new EventBus("ComplexBus"); + + const root = RootEvent({ data: "root_data" }); + root.event_status = "completed"; + root.event_completed_at = root.event_created_at; + + const root_handler_id = "handler-root"; + const root_result = new EventResult({ + event_id: root.event_id, + handler_id: root_handler_id, + handler_name: "root_handler", + eventbus_name: "ComplexBus" + }); + root_result.markStarted(); + root_result.markCompleted("Root processed"); + root.event_results.set(root_handler_id, root_result); + + const child = ChildEvent({ value: 100 }); + child.event_parent_id = root.event_id; + child.event_status = "completed"; + child.event_completed_at = child.event_created_at; + root_result.event_children.push(child); + + const child_handler_id = "handler-child"; + const child_result = new EventResult({ + event_id: child.event_id, + handler_id: child_handler_id, + handler_name: "child_handler", + eventbus_name: "ComplexBus" + }); + child_result.markStarted(); + child_result.markCompleted([1, 2, 3]); + child.event_results.set(child_handler_id, child_result); + + const grandchild = GrandchildEvent({}); + grandchild.event_parent_id = child.event_id; + grandchild.event_status = "completed"; + grandchild.event_completed_at = grandchild.event_created_at; + child_result.event_children.push(grandchild); + + const grandchild_handler_id = "handler-grandchild"; + const grandchild_result = new EventResult({ + event_id: grandchild.event_id, + handler_id: grandchild_handler_id, + handler_name: "grandchild_handler", + eventbus_name: "ComplexBus" + }); + grandchild_result.markStarted(); + grandchild_result.markCompleted(null); + grandchild.event_results.set(grandchild_handler_id, grandchild_result); + + bus.event_history.push(root, child, grandchild); + bus.event_history_by_id.set(root.event_id, root); + bus.event_history_by_id.set(child.event_id, child); + bus.event_history_by_id.set(grandchild.event_id, grandchild); + + const output = bus.logTree(); + + assert.ok(output.includes("βœ… RootEvent#")); + assert.ok(output.includes("βœ… ComplexBus.root_handler#")); + assert.ok(output.includes("βœ… ChildEvent#")); + assert.ok(output.includes("βœ… ComplexBus.child_handler#")); + assert.ok(output.includes("βœ… GrandchildEvent#")); + assert.ok(output.includes("βœ… ComplexBus.grandchild_handler#")); + assert.ok(output.includes("\"Root processed\"")); + assert.ok(output.includes("list(3 items)")); + assert.ok(output.includes("None")); +}); + +test("logTree: multiple roots", () => { + const bus = new EventBus("MultiBus"); + + const root1 = RootEvent({ data: "first" }); + root1.event_status = "completed"; + root1.event_completed_at = root1.event_created_at; + + const root2 = RootEvent({ data: "second" }); + root2.event_status = "completed"; + root2.event_completed_at = root2.event_created_at; + + bus.event_history.push(root1, root2); + bus.event_history_by_id.set(root1.event_id, root1); + bus.event_history_by_id.set(root2.event_id, root2); + + const output = bus.logTree(); + + assert.equal(output.split("β”œβ”€β”€ βœ… RootEvent#").length - 1, 1); + assert.equal(output.split("└── βœ… RootEvent#").length - 1, 1); +}); + +test("logTree: timing info", () => { + const bus = new EventBus("TimingBus"); + + const event = RootEvent({}); + event.event_status = "completed"; + event.event_completed_at = event.event_created_at; + + const handler_id = "handler-time"; + const result = new EventResult({ + event_id: event.event_id, + handler_id, + handler_name: "timed_handler", + eventbus_name: "TimingBus" + }); + result.markStarted(); + result.markCompleted("done"); + event.event_results.set(handler_id, result); + + bus.event_history.push(event); + bus.event_history_by_id.set(event.event_id, event); + + const output = bus.logTree(); + + assert.ok(output.includes("(")); + assert.ok(output.includes("s)")); +}); + +test("logTree: running handler", () => { + const bus = new EventBus("RunningBus"); + + const event = RootEvent({}); + event.event_status = "started"; + + const handler_id = "handler-running"; + const result = new EventResult({ + event_id: event.event_id, + handler_id, + handler_name: "running_handler", + eventbus_name: "RunningBus" + }); + result.markStarted(); + event.event_results.set(handler_id, result); + + bus.event_history.push(event); + bus.event_history_by_id.set(event.event_id, event); + + const output = bus.logTree(); + + assert.ok(output.includes("RunningBus.running_handler#")); + assert.ok(output.includes("RootEvent#")); +}); From d891ff25ef86fc97592992b62ed02bf8d2f6058c Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Thu, 5 Feb 2026 10:43:59 -0800 Subject: [PATCH 44/79] add asynclocalstorage --- bubus-ts/src/async_context.ts | 43 +++ bubus-ts/src/base_event.ts | 2 + bubus-ts/src/event_bus.ts | 8 +- bubus-ts/tests/context_propagation.test.ts | 349 +++++++++++++++++++++ 4 files changed, 400 insertions(+), 2 deletions(-) create mode 100644 bubus-ts/src/async_context.ts create mode 100644 bubus-ts/tests/context_propagation.test.ts diff --git a/bubus-ts/src/async_context.ts b/bubus-ts/src/async_context.ts new file mode 100644 index 0000000..fc44a3d --- /dev/null +++ b/bubus-ts/src/async_context.ts @@ -0,0 +1,43 @@ +type AsyncLocalStorageLike = { + getStore(): unknown; + run(store: unknown, callback: () => T): T; + enterWith?(store: unknown): void; +}; + +export let async_local_storage: AsyncLocalStorageLike | null = null; + +const is_node = + typeof process !== "undefined" && + typeof process.versions !== "undefined" && + typeof process.versions.node === "string"; + +if (is_node) { + try { + const importer = new Function( + "specifier", + "return import(specifier)" + ) as (specifier: string) => Promise<{ AsyncLocalStorage?: new () => AsyncLocalStorageLike }>; + const mod = await importer("node:async_hooks"); + if (mod?.AsyncLocalStorage) { + async_local_storage = new mod.AsyncLocalStorage(); + } + } catch { + async_local_storage = null; + } +} + +export const capture_async_context = (): unknown | null => { + if (!async_local_storage) { + return null; + } + return async_local_storage.getStore() ?? null; +}; + +export const run_with_async_context = (context: unknown | null, fn: () => T): T => { + if (!async_local_storage) { + return fn(); + } + return async_local_storage.run(context ?? undefined, fn); +}; + +export const has_async_local_storage = (): boolean => async_local_storage !== null; diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index d32608a..e85ce21 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -73,6 +73,7 @@ export class BaseEvent { event_key_symbol?: symbol; bus?: EventBus; _original_event?: BaseEvent; + _dispatch_context?: unknown | null; static schema = BaseEventSchema; static event_type?: string; @@ -124,6 +125,7 @@ export class BaseEvent { this._done_promise = null; this._done_resolve = null; this._done_reject = null; + this._dispatch_context = undefined; } static defaultTimeout(): number { diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index c417e01..0728779 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -1,5 +1,6 @@ import { BaseEvent } from "./base_event.js"; import { EventResult } from "./event_result.js"; +import { capture_async_context, run_with_async_context } from "./async_context.js"; import { v7 as uuidv7 } from "uuid"; @@ -160,6 +161,9 @@ export class EventBus { if (!Array.isArray(original_event.event_path)) { original_event.event_path = []; } + if (original_event._dispatch_context === undefined) { + original_event._dispatch_context = capture_async_context(); + } if (typeof event_key === "symbol") { original_event.event_key_symbol = event_key; @@ -629,7 +633,7 @@ export class EventBus { handler_event: BaseEvent = event ): Promise { if (event.event_timeout === null) { - return handler(handler_event); + return run_with_async_context(event._dispatch_context ?? null, () => handler(handler_event)); } const timeout_seconds = event.event_timeout; @@ -656,7 +660,7 @@ export class EventBus { }, timeout_ms); Promise.resolve() - .then(() => handler(handler_event)) + .then(() => run_with_async_context(event._dispatch_context ?? null, () => handler(handler_event))) .then((value) => { if (settled) { return; diff --git a/bubus-ts/tests/context_propagation.test.ts b/bubus-ts/tests/context_propagation.test.ts new file mode 100644 index 0000000..ef3c6b3 --- /dev/null +++ b/bubus-ts/tests/context_propagation.test.ts @@ -0,0 +1,349 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { BaseEvent, EventBus } from "../src/index.js"; +import { async_local_storage, has_async_local_storage } from "../src/async_context.js"; + +type ContextStore = { + request_id?: string; + user_id?: string; + trace_id?: string; +}; + +const SimpleEvent = BaseEvent.extend("SimpleEvent", {}); +const ChildEvent = BaseEvent.extend("ChildEvent", {}); + +const skip_if_no_async_local_storage = !has_async_local_storage(); + +const require_async_local_storage = () => { + assert.ok(async_local_storage, "AsyncLocalStorage not available"); + return async_local_storage; +}; + +const get_store = (store: ContextStore | undefined | null): ContextStore => store ?? {}; + +test( + "context propagates to handler", + { skip: skip_if_no_async_local_storage }, + async () => { + const bus = new EventBus("ContextTestBus"); + const captured_values: ContextStore = {}; + const storage = require_async_local_storage(); + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined; + captured_values.request_id = store?.request_id; + captured_values.user_id = store?.user_id; + }); + + await storage.run( + { request_id: "req-12345", user_id: "user-abc" }, + async () => { + const event = bus.dispatch(SimpleEvent({})); + await event.done(); + } + ); + + assert.equal(captured_values.request_id, "req-12345"); + assert.equal(captured_values.user_id, "user-abc"); + } +); + +test( + "context propagates through nested handlers", + { skip: skip_if_no_async_local_storage }, + async () => { + const bus = new EventBus("NestedContextBus"); + const captured_parent: ContextStore = {}; + const captured_child: ContextStore = {}; + const storage = require_async_local_storage(); + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined; + captured_parent.request_id = store?.request_id; + captured_parent.trace_id = store?.trace_id; + + const child = event.bus?.dispatch(ChildEvent({})); + if (child) { + await child.done(); + } + }); + + bus.on(ChildEvent, () => { + const store = storage.getStore() as ContextStore | undefined; + captured_child.request_id = store?.request_id; + captured_child.trace_id = store?.trace_id; + }); + + await storage.run( + { request_id: "req-nested-123", trace_id: "trace-xyz" }, + async () => { + const event = bus.dispatch(SimpleEvent({})); + await event.done(); + } + ); + + assert.equal(captured_parent.request_id, "req-nested-123"); + assert.equal(captured_parent.trace_id, "trace-xyz"); + assert.equal(captured_child.request_id, "req-nested-123"); + assert.equal(captured_child.trace_id, "trace-xyz"); + } +); + +test( + "context isolation between dispatches", + { skip: skip_if_no_async_local_storage }, + async () => { + const bus = new EventBus("IsolationTestBus"); + const captured_values: string[] = []; + const storage = require_async_local_storage(); + + bus.on(SimpleEvent, async () => { + const store = storage.getStore() as ContextStore | undefined; + captured_values.push(store?.request_id ?? ""); + }); + + const event_a = storage.run({ request_id: "req-A" }, () => bus.dispatch(SimpleEvent({}))); + const event_b = storage.run({ request_id: "req-B" }, () => bus.dispatch(SimpleEvent({}))); + + await event_a.done(); + await event_b.done(); + + assert.ok(captured_values.includes("req-A")); + assert.ok(captured_values.includes("req-B")); + } +); + +test( + "context propagates to multiple handlers", + { skip: skip_if_no_async_local_storage }, + async () => { + const bus = new EventBus("ParallelContextBus"); + const captured_values: string[] = []; + const storage = require_async_local_storage(); + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined; + captured_values.push(`h1:${store?.request_id ?? ""}`); + }); + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined; + captured_values.push(`h2:${store?.request_id ?? ""}`); + }); + + await storage.run({ request_id: "req-parallel" }, async () => { + const event = bus.dispatch(SimpleEvent({})); + await event.done(); + }); + + assert.ok(captured_values.includes("h1:req-parallel")); + assert.ok(captured_values.includes("h2:req-parallel")); + } +); + +test( + "context propagates through event forwarding", + { skip: skip_if_no_async_local_storage }, + async () => { + const bus_a = new EventBus("BusA"); + const bus_b = new EventBus("BusB"); + const captured_bus_a: ContextStore = {}; + const captured_bus_b: ContextStore = {}; + const storage = require_async_local_storage(); + + bus_a.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined; + captured_bus_a.request_id = store?.request_id; + }); + + bus_b.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined; + captured_bus_b.request_id = store?.request_id; + }); + + bus_a.on("*", bus_b.dispatch); + + await storage.run({ request_id: "req-forwarded" }, async () => { + const event = bus_a.dispatch(SimpleEvent({})); + await event.done(); + await bus_b.waitUntilIdle(); + }); + + assert.equal(captured_bus_a.request_id, "req-forwarded"); + assert.equal(captured_bus_b.request_id, "req-forwarded"); + } +); + +test( + "handler can modify context without affecting parent", + { skip: skip_if_no_async_local_storage }, + async () => { + const bus = new EventBus("ModifyContextBus"); + const storage = require_async_local_storage(); + let parent_value_after_child = ""; + + bus.on(SimpleEvent, async (event) => { + if (!storage.enterWith) { + throw new Error("AsyncLocalStorage.enterWith is required for this test"); + } + storage.enterWith({ request_id: "parent-value" }); + const child = event.bus?.dispatch(ChildEvent({})); + if (child) { + await child.done(); + } + const store = get_store(storage.getStore() as ContextStore | undefined); + parent_value_after_child = store.request_id ?? ""; + }); + + bus.on(ChildEvent, () => { + if (!storage.enterWith) { + throw new Error("AsyncLocalStorage.enterWith is required for this test"); + } + storage.enterWith({ request_id: "child-modified" }); + }); + + await storage.run({}, async () => { + const event = bus.dispatch(SimpleEvent({})); + await event.done(); + }); + + assert.equal(parent_value_after_child, "parent-value"); + } +); + +test( + "event parent_id tracking still works with context propagation", + { skip: skip_if_no_async_local_storage }, + async () => { + const bus = new EventBus("ParentIdTrackingBus"); + const storage = require_async_local_storage(); + let parent_event_id: string | undefined; + let child_event_parent_id: string | undefined; + + bus.on(SimpleEvent, async (event) => { + parent_event_id = event.event_id; + const child = event.bus?.dispatch(ChildEvent({})); + if (child) { + await child.done(); + } + }); + + bus.on(ChildEvent, (event) => { + child_event_parent_id = event.event_parent_id; + }); + + await storage.run({ request_id: "req-parent-tracking" }, async () => { + const event = bus.dispatch(SimpleEvent({})); + await event.done(); + }); + + assert.ok(parent_event_id); + assert.ok(child_event_parent_id); + assert.equal(child_event_parent_id, parent_event_id); + } +); + +test( + "dispatch context and parent_id both work together", + { skip: skip_if_no_async_local_storage }, + async () => { + const bus = new EventBus("CombinedContextBus"); + const storage = require_async_local_storage(); + const results: Record = {}; + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined; + results.parent_request_id = store?.request_id; + results.parent_event_id = event.event_id; + const child = event.bus?.dispatch(ChildEvent({})); + if (child) { + await child.done(); + } + }); + + bus.on(ChildEvent, (event) => { + const store = storage.getStore() as ContextStore | undefined; + results.child_request_id = store?.request_id; + results.child_event_parent_id = event.event_parent_id; + }); + + await storage.run({ request_id: "req-combined-test" }, async () => { + const event = bus.dispatch(SimpleEvent({})); + await event.done(); + }); + + assert.equal(results.parent_request_id, "req-combined-test"); + assert.equal(results.child_request_id, "req-combined-test"); + assert.equal(results.child_event_parent_id, results.parent_event_id); + } +); + +test( + "deeply nested context and parent tracking", + { skip: skip_if_no_async_local_storage }, + async () => { + const bus = new EventBus("DeepNestingBus"); + const storage = require_async_local_storage(); + const results: Array<{ + level: number; + request_id?: string; + event_id: string; + parent_id?: string; + }> = []; + + const Level2Event = BaseEvent.extend("Level2Event", {}); + const Level3Event = BaseEvent.extend("Level3Event", {}); + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined; + results.push({ + level: 1, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id + }); + const child = event.bus?.dispatch(Level2Event({})); + if (child) { + await child.done(); + } + }); + + bus.on(Level2Event, async (event) => { + const store = storage.getStore() as ContextStore | undefined; + results.push({ + level: 2, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id + }); + const child = event.bus?.dispatch(Level3Event({})); + if (child) { + await child.done(); + } + }); + + bus.on(Level3Event, (event) => { + const store = storage.getStore() as ContextStore | undefined; + results.push({ + level: 3, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id + }); + }); + + await storage.run({ request_id: "req-deep-nesting" }, async () => { + const event = bus.dispatch(SimpleEvent({})); + await event.done(); + }); + + assert.equal(results.length, 3); + for (const result of results) { + assert.equal(result.request_id, "req-deep-nesting"); + } + assert.equal(results[0].parent_id, undefined); + assert.equal(results[1].parent_id, results[0].event_id); + assert.equal(results[2].parent_id, results[1].event_id); + } +); From 40a248e421466e688eea2d5e8dee9b6e8484c6bc Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Thu, 5 Feb 2026 16:15:50 -0800 Subject: [PATCH 45/79] fix tests and improve naming consistency --- bubus-ts/README.md | 174 ++- bubus-ts/examples/log_tree_demo.ts | 34 +- bubus-ts/src/async_context.ts | 6 +- bubus-ts/src/base_event.ts | 210 ++-- bubus-ts/src/event_bus.ts | 859 +++++++++---- bubus-ts/src/index.ts | 4 +- bubus-ts/src/semaphores.ts | 82 ++ bubus-ts/src/types.ts | 11 +- bubus-ts/tests/comprehensive_patterns.test.ts | 25 +- bubus-ts/tests/context_propagation.test.ts | 4 +- bubus-ts/tests/debounce.test.ts | 83 ++ bubus-ts/tests/event_results.test.ts | 19 +- bubus-ts/tests/find.test.ts | 452 +++++++ bubus-ts/tests/handlers.test.ts | 152 +++ bubus-ts/tests/locking.test.ts | 1063 +++++++++++++++++ bubus-ts/tests/timeout.test.ts | 475 +++++++- bubus-ts/tests/typed_results.test.ts | 195 +++ 17 files changed, 3486 insertions(+), 362 deletions(-) create mode 100644 bubus-ts/src/semaphores.ts create mode 100644 bubus-ts/tests/handlers.test.ts create mode 100644 bubus-ts/tests/locking.test.ts create mode 100644 bubus-ts/tests/typed_results.test.ts diff --git a/bubus-ts/README.md b/bubus-ts/README.md index bf1f97a..b967cc2 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -14,7 +14,7 @@ gotchas we uncovered while matching behavior. It intentionally does **not** re-d ### 2) Cross-bus queue jump (forwarding) - Python uses a global re-entrant lock to let awaited events process immediately on every bus where they appear. - TS does **not** use AsyncLocalStorage or a global lock (browser support). -- Instead, `EventBus.instances` + `run_now_depth` pauses each runloop and processes the same event immediately across buses. +- Instead, `EventBus.instances` + `immediate_processing_stack_depth` pauses each runloop and processes the same event immediately across buses. ### 3) `event.bus` is a BusScopedEvent view - In Python, `event.event_bus` is dynamic (contextvars). @@ -28,22 +28,182 @@ gotchas we uncovered while matching behavior. It intentionally does **not** re-d ### 5) No middleware, no WAL, no SQLite mirrors - Those Python features were intentionally dropped for the JS version. +### 6) Default timeouts come from the EventBus +- `BaseEvent.event_timeout` defaults to `null`. +- When dispatched, `EventBus` applies its default `event_timeout` (60s unless configured). +- You can set `{ event_timeout: null }` on the bus to disable timeouts entirely. +- Handlers that exceed 15s emit a warning (deadlock detection signal); the event still continues unless a timeout is hit. + +## EventBus Options +All options are passed to `new EventBus(name, options)`. + +- `max_history_size?: number | null` (default: `100`) + - Max number of events kept in history. Set to `null` for unlimited history. +- `event_concurrency?: "global-serial" | "bus-serial" | "parallel" | "auto"` (default: `"bus-serial"`) + - Controls how many **events** can be processed at a time. + - `"global-serial"` enforces FIFO across all buses. + - `"bus-serial"` enforces FIFO per bus, allows cross-bus overlap. + - `"parallel"` allows events to process concurrently. + - `"auto"` uses the bus default (mostly useful for overrides). +- `handler_concurrency?: "global-serial" | "bus-serial" | "parallel" | "auto"` (default: `"bus-serial"`) + - Controls how many **handlers** run at once for each event. + - Same semantics as `event_concurrency`, but applied to handler execution. +- `event_timeout?: number | null` (default: `60`) + - Default handler timeout in seconds, applied when `event.event_timeout` is `null`. + - Set to `null` to disable timeouts globally for the bus. + +## Concurrency Overrides and Precedence + +You can override concurrency per event and per handler: + +```ts +const FastEvent = BaseEvent.extend("FastEvent", { + payload: z.string() +}); + +// Per-event override (highest precedence) +const event = FastEvent({ + payload: "x", + event_concurrency: "parallel", + handler_concurrency: "parallel" +}); + +// Per-handler override (lower precedence) +bus.on(FastEvent, handler, { handler_concurrency: "parallel" }); +``` + +Precedence order (highest β†’ lowest): +1. Event instance overrides (`event_concurrency`, `handler_concurrency`) +2. Handler options (`handler_concurrency`) +3. Bus defaults (`event_concurrency`, `handler_concurrency`) + +`"auto"` resolves to the bus default. + +## Handler Options + +Handlers can be configured with `HandlerOptions`: + +```ts +bus.on(SomeEvent, handler, { + order: -10, // serial ordering (lower runs earlier) + handler_concurrency: "parallel" +}); +``` + +- `order: number` runs handlers in ascending order (serial). +- `order: null` puts the handler into the parallel bucket. +- `handler_concurrency` allows per-handler overrides. + +If an event sets `handler_concurrency: "parallel"`, that wins even if a handler is ordered. + +## Limiters (how concurrency is enforced) + +We use four limiters: + +- `EventBus.global_event_limiter` +- `EventBus.global_handler_limiter` +- `bus.bus_event_limiter` +- `bus.bus_handler_limiter` + +They are applied centrally when scheduling events and handlers, so concurrency is controlled without scattering +mutex checks throughout the code. + +## Full lifecycle across concurrency modes + +Below is the complete execution flow for nested events, including forwarding across buses, and how it behaves +under different `event_concurrency` / `handler_concurrency` configurations. + +### 1) Base execution flow (applies to all modes) + +**Dispatch (non-awaited):** +1. `dispatch()` normalizes to `original_event`, sets `bus` if missing. +2. Captures `_dispatch_context` (AsyncLocalStorage if available). +3. Applies `event_timeout_default` if `event.event_timeout === null`. +4. If this bus is already in `event_path` (or `eventHasVisited()`), return a BusScopedEvent without queueing. +5. Append bus name to `event_path`, record child relationship (if `event_parent_id` is set). +6. Add to `event_history` + `event_history_by_id`. +7. Increment `event_pending_buses`. +8. Push to `pending_event_queue` and `startRunloop()`. + +**Runloop + processing:** +1. `runloop()` drains `pending_event_queue`. +2. Adds event id to `in_flight_event_ids`. +3. Calls `scheduleEventProcessing()` (async). +4. `scheduleEventProcessing()` selects the event limiter and runs `processEvent()`. +5. `processEvent()`: + - `event.markStarted()` + - `notifyFinders(event)` + - creates handler results (`event_results`) + - runs handlers (respecting handler limiter) + - decrements `event_pending_buses` and calls `event.tryFinalizeCompletion()` + +### 2) Event concurrency modes (`event_concurrency`) + +- **`global-serial`**: events are serialized across *all* buses using the global event limiter. +- **`bus-serial`**: events are serialized per bus; different buses can overlap. +- **`parallel`**: no event limiter; events can run concurrently on the same bus. +- **`auto`**: resolves to the bus default. + +**Mixed buses:** each bus enforces its own event mode. Forwarding to another bus does not inherit the source bus’s mode. + +### 3) Handler concurrency modes (`handler_concurrency`) + +`handler_concurrency` controls how handlers run **for a single event**: + +- **`global-serial`**: only one handler at a time across all buses using the global handler limiter. +- **`bus-serial`**: handlers serialize per bus. +- **`parallel`**: handlers run concurrently for the event. +- **`auto`**: resolves to the bus default. + +**Interaction with event concurrency:** +Even if events are parallel, handlers can still be serialized: +`event_concurrency: "parallel"` + `handler_concurrency: "bus-serial"` means events start concurrently but handler execution on a bus is serialized. + +### 4) Forwarding across buses (non-awaited) + +When a handler on Bus A calls `bus_b.dispatch(event)` without awaiting: +- Bus A continues running its handler. +- Bus B queues and processes the event according to **Bus B’s** concurrency settings. +- No coupling unless both buses use the global limiters. + +### 5) Queue-jump (`await event.done()` inside handlers) + +When `event.done()` is awaited inside a handler, **queue-jump** happens: + +1. `BaseEvent.done()` detects it’s inside a handler and calls `_runImmediately()`. +2. `_runImmediately()` removes the event from the pending queue (if present). +3. `runImmediatelyAcrossBuses()` processes the event immediately on all buses where it is queued. +4. While immediate processing is active, each affected bus increments `immediate_processing_stack_depth`, + and its `runloop()` pauses to prevent unrelated events from running. +5. Once immediate processing completes, `immediate_processing_waiters` resume the paused runloops. + +**Important:** queue-jump bypasses all event and handler limiters to guarantee correctness and FIFO semantics. + +### 6) Precedence recap + +Highest β†’ lowest: +1. Event instance fields (`event_concurrency`, `handler_concurrency`) +2. Handler options (`handler_concurrency`) +3. Bus defaults + +`"auto"` always resolves to the bus default. + ## Gotchas and Design Choices (What surprised us) -### A) Why we keep a handler stack (context without AsyncLocalStorage) -We need to know **which handler is currently executing** to correctly assign: +### A) Handler attribution without AsyncLocalStorage +We need to know **which handler emitted a child** to correctly assign: - `event_parent_id` - `event_emitted_by_handler_id` - and to attach child events under the correct handler in the tree. -Looking at `EventResult.status` alone is not enough because multiple handlers can be `started` at the same time -(nested awaits). The stack gives us deterministic, correct parentage without AsyncLocalStorage. +In TS we do this by injecting a **BusScopedEvent** into handlers, which captures the active handler id and +propagates it via `event_emitted_by_handler_id`. This keeps parentage deterministic even with nested awaits. -### B) Why `run_now_depth` exists +### B) Why `immediate_processing_stack_depth` exists When an event is awaited inside a handler, the event must **jump the queue**. If the runloop continues normally, it could process unrelated events ("overshoot"), breaking FIFO guarantees. -`run_now_depth` pauses the runloop while we run the awaited event immediately. Once the queue-jump completes, +`immediate_processing_stack_depth` pauses the runloop while we run the awaited event immediately. Once the queue-jump completes, the runloop resumes in FIFO order. This matches the Python behavior. ### C) BusScopedEvent: why it exists and how it works diff --git a/bubus-ts/examples/log_tree_demo.ts b/bubus-ts/examples/log_tree_demo.ts index 95cc845..192346a 100644 --- a/bubus-ts/examples/log_tree_demo.ts +++ b/bubus-ts/examples/log_tree_demo.ts @@ -2,23 +2,23 @@ import { z } from "zod"; import { BaseEvent, EventBus } from "../src/index.js"; -const RootEvent = BaseEvent.extend( - "RootEvent", - { url: z.string() }, - { event_result_schema: z.string(), event_result_type: "string" } -); - -const ChildEvent = BaseEvent.extend( - "ChildEvent", - { tab_id: z.string() }, - { event_result_schema: z.string(), event_result_type: "string" } -); - -const GrandchildEvent = BaseEvent.extend( - "GrandchildEvent", - { status: z.string() }, - { event_result_schema: z.string(), event_result_type: "string" } -); +const RootEvent = BaseEvent.extend("RootEvent", { + url: z.string(), + event_result_schema: z.string(), + event_result_type: "string" +}); + +const ChildEvent = BaseEvent.extend("ChildEvent", { + tab_id: z.string(), + event_result_schema: z.string(), + event_result_type: "string" +}); + +const GrandchildEvent = BaseEvent.extend("GrandchildEvent", { + status: z.string(), + event_result_schema: z.string(), + event_result_type: "string" +}); const delay = (ms: number): Promise => new Promise((resolve) => { diff --git a/bubus-ts/src/async_context.ts b/bubus-ts/src/async_context.ts index fc44a3d..0ef59d0 100644 --- a/bubus-ts/src/async_context.ts +++ b/bubus-ts/src/async_context.ts @@ -26,18 +26,18 @@ if (is_node) { } } -export const capture_async_context = (): unknown | null => { +export const captureAsyncContext = (): unknown | null => { if (!async_local_storage) { return null; } return async_local_storage.getStore() ?? null; }; -export const run_with_async_context = (context: unknown | null, fn: () => T): T => { +export const runWithAsyncContext = (context: unknown | null, fn: () => T): T => { if (!async_local_storage) { return fn(); } return async_local_storage.run(context ?? undefined, fn); }; -export const has_async_local_storage = (): boolean => async_local_storage !== null; +export const hasAsyncLocalStorage = (): boolean => async_local_storage !== null; diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index e85ce21..c762999 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -3,6 +3,8 @@ import { v7 as uuidv7 } from "uuid"; import type { EventBus } from "./event_bus.js"; import { EventResult } from "./event_result.js"; +import type { ConcurrencyMode } from "./semaphores.js"; +import { CONCURRENCY_MODES } from "./semaphores.js"; export const BaseEventSchema = z @@ -12,14 +14,26 @@ export const BaseEventSchema = z event_type: z.string(), event_timeout: z.number().positive().nullable(), event_parent_id: z.string().uuid().optional(), - event_path: z.array(z.string()).optional() + event_path: z.array(z.string()).optional(), + event_result_type: z.string().optional(), + event_result_schema: z.unknown().optional(), + event_concurrency: z.enum(CONCURRENCY_MODES).optional(), + handler_concurrency: z.enum(CONCURRENCY_MODES).optional() }) .passthrough(); export type BaseEventData = z.infer; type BaseEventFields = Pick< BaseEventData, - "event_id" | "event_created_at" | "event_type" | "event_timeout" | "event_parent_id" + | "event_id" + | "event_created_at" + | "event_type" + | "event_timeout" + | "event_parent_id" + | "event_result_type" + | "event_result_schema" + | "event_concurrency" + | "handler_concurrency" >; export type BaseEventInit> = TFields & @@ -42,11 +56,18 @@ export type EventFactory = { event_type?: string; event_result_schema?: z.ZodTypeAny; event_result_type?: string; + fromJSON?: (data: unknown) => BaseEvent & z.infer>; }; -export type EventExtendOptions = { - event_result_schema?: z.ZodTypeAny; - event_result_type?: string; +type ZodShapeFrom> = { + [K in keyof TShape as K extends + | "event_result_schema" + | "event_result_type" + | "event_result_schema_json" + ? never + : TShape[K] extends z.ZodTypeAny + ? K + : never]: Extract; }; export class BaseEvent { @@ -57,7 +78,6 @@ export class BaseEvent { event_timeout: number | null; event_parent_id?: string; event_path: string[]; - event_processed_path: string[]; event_factory?: Function; event_result_schema?: z.ZodTypeAny; event_result_type?: string; @@ -70,8 +90,9 @@ export class BaseEvent { event_started_at?: string; event_completed_at?: string; event_errors: unknown[]; - event_key_symbol?: symbol; bus?: EventBus; + event_concurrency?: ConcurrencyMode; + handler_concurrency?: ConcurrencyMode; _original_event?: BaseEvent; _dispatch_context?: unknown | null; @@ -89,18 +110,21 @@ export class BaseEvent { event_result_type?: string; }; const event_type = data.event_type ?? ctor.event_type ?? ctor.name; + const event_result_schema = data.event_result_schema ?? ctor.event_result_schema; + const event_result_type = data.event_result_type ?? ctor.event_result_type; const event_id = data.event_id ?? uuidv7(); const event_created_at = data.event_created_at ?? new Date().toISOString(); - const event_timeout = - data.event_timeout === undefined ? BaseEvent.defaultTimeout() : data.event_timeout; + const event_timeout = data.event_timeout ?? null; const base_data = { ...data, event_id, event_created_at, event_type, - event_timeout + event_timeout, + event_result_schema, + event_result_type }; const schema = ctor.schema ?? BaseEventSchema; @@ -111,14 +135,13 @@ export class BaseEvent { this.event_path = Array.isArray((parsed as { event_path?: string[] }).event_path) ? ([...(parsed as { event_path?: string[] }).event_path] as string[]) : []; - this.event_processed_path = []; this.event_pending_buses = 0; this.event_status = "pending"; this.event_created_at_ms = Date.parse(this.event_created_at); this.event_errors = []; this.event_factory = ctor.factory; - this.event_result_schema = ctor.event_result_schema; - this.event_result_type = ctor.event_result_type; + this.event_result_schema = event_result_schema; + this.event_result_type = event_result_type; this.event_results = new Map(); this.event_children = []; @@ -128,10 +151,6 @@ export class BaseEvent { this._dispatch_context = undefined; } - static defaultTimeout(): number { - return 300; - } - static nextIsoTimestamp(): string { const now_ms = Date.now(); const next_ms = Math.max(now_ms, BaseEvent._last_timestamp_ms + 1); @@ -140,24 +159,63 @@ export class BaseEvent { } static extend( - shape: TShape, - options?: EventExtendOptions + shape: TShape ): EventFactory; - static extend( + static extend>( + shape: TShape + ): EventFactory>; + static extend>( event_type: string, - shape: TShape, - options?: EventExtendOptions - ): EventFactory; - static extend( + shape: TShape + ): EventFactory>; + static extend>( arg1: string | TShape, - arg2?: TShape | EventExtendOptions, - arg3?: EventExtendOptions - ): EventFactory { - return extendEvent( - arg1 as string | TShape, - arg2 as TShape | EventExtendOptions | undefined, - arg3 - ); + arg2?: TShape + ): EventFactory> { + const event_type = typeof arg1 === "string" ? arg1 : undefined; + const raw_shape = (typeof arg1 === "string" ? arg2 ?? {} : arg1) as Record< + string, + unknown + >; + + const event_result_schema = is_zod_schema(raw_shape.event_result_schema) + ? (raw_shape.event_result_schema as z.ZodTypeAny) + : undefined; + const event_result_type = + typeof raw_shape.event_result_type === "string" ? raw_shape.event_result_type : undefined; + + const shape = extract_zod_shape(raw_shape); + const full_schema = BaseEventSchema.extend(shape); + + class ExtendedEvent extends BaseEvent { + static schema = full_schema; + static event_type = event_type; + static factory?: Function; + static event_result_schema = event_result_schema; + static event_result_type = event_result_type; + + constructor(data: EventInit>) { + super(data as BaseEventInit>); + } + } + + function EventFactory( + data: EventInit> + ): BaseEvent & z.infer>> { + return new ExtendedEvent(data); + } + + EventFactory.schema = full_schema as EventSchema>; + EventFactory.event_type = event_type; + EventFactory.event_result_schema = event_result_schema; + EventFactory.event_result_type = event_result_type; + EventFactory.fromJSON = (data: unknown) => + ExtendedEvent.fromJSON(data) as BaseEvent & z.infer>>; + EventFactory.prototype = ExtendedEvent.prototype; + (EventFactory as unknown as { class: typeof ExtendedEvent }).class = ExtendedEvent; + (ExtendedEvent as unknown as { factory?: Function }).factory = EventFactory; + + return EventFactory as EventFactory>; } static parse(this: T, data: unknown): InstanceType { @@ -166,6 +224,20 @@ export class BaseEvent { return new this(parsed) as InstanceType; } + static fromJSON(this: T, data: unknown): InstanceType { + if (!data || typeof data !== "object") { + return this.parse(data); + } + const record = { ...(data as Record) }; + if (record.event_result_schema && !is_zod_schema(record.event_result_schema)) { + const zod_any = z as unknown as { fromJSONSchema?: (schema: unknown) => z.ZodTypeAny }; + if (typeof zod_any.fromJSONSchema === "function") { + record.event_result_schema = zod_any.fromJSONSchema(record.event_result_schema); + } + } + return new this(record as BaseEventInit>) as InstanceType; + } + toJSON(): BaseEventData { return { event_id: this.event_id, @@ -173,7 +245,13 @@ export class BaseEvent { event_type: this.event_type, event_timeout: this.event_timeout, event_parent_id: this.event_parent_id, - event_path: this.event_path + event_path: this.event_path, + event_result_type: this.event_result_type, + event_concurrency: this.event_concurrency, + handler_concurrency: this.handler_concurrency, + event_result_schema: this.event_result_schema + ? to_json_schema(this.event_result_schema) + : this.event_result_schema }; } @@ -275,48 +353,32 @@ export class BaseEvent { } } -export function extendEvent( - shape: TShape -): EventFactory; -export function extendEvent( - event_type: string, - shape: TShape, - options?: EventExtendOptions -): EventFactory; -export function extendEvent( - arg1: string | TShape, - arg2?: TShape | EventExtendOptions, - arg3?: EventExtendOptions -): EventFactory { - const event_type = typeof arg1 === "string" ? arg1 : undefined; - const shape = (typeof arg1 === "string" ? arg2 : arg1) as TShape; - const options = (typeof arg1 === "string" ? arg3 : arg2) as EventExtendOptions | undefined; - - const full_schema = BaseEventSchema.extend(shape); - - class ExtendedEvent extends BaseEvent { - static schema = full_schema; - static event_type = event_type; - static factory?: Function; - static event_result_schema = options?.event_result_schema; - static event_result_type = options?.event_result_type; - - constructor(data: EventInit) { - super(data as BaseEventInit>); +const is_zod_schema = (value: unknown): value is z.ZodTypeAny => + !!value && typeof (value as z.ZodTypeAny).safeParse === "function"; + +const extract_zod_shape = (raw: Record): z.ZodRawShape => { + const shape: z.ZodRawShape = {}; + for (const [key, value] of Object.entries(raw)) { + if (key === "event_result_schema" || key === "event_result_type") { + continue; + } + if (is_zod_schema(value)) { + shape[key] = value; } } + return shape; +}; - function EventFactory(data: EventInit): BaseEvent & z.infer> { - return new ExtendedEvent(data); +const to_json_schema = (schema: unknown): unknown => { + if (!schema) { + return schema; } - - EventFactory.schema = full_schema; - EventFactory.event_type = event_type; - EventFactory.event_result_schema = options?.event_result_schema; - EventFactory.event_result_type = options?.event_result_type; - EventFactory.prototype = ExtendedEvent.prototype; - (EventFactory as unknown as { class: typeof ExtendedEvent }).class = ExtendedEvent; - (ExtendedEvent as unknown as { factory?: Function }).factory = EventFactory; - - return EventFactory as EventFactory; -} + if (!is_zod_schema(schema)) { + return schema; + } + const zod_any = z as unknown as { toJSONSchema?: (schema: z.ZodTypeAny) => unknown }; + if (typeof zod_any.toJSONSchema === "function") { + return zod_any.toJSONSchema(schema); + } + return undefined; +}; diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index 0728779..a996644 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -1,7 +1,14 @@ import { BaseEvent } from "./base_event.js"; import { EventResult } from "./event_result.js"; -import { capture_async_context, run_with_async_context } from "./async_context.js"; -import { v7 as uuidv7 } from "uuid"; +import { captureAsyncContext, runWithAsyncContext } from "./async_context.js"; +import { v5 as uuidv5, v7 as uuidv7 } from "uuid"; +import { + AsyncLimiter, + type ConcurrencyMode, + limiterForMode, + resolveConcurrencyMode, + runWithLimiter +} from "./semaphores.js"; export class EventHandlerTimeoutError extends Error { @@ -38,7 +45,7 @@ export class EventHandlerCancelledError extends Error { } } -const with_resolvers = () => { +const withResolvers = () => { if (typeof Promise.withResolvers === "function") { return Promise.withResolvers(); } @@ -52,12 +59,7 @@ const with_resolvers = () => { return { promise, resolve, reject }; }; -import type { - EventClass, - EventHandler, - EventKey, - FindOptions -} from "./types.js"; +import type { EventHandler, EventKey, FindOptions, HandlerOptions } from "./types.js"; type FindWaiter = { event_key: EventKey; @@ -66,12 +68,30 @@ type FindWaiter = { timeout_id?: ReturnType; }; +type HandlerEntry = { + id: string; + handler: EventHandler; + handler_name: string; + handler_file_path?: string; + handler_registered_at: string; + options?: HandlerOptions; + event_key: string | "*"; +}; + +const HANDLER_ID_NAMESPACE = uuidv5("bubus-handler", uuidv5.DNS); + type EventBusOptions = { max_history_size?: number | null; + event_concurrency?: ConcurrencyMode; + handler_concurrency?: ConcurrencyMode; + event_timeout?: number | null; }; export class EventBus { static instances: Set = new Set(); + static global_event_limiter = new AsyncLimiter(1); + static global_handler_limiter = new AsyncLimiter(1); + static global_inside_handler_depth = 0; static findEventById(event_id: string): BaseEvent | null { for (const bus of EventBus.instances) { const event = bus.event_history_by_id.get(event_id); @@ -84,37 +104,55 @@ export class EventBus { name: string; max_history_size: number | null; - handlers_by_key: Map>; + event_concurrency_default: ConcurrencyMode; + handler_concurrency_default: ConcurrencyMode; + event_timeout_default: number | null; + bus_event_limiter: AsyncLimiter; + bus_handler_limiter: AsyncLimiter; + handlers_by_key: Map>; + handlers_by_id: Map; event_history: BaseEvent[]; event_history_by_id: Map; - pending_queue: BaseEvent[]; - is_running: boolean; + pending_event_queue: BaseEvent[]; + in_flight_event_ids: Set; + runloop_running: boolean; + // Resolves for callers of waitUntilIdle(); only drained when idle is confirmed twice. idle_waiters: Array<() => void>; + // True while an idle check timeout is scheduled. + idle_check_pending: boolean; + // Number of consecutive idle snapshots seen; must reach 2 to resolve waiters. + idle_check_streak: number; + // Pending find() callers waiting for a matching future event. find_waiters: Set; - handler_stack: EventResult[]; - handler_file_paths: Map; - handler_ids: Map; - run_now_depth: number; - run_now_waiters: Array<() => void>; - inside_handler_depth: number; + // Depth counter for "immediate processing" (queue-jump) inside handlers. + // While > 0, the runloop pauses to avoid processing unrelated events. + immediate_processing_stack_depth: number; + // Runloop waiters that resume once immediate_processing_stack_depth returns to 0. + immediate_processing_waiters: Array<() => void>; constructor(name: string = "EventBus", options: EventBusOptions = {}) { this.name = name; this.max_history_size = options.max_history_size === undefined ? 100 : options.max_history_size; + this.event_concurrency_default = options.event_concurrency ?? "bus-serial"; + this.handler_concurrency_default = options.handler_concurrency ?? "bus-serial"; + this.event_timeout_default = + options.event_timeout === undefined ? 60 : options.event_timeout; + this.bus_event_limiter = new AsyncLimiter(1); + this.bus_handler_limiter = new AsyncLimiter(1); this.handlers_by_key = new Map(); + this.handlers_by_id = new Map(); this.event_history = []; this.event_history_by_id = new Map(); - this.pending_queue = []; - this.is_running = false; + this.pending_event_queue = []; + this.in_flight_event_ids = new Set(); + this.runloop_running = false; this.idle_waiters = []; + this.idle_check_pending = false; + this.idle_check_streak = 0; this.find_waiters = new Set(); - this.handler_stack = []; - this.handler_file_paths = new Map(); - this.handler_ids = new Map(); - this.run_now_depth = 0; - this.run_now_waiters = []; - this.inside_handler_depth = 0; + this.immediate_processing_stack_depth = 0; + this.immediate_processing_waiters = []; EventBus.instances.add(this); @@ -122,35 +160,71 @@ export class EventBus { this.emit = this.emit.bind(this); } - on(event_key: EventKey | "*", handler: EventHandler): void { - const handler_set = this.handlers_by_key.get(event_key) ?? new Set(); - handler_set.add(handler as EventHandler); - this.handlers_by_key.set(event_key, handler_set); + on( + event_key: EventKey | "*", + handler: EventHandler, + options: HandlerOptions = {} + ): void { + const normalized_key = this.normalizeEventKey(event_key); + const handler_name = handler.name || "anonymous"; + const handler_file_path = this.inferHandlerFilePath() ?? undefined; + const handler_registered_at = BaseEvent.nextIsoTimestamp(); + const handler_id = this.computeHandlerId( + normalized_key, + handler_name, + handler_file_path, + handler_registered_at + ); - if (!this.handler_file_paths.has(handler as EventHandler)) { - const file_path = this.inferHandlerFilePath(); - if (file_path) { - this.handler_file_paths.set(handler as EventHandler, file_path); - } - } + let handler_ids = this.handlers_by_key.get(normalized_key); + if (!handler_ids) { + handler_ids = new Set(); + this.handlers_by_key.set(normalized_key, handler_ids); + } + handler_ids.add(handler_id); + + this.handlers_by_id.set(handler_id, { + id: handler_id, + handler: handler as EventHandler, + handler_name, + handler_file_path, + handler_registered_at, + options: Object.keys(options).length > 0 ? options : undefined, + event_key: normalized_key + }); } off(event_key: EventKey | "*", handler: EventHandler): void { - const handler_set = this.handlers_by_key.get(event_key); - if (!handler_set) { + const normalized_key = this.normalizeEventKey(event_key); + const handler_ids = this.handlers_by_key.get(normalized_key); + if (!handler_ids || handler_ids.size === 0) { return; } - handler_set.delete(handler as EventHandler); + for (const handler_id of Array.from(handler_ids)) { + const entry = this.handlers_by_id.get(handler_id); + if (!entry) { + handler_ids.delete(handler_id); + continue; + } + if (entry.handler === (handler as EventHandler)) { + handler_ids.delete(handler_id); + this.handlers_by_id.delete(handler_id); + } + } + if (handler_ids.size === 0) { + this.handlers_by_key.delete(normalized_key); + } } - private getHandlerId(handler: EventHandler): string { - const existing = this.handler_ids.get(handler); - if (existing) { - return existing; - } - const handler_id = uuidv7(); - this.handler_ids.set(handler, handler_id); - return handler_id; + private computeHandlerId( + event_key: string | "*", + handler_name: string, + handler_file_path: string | undefined, + handler_registered_at: string + ): string { + const file_path = handler_file_path ?? "unknown"; + const seed = `${this.name}|${event_key}|${handler_name}|${file_path}|${handler_registered_at}`; + return uuidv5(seed, HANDLER_ID_NAMESPACE); } dispatch(event: T, event_key?: EventKey): T { @@ -162,13 +236,13 @@ export class EventBus { original_event.event_path = []; } if (original_event._dispatch_context === undefined) { - original_event._dispatch_context = capture_async_context(); + original_event._dispatch_context = captureAsyncContext(); } - - if (typeof event_key === "symbol") { - original_event.event_key_symbol = event_key; + if (original_event.event_timeout === null) { + original_event.event_timeout = this.event_timeout_default; } + if (original_event.event_path.includes(this.name) || this.eventHasVisited(original_event)) { return this._getBusScopedEvent(original_event) as T; } @@ -177,16 +251,14 @@ export class EventBus { original_event.event_path.push(this.name); } - const current_handler = this.handler_stack[this.handler_stack.length - 1]; - if (current_handler) { - const parent_event = this.event_history_by_id.get(current_handler.event_id); + if (original_event.event_parent_id) { + const parent_event = this.event_history_by_id.get(original_event.event_parent_id); if (parent_event) { - if (!original_event.event_parent_id) { - original_event.event_parent_id = parent_event.event_id; - } - if (original_event.event_parent_id === parent_event.event_id) { - this.recordChildEvent(parent_event.event_id, original_event); - } + this.recordChildEvent( + parent_event.event_id, + original_event, + original_event.event_emitted_by_handler_id + ); } } @@ -194,10 +266,8 @@ export class EventBus { this.event_history_by_id.set(original_event.event_id, original_event); this.trimHistory(); - this.createPendingHandlerResults(original_event); - original_event.event_pending_buses += 1; - this.pending_queue.push(original_event); + this.pending_event_queue.push(original_event); this.startRunloop(); return this._getBusScopedEvent(original_event) as T; @@ -250,21 +320,27 @@ export class EventBus { return true; }; - if (past !== false) { + if (past !== false || future !== false) { const now_ms = Date.now(); const cutoff_ms = past === true ? null : now_ms - Math.max(0, Number(past)) * 1000; for (let i = this.event_history.length - 1; i >= 0; i -= 1) { const event = this.event_history[i]; - if (event.event_status !== "completed") { + if (!matches(event)) { continue; } - if (cutoff_ms !== null && event.event_created_at_ms < cutoff_ms) { - continue; + if (event.event_status === "completed") { + if (past === false) { + continue; + } + if (cutoff_ms !== null && event.event_created_at_ms < cutoff_ms) { + continue; + } + return this._getBusScopedEvent(event) as T; } - if (matches(event)) { - return event as T; + if (future !== false) { + return this._getBusScopedEvent(event) as T; } } } @@ -277,7 +353,7 @@ export class EventBus { const waiter: FindWaiter = { event_key, matches, - resolve: (event) => resolve(event as T) + resolve: (event) => resolve(this._getBusScopedEvent(event) as T) }; if (future !== true) { @@ -302,9 +378,9 @@ export class EventBus { return event; } - const index = this.pending_queue.indexOf(original_event); + const index = this.pending_event_queue.indexOf(original_event); if (index >= 0) { - this.pending_queue.splice(index, 1); + this.pending_event_queue.splice(index, 1); } await this.runImmediatelyAcrossBuses(original_event); @@ -312,14 +388,72 @@ export class EventBus { } async waitUntilIdle(): Promise { - if (!this.is_running && this.pending_queue.length === 0) { + if (this.isIdleSnapshot()) { return; } return new Promise((resolve) => { this.idle_waiters.push(resolve); + this.scheduleIdleCheck(); }); } + private scheduleIdleCheck(): void { + if (this.idle_check_pending) { + return; + } + this.idle_check_pending = true; + setTimeout(() => { + this.idle_check_pending = false; + this.resolveIdleWaitersIfDone(); + }, 0); + } + + private isIdleSnapshot(): boolean { + return ( + this.pending_event_queue.length === 0 && + this.in_flight_event_ids.size === 0 && + !this.hasPendingResults() && + !this.runloop_running + ); + } + + private resolveIdleWaitersIfDone(): void { + if (!this.isIdleSnapshot()) { + this.idle_check_streak = 0; + if (this.idle_waiters.length > 0) { + this.scheduleIdleCheck(); + } + return; + } + this.idle_check_streak += 1; + if (this.idle_check_streak < 2) { + if (this.idle_waiters.length > 0) { + this.scheduleIdleCheck(); + } + return; + } + this.idle_check_streak = 0; + const idle_waiters = this.idle_waiters; + this.idle_waiters = []; + for (const resolve of idle_waiters) { + resolve(); + } + } + + private hasPendingResults(): boolean { + for (const event of this.event_history) { + for (const result of event.event_results.values()) { + if (result.eventbus_name !== this.name) { + continue; + } + if (result.status === "pending") { + return true; + } + } + } + return false; + } + eventIsChildOf(event: BaseEvent, ancestor: BaseEvent): boolean { if (event.event_id === ancestor.event_id) { return false; @@ -343,7 +477,11 @@ export class EventBus { return this.eventIsChildOf(descendant, event); } - recordChildEvent(parent_event_id: string, child_event: BaseEvent): void { + recordChildEvent( + parent_event_id: string, + child_event: BaseEvent, + handler_id?: string + ): void { const original_child = child_event._original_event ?? child_event; const parent_event = this.event_history_by_id.get(parent_event_id); if (parent_event) { @@ -352,12 +490,16 @@ export class EventBus { } } - const current_result = this.handler_stack[this.handler_stack.length - 1]; - if (current_result) { - if (!current_result.event_children.some((child) => child.event_id === original_child.event_id)) { - current_result.event_children.push(original_child); + const target_handler_id = + handler_id ?? original_child.event_emitted_by_handler_id ?? undefined; + if (target_handler_id) { + const current_result = parent_event?.event_results.get(target_handler_id); + if (current_result) { + if (!current_result.event_children.some((child) => child.event_id === original_child.event_id)) { + current_result.event_children.push(original_child); + } } - original_child.event_emitted_by_handler_id = current_result.handler_id; + original_child.event_emitted_by_handler_id = target_handler_id; } } @@ -419,7 +561,7 @@ export class EventBus { } isInsideHandler(): boolean { - return this.inside_handler_depth > 0; + return EventBus.global_inside_handler_depth > 0; } private async runImmediatelyAcrossBuses(event: BaseEvent): Promise { @@ -430,18 +572,26 @@ export class EventBus { } for (const bus of buses) { - bus.run_now_depth += 1; + bus.immediate_processing_stack_depth += 1; } try { for (const bus of buses) { - const index = bus.pending_queue.indexOf(event); + const index = bus.pending_event_queue.indexOf(event); if (index >= 0) { - bus.pending_queue.splice(index, 1); + bus.pending_event_queue.splice(index, 1); } - if (!event.event_processed_path.includes(bus.name)) { - await bus.processEvent(event); + if (bus.eventHasVisited(event)) { + continue; } + if (bus.in_flight_event_ids.has(event.event_id)) { + continue; + } + bus.in_flight_event_ids.add(event.event_id); + await bus.scheduleEventProcessing(event, { + bypass_event_limiters: true, + bypass_handler_limiters: true + }); } if (event.event_status !== "completed") { @@ -449,7 +599,10 @@ export class EventBus { } } finally { for (const bus of buses) { - bus.run_now_depth = Math.max(0, bus.run_now_depth - 1); + bus.immediate_processing_stack_depth = Math.max( + 0, + bus.immediate_processing_stack_depth - 1 + ); bus.releaseRunNowWaiters(); } } @@ -468,7 +621,7 @@ export class EventBus { if (!bus.event_history_by_id.has(event.event_id)) { continue; } - if (event.event_processed_path.includes(bus.name)) { + if (bus.eventHasVisited(event)) { continue; } if (!seen.has(bus)) { @@ -486,98 +639,210 @@ export class EventBus { } private releaseRunNowWaiters(): void { - if (this.run_now_depth !== 0 || this.run_now_waiters.length === 0) { + if ( + this.immediate_processing_stack_depth !== 0 || + this.immediate_processing_waiters.length === 0 + ) { return; } - const waiters = this.run_now_waiters; - this.run_now_waiters = []; + const waiters = this.immediate_processing_waiters; + this.immediate_processing_waiters = []; for (const resolve of waiters) { - resolve(); + try { + // Each waiter is a Promise resolver created by runloop() while it was paused. + // Resolving it resumes that runloop tick so it can continue draining the queue. + resolve(); + } catch (error) { + // Should never happen: these are internal Promise resolve callbacks. + console.error("[bubus] immediate processing waiter threw", error); + } } } private startRunloop(): void { - if (this.is_running) { + if (this.runloop_running) { return; } - this.is_running = true; - setTimeout(() => { - setTimeout(() => { - void this.runloop(); - }, 0); - }, 0); + this.runloop_running = true; + queueMicrotask(() => { + void this.runloop(); + }); } - private async runloop(): Promise { - while (this.pending_queue.length > 0) { - await Promise.resolve(); - if (this.run_now_depth > 0) { - await new Promise((resolve) => { - this.run_now_waiters.push(resolve); + private async scheduleEventProcessing( + event: BaseEvent, + options: { + bypass_event_limiters?: boolean; + bypass_handler_limiters?: boolean; + pre_acquired_limiter?: AsyncLimiter | null; + } = {} + ): Promise { + try { + const limiter = options.bypass_event_limiters ? null : this.resolveEventLimiter(event); + const pre_acquired_limiter = options.pre_acquired_limiter ?? null; + if (pre_acquired_limiter) { + await this.processEvent(event, { bypass_handler_limiters: options.bypass_handler_limiters }); + } else { + await runWithLimiter(limiter, async () => { + await this.processEvent(event, { bypass_handler_limiters: options.bypass_handler_limiters }); }); - continue; - } - const next_event = this.pending_queue.shift(); - if (!next_event) { - continue; } - if (this.eventHasVisited(next_event)) { - continue; + } finally { + if (options.pre_acquired_limiter) { + options.pre_acquired_limiter.release(); } - await this.processEvent(next_event); - await Promise.resolve(); + this.in_flight_event_ids.delete(event.event_id); + this.resolveIdleWaitersIfDone(); } - this.is_running = false; - const idle_waiters = this.idle_waiters; - this.idle_waiters = []; - for (const resolve of idle_waiters) { - resolve(); + } + + private async runloop(): Promise { + for (;;) { + while (this.pending_event_queue.length > 0) { + await Promise.resolve(); + if (this.immediate_processing_stack_depth > 0) { + await new Promise((resolve) => { + this.immediate_processing_waiters.push(resolve); + }); + continue; + } + const next_event = this.pending_event_queue[0]; + if (!next_event) { + continue; + } + const original_event = next_event._original_event ?? next_event; + if (this.eventHasVisited(original_event)) { + this.pending_event_queue.shift(); + continue; + } + let pre_acquired_limiter: AsyncLimiter | null = null; + const event_limiter = this.resolveEventLimiter(original_event); + if (event_limiter) { + await event_limiter.acquire(); + pre_acquired_limiter = event_limiter; + } + this.pending_event_queue.shift(); + if (this.in_flight_event_ids.has(original_event.event_id)) { + if (pre_acquired_limiter) { + pre_acquired_limiter.release(); + } + continue; + } + this.in_flight_event_ids.add(original_event.event_id); + void this.scheduleEventProcessing(original_event, { + bypass_event_limiters: true, + pre_acquired_limiter + }); + await Promise.resolve(); + } + this.runloop_running = false; + if (this.pending_event_queue.length > 0) { + this.startRunloop(); + return; + } + this.resolveIdleWaitersIfDone(); + return; } } - private async processEvent(event: BaseEvent): Promise { + private async processEvent( + event: BaseEvent, + options: { bypass_handler_limiters?: boolean } = {} + ): Promise { if (this.eventHasVisited(event)) { return; } - if (!Array.isArray(event.event_processed_path)) { - event.event_processed_path = []; - } - if (!event.event_processed_path.includes(this.name)) { - event.event_processed_path.push(this.name); - } event.markStarted(); this.notifyFinders(event); - const handlers = this.collectHandlers(event); - const handler_results = handlers.map((handler) => { - const handler_name = handler.name || "anonymous"; - const handler_id = this.getHandlerId(handler); - const existing = event.event_results.get(handler_id); - const result = - existing ?? - new EventResult({ - event_id: event.event_id, - handler_id, - handler_name, - handler_file_path: this.handler_file_paths.get(handler) ?? undefined, - eventbus_name: this.name - }); - if (!existing) { - event.event_results.set(handler_id, result); + const deadlock_timer = + event.event_timeout === null + ? null + : setTimeout(() => { + if (event.event_status === "completed") { + return; + } + const started_at = event.event_started_at ?? event.event_created_at; + const elapsed_ms = Date.now() - Date.parse(started_at); + const elapsed_seconds = (elapsed_ms / 1000).toFixed(1); + console.warn( + `[bubus] Possible deadlock: ${event.event_type}#${event.event_id} still ${event.event_status} on ${this.name} after ${elapsed_seconds}s (timeout ${event.event_timeout}s)` + ); + }, event.event_timeout * 1000); + + try { + const handler_entries = this.createPendingHandlerResults(event); + + const handler_promises = handler_entries.map((entry) => + this.runHandlerEntry(event, entry.handler, entry.result, entry.options, { + bypass_handler_limiters: options.bypass_handler_limiters + }) + ); + await Promise.all(handler_promises); + + event.event_pending_buses = Math.max(0, event.event_pending_buses - 1); + event.tryFinalizeCompletion(); + if (event.event_status === "completed") { + this.notifyParentsFor(event); } - return { handler, result }; - }); + } finally { + if (deadlock_timer) { + clearTimeout(deadlock_timer); + } + } + } + + private resolveEventLimiter(event: BaseEvent): AsyncLimiter | null { + const resolved = resolveConcurrencyMode( + event.event_concurrency, + this.event_concurrency_default + ); + return limiterForMode(resolved, EventBus.global_event_limiter, this.bus_event_limiter); + } + + private resolveHandlerLimiter( + event: BaseEvent, + options?: HandlerOptions + ): AsyncLimiter | null { + const event_override = + event.handler_concurrency && event.handler_concurrency !== "auto" + ? event.handler_concurrency + : undefined; + const handler_override = + options?.handler_concurrency && options.handler_concurrency !== "auto" + ? options.handler_concurrency + : undefined; + const fallback = this.handler_concurrency_default; + const resolved = resolveConcurrencyMode( + event_override ?? handler_override ?? fallback, + fallback + ); + return limiterForMode(resolved, EventBus.global_handler_limiter, this.bus_handler_limiter); + } - const handler_event = this._getBusScopedEvent(event); + private async runHandlerEntry( + event: BaseEvent, + handler: EventHandler, + result: EventResult, + options?: HandlerOptions, + run_options: { bypass_handler_limiters?: boolean } = {} + ): Promise { + if (result.status === "error" && result.error instanceof EventHandlerCancelledError) { + return; + } - for (const { handler, result } of handler_results) { + const handler_event = this._getBusScopedEvent(event, result); + const limiter = run_options.bypass_handler_limiters + ? null + : this.resolveHandlerLimiter(event, options); + + await runWithLimiter(limiter, async () => { if (result.status === "error" && result.error instanceof EventHandlerCancelledError) { - continue; + return; } - this.inside_handler_depth += 1; - this.handler_stack.push(result); + EventBus.global_inside_handler_depth += 1; try { result.markStarted(); const handler_result = await this.runHandlerWithTimeout(event, handler, handler_event); @@ -607,22 +872,18 @@ export class EventBus { parent_error: error } ); - event.cancelPendingChildProcessing(cancelled_error); + this.cancelPendingChildProcessing(event, cancelled_error); } else { result.markError(error); event.markFailed(error); } } finally { - this.handler_stack.pop(); - this.inside_handler_depth = Math.max(0, this.inside_handler_depth - 1); + EventBus.global_inside_handler_depth = Math.max( + 0, + EventBus.global_inside_handler_depth - 1 + ); } - } - - event.event_pending_buses = Math.max(0, event.event_pending_buses - 1); - event.tryFinalizeCompletion(); - if (event.event_status === "completed") { - this.notifyParentsFor(event); - } + }); } @@ -632,59 +893,79 @@ export class EventBus { handler: EventHandler, handler_event: BaseEvent = event ): Promise { + const handler_name = handler.name || "anonymous"; + const warn_ms = 15000; + const started_at_ms = Date.now(); + const should_warn = + event.event_timeout === null || event.event_timeout * 1000 > warn_ms; + const warn_timer = should_warn + ? setTimeout(() => { + const elapsed_ms = Date.now() - started_at_ms; + const elapsed_seconds = (elapsed_ms / 1000).toFixed(1); + console.warn( + `[bubus] Slow handler: ${event.event_type}.${handler_name} running ${elapsed_seconds}s on ${this.name}` + ); + }, warn_ms) + : null; + const clear_warn = () => { + if (warn_timer) { + clearTimeout(warn_timer); + } + }; + const run_handler = () => + Promise.resolve().then(() => + runWithAsyncContext(event._dispatch_context ?? null, () => handler(handler_event)) + ); + if (event.event_timeout === null) { - return run_with_async_context(event._dispatch_context ?? null, () => handler(handler_event)); + return run_handler().finally(clear_warn); } const timeout_seconds = event.event_timeout; const timeout_ms = timeout_seconds * 1000; - const { promise, resolve, reject } = with_resolvers(); + const { promise, resolve, reject } = withResolvers(); let settled = false; + const finalize = (fn: (value?: unknown) => void) => { + return (value?: unknown) => { + if (settled) { + return; + } + settled = true; + clearTimeout(timer); + clear_warn(); + fn(value); + }; + }; + const timer = setTimeout(() => { - if (settled) { - return; - } - settled = true; - reject( + finalize(reject)( new EventHandlerTimeoutError( - `handler ${handler.name || "anonymous"} timed out after ${timeout_seconds}s`, + `handler ${handler_name} timed out after ${timeout_seconds}s`, { event_type: event.event_type, - handler_name: handler.name || "anonymous", + handler_name, timeout_seconds } ) ); }, timeout_ms); - Promise.resolve() - .then(() => run_with_async_context(event._dispatch_context ?? null, () => handler(handler_event))) - .then((value) => { - if (settled) { - return; - } - settled = true; - clearTimeout(timer); - resolve(value); - }) - .catch((error) => { - if (settled) { - return; - } - settled = true; - clearTimeout(timer); - reject(error); - }); + run_handler().then(finalize(resolve)).catch(finalize(reject)); return promise; } private eventHasVisited(event: BaseEvent): boolean { - return ( - Array.isArray(event.event_processed_path) && - event.event_processed_path.includes(this.name) + const results = Array.from(event.event_results.values()).filter( + (result) => result.eventbus_name === this.name + ); + if (results.length === 0) { + return false; + } + return results.every( + (result) => result.status === "completed" || result.status === "error" ); } @@ -705,10 +986,11 @@ export class EventBus { } } - _getBusScopedEvent(event: T): T { + _getBusScopedEvent(event: T, handler_result?: EventResult): T { const original_event = event._original_event ?? event; const bus = this; const parent_event_id = original_event.event_id; + const handler_id = handler_result?.handler_id; const bus_proxy = new Proxy(bus, { get(target, prop, receiver) { if (prop === "dispatch" || prop === "emit") { @@ -717,6 +999,9 @@ export class EventBus { if (!original_child.event_parent_id) { original_child.event_parent_id = parent_event_id; } + if (handler_id && !original_child.event_emitted_by_handler_id) { + original_child.event_emitted_by_handler_id = handler_id; + } const dispatcher = Reflect.get(target, prop, receiver) as ( event: BaseEvent, event_key?: EventKey @@ -757,6 +1042,71 @@ export class EventBus { return scoped as T; } + private cancelPendingChildProcessing( + event: BaseEvent, + error: EventHandlerCancelledError + ): void { + const visited = new Set(); + const cancel_child = (child: BaseEvent): void => { + const original_child = child._original_event ?? child; + if (visited.has(original_child.event_id)) { + return; + } + visited.add(original_child.event_id); + + const path = Array.isArray(original_child.event_path) + ? original_child.event_path + : []; + const buses_to_cancel = new Set(path); + for (const bus of EventBus.instances) { + if (!buses_to_cancel.has(bus.name)) { + continue; + } + bus.cancelEventOnBus(original_child, error); + } + + for (const grandchild of original_child.event_children) { + cancel_child(grandchild); + } + }; + + for (const child of event.event_children) { + cancel_child(child); + } + } + + private cancelEventOnBus(event: BaseEvent, error: EventHandlerCancelledError): void { + const original_event = event._original_event ?? event; + const handler_entries = this.createPendingHandlerResults(original_event); + let updated = false; + for (const entry of handler_entries) { + if (entry.result.status === "pending") { + entry.result.markError(error); + updated = true; + } + } + + let removed = 0; + if (this.pending_event_queue.length > 0) { + const before_len = this.pending_event_queue.length; + this.pending_event_queue = this.pending_event_queue.filter( + (queued) => (queued._original_event ?? queued).event_id !== original_event.event_id + ); + removed = before_len - this.pending_event_queue.length; + } + + if (removed > 0 && !this.in_flight_event_ids.has(original_event.event_id)) { + original_event.event_pending_buses = Math.max(0, original_event.event_pending_buses - 1); + } + + if (updated || removed > 0) { + original_event.tryFinalizeCompletion(); + if (original_event.event_status === "completed") { + this.notifyParentsFor(original_event); + } + } + } + private buildTreeLine( event: BaseEvent, indent: string, @@ -1019,55 +1369,81 @@ export class EventBus { } } - private createPendingHandlerResults(event: BaseEvent): void { + private createPendingHandlerResults( + event: BaseEvent + ): Array<{ + handler: EventHandler; + result: EventResult; + options?: HandlerOptions; + }> { const handlers = this.collectHandlers(event); - handlers.forEach((handler) => { - const handler_id = this.getHandlerId(handler); - if (event.event_results.has(handler_id)) { - return; + return handlers.map(({ handler_id, handler, handler_name, handler_file_path, options }) => { + const existing = event.event_results.get(handler_id); + const result = + existing ?? + new EventResult({ + event_id: event.event_id, + handler_id, + handler_name, + handler_file_path, + eventbus_name: this.name + }); + if (!existing) { + event.event_results.set(handler_id, result); } - const handler_name = handler.name || "anonymous"; - const result = new EventResult({ - event_id: event.event_id, - handler_id, - handler_name, - handler_file_path: this.handler_file_paths.get(handler) ?? undefined, - eventbus_name: this.name - }); - event.event_results.set(handler_id, result); + return { handler, result, options }; }); } - private collectHandlers(event: BaseEvent): EventHandler[] { - const handlers: EventHandler[] = []; - - const string_handlers = this.handlers_by_key.get(event.event_type); - if (string_handlers) { - handlers.push(...string_handlers); - } - - const class_handlers = this.handlers_by_key.get(event.constructor as EventClass); - if (class_handlers) { - handlers.push(...class_handlers); - } - - if (event.event_factory) { - const factory_handlers = this.handlers_by_key.get(event.event_factory as EventKey); - if (factory_handlers) { - handlers.push(...factory_handlers); - } - } - - if (event.event_key_symbol) { - const symbol_handlers = this.handlers_by_key.get(event.event_key_symbol); - if (symbol_handlers) { - handlers.push(...symbol_handlers); + private collectHandlers( + event: BaseEvent + ): Array<{ + handler_id: string; + handler: EventHandler; + handler_name: string; + handler_file_path?: string; + options?: HandlerOptions; + }> { + const handlers: Array<{ + handler_id: string; + handler: EventHandler; + handler_name: string; + handler_file_path?: string; + options?: HandlerOptions; + }> = []; + + const keyed_handlers = this.handlers_by_key.get(event.event_type); + if (keyed_handlers) { + for (const handler_id of keyed_handlers.values()) { + const entry = this.handlers_by_id.get(handler_id); + if (!entry) { + continue; + } + handlers.push({ + handler_id, + handler: entry.handler, + handler_name: entry.handler_name, + handler_file_path: entry.handler_file_path, + options: entry.options + }); } } const wildcard_handlers = this.handlers_by_key.get("*"); if (wildcard_handlers) { - handlers.push(...wildcard_handlers); + for (const handler_id of wildcard_handlers.values()) { + const entry = this.handlers_by_id.get(handler_id); + if (!entry) { + continue; + } + handlers.push({ + handler_id, + handler: entry.handler, + handler_name: entry.handler_name, + handler_file_path: entry.handler_file_path, + options: entry.options + }); + } } return handlers; @@ -1077,20 +1453,27 @@ export class EventBus { if (event_key === "*") { return true; } - if (typeof event_key === "string") { - return event.event_type === event_key; + const normalized = this.normalizeEventKey(event_key); + if (normalized === "*") { + return true; } - if (typeof event_key === "symbol") { - return event.event_key_symbol === event_key; + return event.event_type === normalized; + } + + private normalizeEventKey(event_key: EventKey | "*"): string | "*" { + if (event_key === "*") { + return "*"; } - if (event.event_factory && event_key === event.event_factory) { - return true; + if (typeof event_key === "string") { + return event_key; } - const ctor = event.constructor as EventClass & { factory?: Function }; - if (ctor.factory && event_key === ctor.factory) { - return true; + const event_type = (event_key as { event_type?: unknown }).event_type; + if (typeof event_type === "string" && event_type.length > 0 && event_type !== "BaseEvent") { + return event_type; } - return event.constructor === event_key; + throw new Error( + "event_key must be a string or an event class with a static event_type (not BaseEvent)" + ); } private trimHistory(): void { diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts index 969cdd6..b494ed0 100644 --- a/bubus-ts/src/index.ts +++ b/bubus-ts/src/index.ts @@ -1,10 +1,12 @@ -export { BaseEvent, BaseEventSchema, extendEvent } from "./base_event.js"; +export { BaseEvent, BaseEventSchema } from "./base_event.js"; export { EventResult } from "./event_result.js"; export { EventBus, EventHandlerTimeoutError, EventHandlerCancelledError } from "./event_bus.js"; +export type { ConcurrencyMode } from "./semaphores.js"; export type { EventClass, EventHandler, EventKey, + HandlerOptions, EventStatus, FindOptions, FindWindow diff --git a/bubus-ts/src/semaphores.ts b/bubus-ts/src/semaphores.ts new file mode 100644 index 0000000..ab2693b --- /dev/null +++ b/bubus-ts/src/semaphores.ts @@ -0,0 +1,82 @@ +export const CONCURRENCY_MODES = ["global-serial", "bus-serial", "parallel", "auto"] as const; +export type ConcurrencyMode = (typeof CONCURRENCY_MODES)[number]; + +export class AsyncLimiter { + size: number; + in_use: number; + waiters: Array<() => void>; + + constructor(size: number) { + this.size = size; + this.in_use = 0; + this.waiters = []; + } + + async acquire(): Promise { + if (this.size === Infinity) { + return; + } + if (this.in_use < this.size) { + this.in_use += 1; + return; + } + await new Promise((resolve) => { + this.waiters.push(resolve); + }); + this.in_use += 1; + } + + release(): void { + if (this.size === Infinity) { + return; + } + this.in_use = Math.max(0, this.in_use - 1); + const next = this.waiters.shift(); + if (next) { + next(); + } + } +} + +export const resolveConcurrencyMode = ( + mode: ConcurrencyMode | undefined, + fallback: ConcurrencyMode +): ConcurrencyMode => { + const normalized_fallback = fallback === "auto" ? "bus-serial" : fallback; + if (!mode || mode === "auto") { + return normalized_fallback; + } + return mode; +}; + +export const limiterForMode = ( + mode: ConcurrencyMode, + global_limiter: AsyncLimiter, + bus_limiter: AsyncLimiter +): AsyncLimiter | null => { + if (mode === "parallel") { + return null; + } + if (mode === "global-serial") { + return global_limiter; + } + if (mode === "bus-serial") { + return bus_limiter; + } + return bus_limiter; +}; + +export const runWithLimiter = async ( + limiter: AsyncLimiter | null, + fn: () => Promise +): Promise => { + if (!limiter) { + return await fn(); + } + await limiter.acquire(); + try { + return await fn(); + } finally { + limiter.release(); + } +}; diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts index d7abad6..ab675a3 100644 --- a/bubus-ts/src/types.ts +++ b/bubus-ts/src/types.ts @@ -1,13 +1,20 @@ import type { BaseEvent } from "./base_event.js"; +import type { ConcurrencyMode } from "./semaphores.js"; export type EventStatus = "pending" | "started" | "completed"; -export type EventClass = new (...args: any[]) => T; +export type EventClass = { event_type?: string } & (new ( + ...args: any[] +) => T); -export type EventKey = string | symbol | EventClass; +export type EventKey = string | EventClass; export type EventHandler = (event: T) => void | Promise; +export type HandlerOptions = { + handler_concurrency?: ConcurrencyMode; +}; + export type FindWindow = boolean | number; export type FindOptions = { diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index 70dca74..fc319bf 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -101,6 +101,7 @@ test("comprehensive patterns: forwarding, async/sync dispatch, parent tracking", test("race condition stress", async () => { const bus_1 = new EventBus("bus1"); const bus_2 = new EventBus("bus2"); + const RootEvent = BaseEvent.extend("RootEvent", {}); const results: string[] = []; @@ -135,13 +136,13 @@ test("race condition stress", async () => { bus_1.on(ImmediateChildEvent, child_handler); bus_2.on(QueuedChildEvent, child_handler); bus_2.on(ImmediateChildEvent, child_handler); - bus_1.on(BaseEvent, parent_handler); - bus_1.on(BaseEvent, bad_handler); + bus_1.on(RootEvent, parent_handler); + bus_1.on(RootEvent, bad_handler); for (let run = 0; run < 5; run += 1) { results.length = 0; - const event = bus_1.dispatch(new BaseEvent({})); + const event = bus_1.dispatch(RootEvent({})); await event.done(); await bus_1.waitUntilIdle(); await bus_2.waitUntilIdle(); @@ -422,9 +423,21 @@ test("multi-bus queues are independent when awaiting child", async () => { const event1_end_idx = execution_order.indexOf("Bus1_Event1_end"); assert.ok(child_end_idx < event1_end_idx); - assert.ok(!execution_order.includes("Bus1_Event2_start")); - assert.ok(!execution_order.includes("Bus2_Event3_start")); - assert.ok(!execution_order.includes("Bus2_Event4_start")); + const bus1_event2_start_idx = execution_order.indexOf("Bus1_Event2_start"); + if (bus1_event2_start_idx !== -1) { + assert.ok(bus1_event2_start_idx > event1_end_idx); + } + + const bus2_event3_start_idx = execution_order.indexOf("Bus2_Event3_start"); + const bus2_event4_start_idx = execution_order.indexOf("Bus2_Event4_start"); + assert.ok(bus2_event3_start_idx !== -1 || bus2_event4_start_idx !== -1); + const bus2_start_idx = + bus2_event3_start_idx === -1 + ? bus2_event4_start_idx + : bus2_event4_start_idx === -1 + ? bus2_event3_start_idx + : Math.min(bus2_event3_start_idx, bus2_event4_start_idx); + assert.ok(bus2_start_idx < event1_end_idx); await bus_1.waitUntilIdle(); await bus_2.waitUntilIdle(); diff --git a/bubus-ts/tests/context_propagation.test.ts b/bubus-ts/tests/context_propagation.test.ts index ef3c6b3..e85ca8b 100644 --- a/bubus-ts/tests/context_propagation.test.ts +++ b/bubus-ts/tests/context_propagation.test.ts @@ -2,7 +2,7 @@ import assert from "node:assert/strict"; import { test } from "node:test"; import { BaseEvent, EventBus } from "../src/index.js"; -import { async_local_storage, has_async_local_storage } from "../src/async_context.js"; +import { async_local_storage, hasAsyncLocalStorage } from "../src/async_context.js"; type ContextStore = { request_id?: string; @@ -13,7 +13,7 @@ type ContextStore = { const SimpleEvent = BaseEvent.extend("SimpleEvent", {}); const ChildEvent = BaseEvent.extend("ChildEvent", {}); -const skip_if_no_async_local_storage = !has_async_local_storage(); +const skip_if_no_async_local_storage = !hasAsyncLocalStorage(); const require_async_local_storage = () => { assert.ok(async_local_storage, "AsyncLocalStorage not available"); diff --git a/bubus-ts/tests/debounce.test.ts b/bubus-ts/tests/debounce.test.ts index c7275d6..39ee4eb 100644 --- a/bubus-ts/tests/debounce.test.ts +++ b/bubus-ts/tests/debounce.test.ts @@ -49,3 +49,86 @@ test("advanced debounce prefers history, then waits for future, then dispatches" assert.ok(resolved_event); assert.equal(resolved_event.event_type, "SyncEvent"); }); + +test("debounce returns existing fresh event", async () => { + const bus = new EventBus("DebounceFreshBus"); + + const original = await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done(); + + const is_fresh = (event: typeof original): boolean => { + const completed_at = event.event_completed_at ? Date.parse(event.event_completed_at) : 0; + return Date.now() - completed_at < 5000; + }; + + const result = + (await bus.find( + ScreenshotEvent, + (event) => event.target_id === "tab1" && is_fresh(event), + { past: true, future: false } + )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); + + assert.equal(result.event_id, original.event_id); +}); + +test("debounce dispatches new when no match", async () => { + const bus = new EventBus("DebounceNoMatchBus"); + + const result = + (await bus.find( + ScreenshotEvent, + (event) => event.target_id === "tab1", + { past: true, future: false } + )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); + + assert.ok(result); + assert.equal(result.target_id, "tab1"); + assert.equal(result.event_status, "completed"); +}); + +test("debounce dispatches new when existing is stale", async () => { + const bus = new EventBus("DebounceStaleBus"); + + await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done(); + + const result = + (await bus.find( + ScreenshotEvent, + (event) => event.target_id === "tab1" && false, + { past: true, future: false } + )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); + + assert.ok(result); + const screenshots = bus.event_history.filter( + (event) => event.event_type === "ScreenshotEvent" + ); + assert.equal(screenshots.length, 2); +}); + +test("debounce or-chain handles sequential lookups without blocking", async () => { + const bus = new EventBus("DebounceSequentialBus"); + + const result1 = + (await bus.find( + ScreenshotEvent, + (event) => event.target_id === "tab1", + { past: true, future: false } + )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); + + const result2 = + (await bus.find( + ScreenshotEvent, + (event) => event.target_id === "tab1", + { past: true, future: false } + )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); + + const result3 = + (await bus.find( + ScreenshotEvent, + (event) => event.target_id === "tab2", + { past: true, future: false } + )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab2" })).done()); + + assert.equal(result1.event_id, result2.event_id); + assert.notEqual(result1.event_id, result3.event_id); + assert.equal(result3.target_id, "tab2"); +}); diff --git a/bubus-ts/tests/event_results.test.ts b/bubus-ts/tests/event_results.test.ts index 40577b9..977d687 100644 --- a/bubus-ts/tests/event_results.test.ts +++ b/bubus-ts/tests/event_results.test.ts @@ -5,17 +5,14 @@ import { z } from "zod"; import { BaseEvent, EventBus } from "../src/index.js"; -const StringResultEvent = BaseEvent.extend( - "StringResultEvent", - {}, - { event_result_schema: z.string(), event_result_type: "string" } -); - -const ObjectResultEvent = BaseEvent.extend( - "ObjectResultEvent", - {}, - { event_result_schema: z.object({ value: z.string(), count: z.number() }) } -); +const StringResultEvent = BaseEvent.extend("StringResultEvent", { + event_result_schema: z.string(), + event_result_type: "string" +}); + +const ObjectResultEvent = BaseEvent.extend("ObjectResultEvent", { + event_result_schema: z.object({ value: z.string(), count: z.number() }) +}); const NoResultSchemaEvent = BaseEvent.extend("NoResultSchemaEvent", {}); diff --git a/bubus-ts/tests/find.test.ts b/bubus-ts/tests/find.test.ts index da73a75..b56107d 100644 --- a/bubus-ts/tests/find.test.ts +++ b/bubus-ts/tests/find.test.ts @@ -7,8 +7,16 @@ import { BaseEvent, EventBus } from "../src/index.js"; const ParentEvent = BaseEvent.extend("ParentEvent", {}); const ChildEvent = BaseEvent.extend("ChildEvent", {}); +const GrandchildEvent = BaseEvent.extend("GrandchildEvent", {}); const UnrelatedEvent = BaseEvent.extend("UnrelatedEvent", {}); const ScreenshotEvent = BaseEvent.extend("ScreenshotEvent", { target_id: z.string() }); +const NavigateEvent = BaseEvent.extend("NavigateEvent", { url: z.string() }); +const TabCreatedEvent = BaseEvent.extend("TabCreatedEvent", { tab_id: z.string() }); +const SystemEvent = BaseEvent.extend("SystemEvent", {}); +const UserActionEvent = BaseEvent.extend("UserActionEvent", { + action: z.string(), + user_id: z.string() +}); const delay = (ms: number): Promise => new Promise((resolve) => { @@ -29,6 +37,17 @@ test("find past returns most recent completed event", async () => { assert.equal(found_event.event_id, second_event.event_id); }); +test("find past returns null when no matching event exists", async () => { + const bus = new EventBus("FindPastNoneBus"); + + const start = Date.now(); + const found_event = await bus.find(ParentEvent, { past: true, future: false }); + const elapsed_ms = Date.now() - start; + + assert.equal(found_event, null); + assert.ok(elapsed_ms < 100); +}); + test("find past window filters by time", async () => { const bus = new EventBus("FindWindowBus"); @@ -68,6 +87,30 @@ test("find future waits for event", async () => { assert.equal(found_event.event_type, "ParentEvent"); }); +test("find future works with string event keys", async () => { + const bus = new EventBus("FindFutureStringBus"); + + const find_promise = bus.find("ParentEvent", { past: false, future: 0.5 }); + + setTimeout(() => { + bus.dispatch(ParentEvent({})); + }, 30); + + const found_event = await find_promise; + assert.ok(found_event); + assert.equal(found_event.event_type, "ParentEvent"); +}); + +test("find future ignores past events", async () => { + const bus = new EventBus("FindFutureIgnoresPastBus"); + + const prior = bus.dispatch(ParentEvent({})); + await prior.done(); + + const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }); + assert.equal(found_event, null); +}); + test("find future times out when no event arrives", async () => { const bus = new EventBus("FindFutureTimeoutBus"); @@ -75,6 +118,106 @@ test("find future times out when no event arrives", async () => { assert.equal(found_event, null); }); +test("find past=false future=false returns null immediately", async () => { + const bus = new EventBus("FindNeitherBus"); + + const start = Date.now(); + const found_event = await bus.find(ParentEvent, { past: false, future: false }); + const elapsed_ms = Date.now() - start; + + assert.equal(found_event, null); + assert.ok(elapsed_ms < 100); +}); + +test("find past+future returns past event immediately", async () => { + const bus = new EventBus("FindPastFutureBus"); + + const dispatched = bus.dispatch(ParentEvent({})); + await dispatched.done(); + + const start = Date.now(); + const found_event = await bus.find(ParentEvent, { past: true, future: 0.5 }); + const elapsed_ms = Date.now() - start; + + assert.ok(found_event); + assert.equal(found_event.event_id, dispatched.event_id); + assert.ok(elapsed_ms < 100); +}); + +test("find past+future waits for future when no past match", async () => { + const bus = new EventBus("FindPastFutureWaitBus"); + + const find_promise = bus.find(ChildEvent, { past: true, future: 0.3 }); + + setTimeout(() => { + bus.dispatch(ChildEvent({})); + }, 50); + + const found_event = await find_promise; + assert.ok(found_event); + assert.equal(found_event.event_type, "ChildEvent"); +}); + +test("find past/future windows are independent", async () => { + const bus = new EventBus("FindWindowIndependentBus"); + + const old_event = bus.dispatch(ParentEvent({})); + await old_event.done(); + await delay(120); + + const start = Date.now(); + const found_event = await bus.find(ParentEvent, { past: 0.05, future: 0.05 }); + const elapsed_ms = Date.now() - start; + + assert.equal(found_event, null); + assert.ok(elapsed_ms > 30); +}); + +test("find past true future float returns old event immediately", async () => { + const bus = new EventBus("FindPastTrueFutureFloatBus"); + + const dispatched = bus.dispatch(ParentEvent({})); + await dispatched.done(); + await delay(120); + + const found_event = await bus.find(ParentEvent, { past: true, future: 0.1 }); + assert.ok(found_event); + assert.equal(found_event.event_id, dispatched.event_id); +}); + +test("find past float future waits for new event", async () => { + const bus = new EventBus("FindPastFloatFutureWaitBus"); + + const old_event = bus.dispatch(ParentEvent({})); + await old_event.done(); + await delay(120); + + const find_promise = bus.find(ParentEvent, { past: 0.05, future: 0.2 }); + + setTimeout(() => { + bus.dispatch(ParentEvent({})); + }, 50); + + const found_event = await find_promise; + assert.ok(found_event); + assert.notEqual(found_event.event_id, old_event.event_id); +}); + +test("find past true future true returns past event immediately", async () => { + const bus = new EventBus("FindPastTrueFutureTrueBus"); + + const dispatched = bus.dispatch(ParentEvent({})); + await dispatched.done(); + + const start = Date.now(); + const found_event = await bus.find(ParentEvent, { past: true, future: true }); + const elapsed_ms = Date.now() - start; + + assert.ok(found_event); + assert.equal(found_event.event_id, dispatched.event_id); + assert.ok(elapsed_ms < 100); +}); + test("find respects where filter", async () => { const bus = new EventBus("FindWhereBus"); @@ -93,6 +236,60 @@ test("find respects where filter", async () => { assert.equal(found_event.event_id, event_b.event_id); }); +test("find where filter works with future waiting", async () => { + const bus = new EventBus("FindWhereFutureBus"); + + const find_promise = bus.find( + UserActionEvent, + (event) => event.user_id === "user123", + { past: false, future: 0.3 } + ); + + setTimeout(() => { + bus.dispatch(UserActionEvent({ action: "logout", user_id: "user456" })); + bus.dispatch(UserActionEvent({ action: "login", user_id: "user123" })); + }, 50); + + const found_event = await find_promise; + assert.ok(found_event); + assert.equal(found_event.user_id, "user123"); +}); + +test("find with multiple concurrent waiters resolves correct events", async () => { + const bus = new EventBus("FindConcurrentBus"); + + const find_normal = bus.find( + UserActionEvent, + (event) => event.action === "normal", + { past: false, future: 0.5 } + ); + const find_special = bus.find( + UserActionEvent, + (event) => event.action === "special", + { past: false, future: 0.5 } + ); + const find_system = bus.find("SystemEvent", { past: false, future: 0.5 }); + + setTimeout(() => { + bus.dispatch(UserActionEvent({ action: "normal", user_id: "u1" })); + bus.dispatch(SystemEvent({})); + bus.dispatch(UserActionEvent({ action: "special", user_id: "u2" })); + }, 50); + + const [normal, system, special] = await Promise.all([ + find_normal, + find_system, + find_special + ]); + + assert.ok(normal); + assert.equal(normal.action, "normal"); + assert.ok(system); + assert.equal(system.event_type, "SystemEvent"); + assert.ok(special); + assert.equal(special.action, "special"); +}); + test("find child_of returns child event", async () => { const bus = new EventBus("FindChildBus"); @@ -129,3 +326,258 @@ test("find child_of returns null for non-child", async () => { assert.equal(found_event, null); }); + +test("find child_of returns grandchild event", async () => { + const bus = new EventBus("FindGrandchildBus"); + + let child_event_id: string | null = null; + bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ChildEvent({})).done(); + child_event_id = child?.event_id ?? null; + }); + bus.on(ChildEvent, async (event) => { + await event.bus?.emit(GrandchildEvent({})).done(); + }); + + const parent_event = bus.dispatch(ParentEvent({})); + await parent_event.done(); + await bus.waitUntilIdle(); + + const grandchild_event = await bus.find(GrandchildEvent, { + past: true, + future: false, + child_of: parent_event + }); + + assert.ok(grandchild_event); + assert.equal(grandchild_event.event_parent_id, child_event_id); +}); + +test("find child_of works across forwarded buses", async () => { + const main_bus = new EventBus("MainBus"); + const auth_bus = new EventBus("AuthBus"); + + let child_event_id: string | null = null; + + main_bus.on(ParentEvent, auth_bus.dispatch); + auth_bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ChildEvent({})).done(); + child_event_id = child.event_id; + }); + + const parent_event = main_bus.dispatch(ParentEvent({})); + await parent_event.done(); + await main_bus.waitUntilIdle(); + await auth_bus.waitUntilIdle(); + + const found_child = await auth_bus.find(ChildEvent, { + past: 5, + future: 5, + child_of: parent_event + }); + + assert.ok(found_child); + assert.equal(found_child.event_id, child_event_id); +}); + +test("find child_of filters to correct parent among siblings", async () => { + const bus = new EventBus("FindCorrectParentBus"); + + bus.on(NavigateEvent, async (event) => { + await event.bus?.emit(TabCreatedEvent({ tab_id: `tab_for_${event.url}` })).done(); + }); + bus.on(TabCreatedEvent, () => {}); + + const nav_1 = bus.dispatch(NavigateEvent({ url: "site1" })); + const nav_2 = bus.dispatch(NavigateEvent({ url: "site2" })); + await nav_1.done(); + await nav_2.done(); + + const tab_1 = await bus.find(TabCreatedEvent, { + child_of: nav_1, + past: true, + future: false + }); + const tab_2 = await bus.find(TabCreatedEvent, { + child_of: nav_2, + past: true, + future: false + }); + + assert.ok(tab_1); + assert.ok(tab_2); + assert.equal(tab_1.tab_id, "tab_for_site1"); + assert.equal(tab_2.tab_id, "tab_for_site2"); +}); + +test("find future with child_of waits for matching child", async () => { + const bus = new EventBus("FindFutureChildBus"); + + bus.on(ParentEvent, async (event) => { + await delay(30); + await event.bus?.emit(ChildEvent({})).done(); + }); + + const parent_event = bus.dispatch(ParentEvent({})); + + const find_promise = bus.find(ChildEvent, { + child_of: parent_event, + past: false, + future: 0.3 + }); + + const child_event = await find_promise; + assert.ok(child_event); + assert.equal(child_event.event_parent_id, parent_event.event_id); +}); + +test("find with past float and where filter", async () => { + const bus = new EventBus("FindWherePastFloatBus"); + + const old_event = bus.dispatch(ScreenshotEvent({ target_id: "tab1" })); + await old_event.done(); + await delay(120); + const new_event = bus.dispatch(ScreenshotEvent({ target_id: "tab2" })); + await new_event.done(); + + const found_tab2 = await bus.find( + ScreenshotEvent, + (event) => event.target_id === "tab2", + { past: 0.1, future: false } + ); + + assert.ok(found_tab2); + assert.equal(found_tab2.event_id, new_event.event_id); + + const found_tab1 = await bus.find( + ScreenshotEvent, + (event) => event.target_id === "tab1", + { past: 0.1, future: false } + ); + assert.equal(found_tab1, null); +}); + +test("find with child_of and past float", async () => { + const bus = new EventBus("FindChildPastFloatBus"); + + let child_event_id: string | null = null; + bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ChildEvent({})).done(); + child_event_id = child?.event_id ?? null; + }); + + const parent_event = bus.dispatch(ParentEvent({})); + await parent_event.done(); + await bus.waitUntilIdle(); + + const found_child = await bus.find(ChildEvent, { + child_of: parent_event, + past: 5, + future: false + }); + + assert.ok(found_child); + assert.equal(found_child.event_id, child_event_id); +}); + +test("find with all parameters combined", async () => { + const bus = new EventBus("FindAllParamsBus"); + + let child_event_id: string | null = null; + bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ScreenshotEvent({ target_id: "child_tab" })).done(); + child_event_id = child?.event_id ?? null; + }); + + const parent_event = bus.dispatch(ParentEvent({})); + await parent_event.done(); + await bus.waitUntilIdle(); + + const found_child = await bus.find( + ScreenshotEvent, + (event) => event.target_id === "child_tab", + { + child_of: parent_event, + past: 5, + future: false + } + ); + + assert.ok(found_child); + assert.equal(found_child.event_id, child_event_id); +}); + +test("find past ignores in-progress events but returns after completion", async () => { + const bus = new EventBus("FindCompletedOnlyBus"); + + bus.on(ParentEvent, async () => { + await delay(80); + }); + + const dispatched = bus.dispatch(ParentEvent({})); + await delay(10); + + const early_find = await bus.find(ParentEvent, { past: true, future: false }); + assert.equal(early_find, null); + + await dispatched.done(); + + const later_find = await bus.find(ParentEvent, { past: true, future: false }); + assert.ok(later_find); + assert.equal(later_find.event_id, dispatched.event_id); +}); + +test("find future resolves before handlers complete", async () => { + const bus = new EventBus("FindBeforeCompleteBus"); + + bus.on(ParentEvent, async () => { + await delay(80); + }); + + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }); + + setTimeout(() => { + bus.dispatch(ParentEvent({})); + }, 20); + + const found_event = await find_promise; + assert.ok(found_event); + assert.equal(found_event.event_status, "started"); + + await found_event.done(); + assert.equal(found_event.event_status, "completed"); +}); + +test("find catches child event that fired during parent handler", async () => { + const bus = new EventBus("FindRaceConditionBus"); + + let tab_event_id: string | null = null; + bus.on(NavigateEvent, async (event) => { + const tab_event = await event.bus?.emit(TabCreatedEvent({ tab_id: "new_tab" })).done(); + tab_event_id = tab_event?.event_id ?? null; + }); + bus.on(TabCreatedEvent, () => {}); + + const nav_event = bus.dispatch(NavigateEvent({ url: "https://example.com" })); + await nav_event.done(); + + const found_tab = await bus.find(TabCreatedEvent, { + child_of: nav_event, + past: true, + future: false + }); + + assert.ok(found_tab); + assert.equal(found_tab.event_id, tab_event_id); +}); + +test("find returns promise that can be awaited later", async () => { + const bus = new EventBus("FindPromiseBus"); + + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }); + assert.ok(find_promise instanceof Promise); + + bus.dispatch(ParentEvent({})); + const found_event = await find_promise; + assert.ok(found_event); +}); diff --git a/bubus-ts/tests/handlers.test.ts b/bubus-ts/tests/handlers.test.ts new file mode 100644 index 0000000..3fc0fa0 --- /dev/null +++ b/bubus-ts/tests/handlers.test.ts @@ -0,0 +1,152 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { z } from "zod"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const UserActionEvent = BaseEvent.extend("UserActionEvent", { + action: z.string(), + user_id: z.string() +}); + +const SystemEventModel = BaseEvent.extend("SystemEventModel", { + event_name: z.string() +}); + +test("handler registration via string, class, and wildcard", async () => { + const bus = new EventBus("HandlerRegistrationBus"); + const results: Record = { + specific: [], + model: [], + universal: [] + }; + + const user_handler = async (event: InstanceType): Promise => { + results.specific.push(event.action); + return "user_handled"; + }; + + const system_handler = async (event: InstanceType): Promise => { + results.model.push(event.event_name); + return "system_handled"; + }; + + const universal_handler = async (event: BaseEvent): Promise => { + results.universal.push(event.event_type); + return "universal"; + }; + + const system_event_class = (SystemEventModel as unknown as { class: typeof BaseEvent }).class; + + bus.on("UserActionEvent", user_handler); + bus.on(system_event_class, system_handler); + bus.on("*", universal_handler); + + bus.dispatch(UserActionEvent({ action: "login", user_id: "u1" })); + bus.dispatch(SystemEventModel({ event_name: "startup" })); + await bus.waitUntilIdle(); + + assert.deepEqual(results.specific, ["login"]); + assert.deepEqual(results.model, ["startup"]); + assert.deepEqual(new Set(results.universal), new Set(["UserActionEvent", "SystemEventModel"])); +}); + +test("handlers can be sync or async", async () => { + const bus = new EventBus("SyncAsyncHandlersBus"); + + const sync_handler = (_event: BaseEvent): string => "sync"; + const async_handler = async (_event: BaseEvent): Promise => "async"; + + bus.on("TestEvent", sync_handler); + bus.on("TestEvent", async_handler); + + const handlers = bus.handlers_by_key.get("TestEvent"); + assert.equal(handlers?.size ?? 0, 2); + + const event = bus.dispatch(BaseEvent.extend("TestEvent", {})({})); + await event.done(); + + const results = Array.from(event.event_results.values()).map((result) => result.result); + assert.ok(results.includes("sync")); + assert.ok(results.includes("async")); +}); + +test("instance, class, and static method handlers", async () => { + const bus = new EventBus("MethodHandlersBus"); + const results: string[] = []; + + class EventProcessor { + name: string; + value: number; + + constructor(name: string, value: number) { + this.name = name; + this.value = value; + } + + sync_method_handler = (event: InstanceType): Record => { + results.push(`${this.name}_sync`); + return { processor: this.name, value: this.value, action: event.action }; + }; + + async async_method_handler( + event: InstanceType + ): Promise> { + await new Promise((resolve) => setTimeout(resolve, 10)); + results.push(`${this.name}_async`); + return { processor: this.name, value: this.value * 2, action: event.action }; + } + + static class_method_handler(event: InstanceType): string { + results.push("classmethod"); + return `Handled by ${event.event_type}`; + } + + static static_method_handler(_event: InstanceType): string { + results.push("staticmethod"); + return "Handled by static method"; + } + } + + const processor1 = new EventProcessor("Processor1", 10); + const processor2 = new EventProcessor("Processor2", 20); + + bus.on(UserActionEvent, processor1.sync_method_handler); + bus.on(UserActionEvent, processor1.async_method_handler.bind(processor1)); + bus.on(UserActionEvent, processor2.sync_method_handler); + bus.on("UserActionEvent", EventProcessor.class_method_handler); + bus.on("UserActionEvent", EventProcessor.static_method_handler); + + const event = UserActionEvent({ action: "test_methods", user_id: "u123" }); + const completed_event = bus.dispatch(event); + await completed_event.done(); + + assert.equal(results.length, 5); + assert.ok(results.includes("Processor1_sync")); + assert.ok(results.includes("Processor1_async")); + assert.ok(results.includes("Processor2_sync")); + assert.ok(results.includes("classmethod")); + assert.ok(results.includes("staticmethod")); + + const result_values = Array.from(completed_event.event_results.values()).map((result) => result.result); + + const p1_sync = result_values.find( + (result) => + typeof result === "object" && + result !== null && + (result as { processor?: string; value?: number }).processor === "Processor1" && + (result as { value?: number }).value === 10 + ) as { action?: string } | undefined; + + const p1_async = result_values.find( + (result) => + typeof result === "object" && + result !== null && + (result as { processor?: string; value?: number }).processor === "Processor1" && + (result as { value?: number }).value === 20 + ) as { action?: string } | undefined; + + assert.equal(p1_sync?.action, "test_methods"); + assert.equal(p1_async?.action, "test_methods"); +}); diff --git a/bubus-ts/tests/locking.test.ts b/bubus-ts/tests/locking.test.ts new file mode 100644 index 0000000..bc9e84b --- /dev/null +++ b/bubus-ts/tests/locking.test.ts @@ -0,0 +1,1063 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { z } from "zod"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +/* +Potential failure modes + +A) Event concurrency modes +- global-serial not enforcing strict FIFO across multiple buses (events interleave). +- bus-serial allows cross-bus interleaving but still must be FIFO within a bus; breaks under forwarding. +- parallel accidentally serializes (e.g., limiter still used) or breaks queue-jump semantics. +- auto not resolving correctly to bus defaults. + +B) Handler concurrency modes +- global-serial not enforcing strict handler order across buses. +- bus-serial leaks parallelism between handlers on the same bus. +- parallel accidentally serializes or fails to gate per-handler ordering. +- auto not resolving correctly to handler options or bus defaults. + +C) Precedence resolution +- Event overrides not taking precedence over handler options. +- Handler options not taking precedence over bus defaults. +- Conflicting settings (event says parallel, handler says serial) choose wrong winner. + +D) Queue-jump / awaited events +- event.done() inside handler doesn’t jump the queue across buses. +- Queue-jump bypasses limiters incorrectly in contexts where it shouldn’t. +- Queue-jump fails when event already in-flight. + +E) FIFO correctness +- FIFO order broken under bus-serial with interleaved emissions. +- FIFO order broken under global-serial across buses. +- FIFO order broken with forwarded events. + +F) Forwarding & bus context +- Forwarded event’s event.bus mutates current handler context (wrong bus). +- Child events emitted after forwarding are mis-parented. +- event.event_path diverges between buses. +- Handler attribution lost when forwarded across buses (tree/log issues). + +G) Parent/child tracking +- Child events not correctly linked to the parent handler when emitted via event.bus. +- event_children missing under concurrency due to async timing. +- event_pending_buses not decremented properly, leaving events stuck. + +H) Find semantics under concurrency +- find(past) returns event not yet completed. +- find(future) doesn’t resolve when event finishes in another bus. +- find with child_of returns mismatched events under concurrency. + +I) Timeouts + cancellation propagation +- Timeout doesn’t cancel pending child handlers. +- Cancelled results not marked or mis-attributed to the wrong handler. +- Timeout doesn’t propagate across forwarded buses (event still waits forever). + +J) Handler result validation +- event_result_schema not enforced under parallel handler completion. +- Invalid result doesn’t mark handler error or event failure. +- Timeout + schema error ordering wrong (e.g., schema error overwrites timeout). + +K) Idle / completion +- waitUntilIdle() returns early with in-flight events. +- event.done() resolves before children complete. +- event.done() never resolves due to deadlock in runloop. + +L) Reentrancy / nested awaits +- Nested awaited child events starve sibling handlers. +- Awaited child events skip limiter incorrectly (deadlocks or ordering regressions). + +M) Edge-cases +- Multiple handlers for same event type with different options collide. +- Handler throws synchronously before await (still counted, no leaks). +- Handler returns a rejected promise (properly surfaced). +- Event emitted with event_concurrency/handler_concurrency invalid value (schema rejects). +- Event emitted with no bus set (done should reject). +*/ + +const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); +const withResolvers = () => { + let resolve!: (value: T | PromiseLike) => void; + let reject!: (reason?: unknown) => void; + const promise = new Promise((resolve_fn, reject_fn) => { + resolve = resolve_fn; + reject = reject_fn; + }); + return { promise, resolve, reject }; +}; + +test("global-serial: only one event processes at a time across buses", async () => { + const SerialEvent = BaseEvent.extend("SerialEvent", { + order: z.number(), + source: z.string() + }); + + const bus_a = new EventBus("GlobalSerialA", { event_concurrency: "global-serial" }); + const bus_b = new EventBus("GlobalSerialB", { event_concurrency: "global-serial" }); + + let in_flight = 0; + let max_in_flight = 0; + const starts: string[] = []; + + const handler = async (event: InstanceType) => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + starts.push(`${event.source}:${event.order}`); + await sleep(10); + in_flight -= 1; + }; + + bus_a.on(SerialEvent, handler); + bus_b.on(SerialEvent, handler); + + for (let i = 0; i < 3; i += 1) { + bus_a.dispatch(SerialEvent({ order: i, source: "a" })); + bus_b.dispatch(SerialEvent({ order: i, source: "b" })); + } + + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + + assert.equal(max_in_flight, 1); + + const starts_a = starts.filter((value) => value.startsWith("a:")).map((value) => Number(value.split(":")[1])); + const starts_b = starts.filter((value) => value.startsWith("b:")).map((value) => Number(value.split(":")[1])); + + assert.deepEqual(starts_a, [0, 1, 2]); + assert.deepEqual(starts_b, [0, 1, 2]); +}); + +test("global-serial: awaited child jumps ahead of queued events across buses", async () => { + const ParentEvent = BaseEvent.extend("ParentEvent", {}); + const ChildEvent = BaseEvent.extend("ChildEvent", {}); + const QueuedEvent = BaseEvent.extend("QueuedEvent", {}); + + const bus_a = new EventBus("GlobalSerialParent", { event_concurrency: "global-serial" }); + const bus_b = new EventBus("GlobalSerialChild", { event_concurrency: "global-serial" }); + + const order: string[] = []; + + bus_b.on(ChildEvent, async () => { + order.push("child_start"); + await sleep(5); + order.push("child_end"); + }); + + bus_b.on(QueuedEvent, async () => { + order.push("queued_start"); + await sleep(1); + order.push("queued_end"); + }); + + bus_a.on(ParentEvent, async (event) => { + order.push("parent_start"); + bus_b.emit(QueuedEvent({})); + const child = bus_b.emit(ChildEvent({})); + order.push("child_dispatched"); + await child.done(); + order.push("child_awaited"); + order.push("parent_end"); + }); + + const parent = bus_a.dispatch(ParentEvent({})); + await parent.done(); + await bus_b.waitUntilIdle(); + + const child_start_idx = order.indexOf("child_start"); + const child_end_idx = order.indexOf("child_end"); + const queued_start_idx = order.indexOf("queued_start"); + + assert.ok(child_start_idx !== -1); + assert.ok(child_end_idx !== -1); + assert.ok(queued_start_idx !== -1); + assert.ok(child_start_idx < queued_start_idx); + assert.ok(child_end_idx < queued_start_idx); +}); + +test("global-serial: handler limiter serializes handlers across buses", async () => { + const HandlerEvent = BaseEvent.extend("HandlerEvent", { + order: z.number(), + source: z.string() + }); + + const bus_a = new EventBus("GlobalHandlerA", { + event_concurrency: "parallel", + handler_concurrency: "global-serial" + }); + const bus_b = new EventBus("GlobalHandlerB", { + event_concurrency: "parallel", + handler_concurrency: "global-serial" + }); + + let in_flight = 0; + let max_in_flight = 0; + + const handler = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await sleep(5); + in_flight -= 1; + }; + + bus_a.on(HandlerEvent, handler); + bus_b.on(HandlerEvent, handler); + + for (let i = 0; i < 4; i += 1) { + bus_a.dispatch(HandlerEvent({ order: i, source: "a" })); + bus_b.dispatch(HandlerEvent({ order: i, source: "b" })); + } + + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + + assert.equal(max_in_flight, 1); +}); + +test("bus-serial: events serialize per bus but overlap across buses", async () => { + const SerialEvent = BaseEvent.extend("SerialPerBusEvent", { + order: z.number(), + source: z.string() + }); + + const bus_a = new EventBus("BusSerialA", { event_concurrency: "bus-serial" }); + const bus_b = new EventBus("BusSerialB", { event_concurrency: "bus-serial" }); + + let in_flight_global = 0; + let max_in_flight_global = 0; + let in_flight_a = 0; + let in_flight_b = 0; + let max_in_flight_a = 0; + let max_in_flight_b = 0; + + let resolve_b_started: (() => void) | null = null; + const b_started = new Promise((resolve) => { + resolve_b_started = resolve; + }); + + bus_a.on(SerialEvent, async () => { + in_flight_global += 1; + in_flight_a += 1; + max_in_flight_global = Math.max(max_in_flight_global, in_flight_global); + max_in_flight_a = Math.max(max_in_flight_a, in_flight_a); + await b_started; + await sleep(10); + in_flight_global -= 1; + in_flight_a -= 1; + }); + + bus_b.on(SerialEvent, async () => { + in_flight_global += 1; + in_flight_b += 1; + max_in_flight_global = Math.max(max_in_flight_global, in_flight_global); + max_in_flight_b = Math.max(max_in_flight_b, in_flight_b); + if (resolve_b_started) { + resolve_b_started(); + resolve_b_started = null; + } + await sleep(10); + in_flight_global -= 1; + in_flight_b -= 1; + }); + + bus_a.dispatch(SerialEvent({ order: 0, source: "a" })); + bus_b.dispatch(SerialEvent({ order: 0, source: "b" })); + + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); + + assert.equal(max_in_flight_a, 1); + assert.equal(max_in_flight_b, 1); + assert.ok(max_in_flight_global >= 2); +}); + +test("bus-serial: FIFO order preserved per bus with interleaving", async () => { + const SerialEvent = BaseEvent.extend("SerialInterleavedEvent", { + order: z.number(), + source: z.string() + }); + + const bus_a = new EventBus("BusSerialOrderA", { event_concurrency: "bus-serial" }); + const bus_b = new EventBus("BusSerialOrderB", { event_concurrency: "bus-serial" }); + + const starts_a: number[] = []; + const starts_b: number[] = []; + + bus_a.on(SerialEvent, async (event) => { + starts_a.push(event.order); + await sleep(2); + }); + + bus_b.on(SerialEvent, async (event) => { + starts_b.push(event.order); + await sleep(2); + }); + + for (let i = 0; i < 4; i += 1) { + bus_a.dispatch(SerialEvent({ order: i, source: "a" })); + bus_b.dispatch(SerialEvent({ order: i, source: "b" })); + } + + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); + + assert.deepEqual(starts_a, [0, 1, 2, 3]); + assert.deepEqual(starts_b, [0, 1, 2, 3]); +}); + +test("bus-serial: awaiting child on one bus does not block other bus queue", async () => { + const ParentEvent = BaseEvent.extend("BusSerialParent", {}); + const ChildEvent = BaseEvent.extend("BusSerialChild", {}); + const OtherEvent = BaseEvent.extend("BusSerialOther", {}); + + const bus_a = new EventBus("BusSerialParentBus", { event_concurrency: "bus-serial" }); + const bus_b = new EventBus("BusSerialOtherBus", { event_concurrency: "bus-serial" }); + + const order: string[] = []; + + bus_a.on(ChildEvent, async () => { + order.push("child_start"); + await sleep(10); + order.push("child_end"); + }); + + bus_a.on(ParentEvent, async (event) => { + order.push("parent_start"); + const child = event.bus?.emit(ChildEvent({}))!; + await child.done(); + order.push("parent_end"); + }); + + bus_b.on(OtherEvent, async () => { + order.push("other_start"); + await sleep(2); + order.push("other_end"); + }); + + const parent = bus_a.dispatch(ParentEvent({})); + await sleep(0); + bus_b.dispatch(OtherEvent({})); + + await parent.done(); + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); + + const other_start_idx = order.indexOf("other_start"); + const parent_end_idx = order.indexOf("parent_end"); + assert.ok(other_start_idx !== -1); + assert.ok(parent_end_idx !== -1); + assert.ok(other_start_idx < parent_end_idx); +}); + +test("parallel: events overlap on same bus when event_concurrency is parallel", async () => { + const ParallelEvent = BaseEvent.extend("ParallelEvent", { order: z.number() }); + const bus = new EventBus("ParallelEventBus", { + event_concurrency: "parallel", + handler_concurrency: "parallel" + }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + setTimeout(() => resolve(), 20); + + bus.on(ParallelEvent, async (event) => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + await sleep(10); + in_flight -= 1; + }); + + bus.dispatch(ParallelEvent({ order: 0 })); + bus.dispatch(ParallelEvent({ order: 1 })); + + await bus.waitUntilIdle(); + assert.ok(max_in_flight >= 2); +}); + +test("parallel: handlers overlap for same event when handler_concurrency is parallel", async () => { + const ParallelHandlerEvent = BaseEvent.extend("ParallelHandlerEvent", {}); + const bus = new EventBus("ParallelHandlerBus", { + event_concurrency: "bus-serial", + handler_concurrency: "parallel" + }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + const handler_a = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + const handler_b = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + bus.on(ParallelHandlerEvent, handler_a); + bus.on(ParallelHandlerEvent, handler_b); + + const event = bus.dispatch(ParallelHandlerEvent({})); + await sleep(0); + resolve(); + await event.done(); + await bus.waitUntilIdle(); + + assert.ok(max_in_flight >= 2); +}); + +test("parallel: global-serial handler limiter still serializes across buses", async () => { + const ParallelEvent = BaseEvent.extend("ParallelEventGlobalHandler", { + source: z.string() + }); + + const bus_a = new EventBus("ParallelHandlerGlobalA", { + event_concurrency: "parallel", + handler_concurrency: "global-serial" + }); + const bus_b = new EventBus("ParallelHandlerGlobalB", { + event_concurrency: "parallel", + handler_concurrency: "global-serial" + }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + const handler = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + bus_a.on(ParallelEvent, handler); + bus_b.on(ParallelEvent, handler); + + bus_a.dispatch(ParallelEvent({ source: "a" })); + bus_b.dispatch(ParallelEvent({ source: "b" })); + + await sleep(0); + resolve(); + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); + + assert.equal(max_in_flight, 1); +}); + +test("precedence: event handler_concurrency overrides handler options", async () => { + const OverrideEvent = BaseEvent.extend("OverrideEvent", { + handler_concurrency: z.literal("bus-serial") + }); + const bus = new EventBus("OverrideBus", { handler_concurrency: "parallel" }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + const handler = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + bus.on(OverrideEvent, handler, { handler_concurrency: "parallel" }); + bus.on(OverrideEvent, handler, { handler_concurrency: "parallel" }); + + const event = bus.dispatch(OverrideEvent({ handler_concurrency: "bus-serial" })); + await sleep(0); + resolve(); + await event.done(); + await bus.waitUntilIdle(); + + assert.equal(max_in_flight, 1); +}); + +test("precedence: handler options override bus defaults when event has no override", async () => { + const OptionEvent = BaseEvent.extend("OptionEvent", {}); + const bus = new EventBus("OptionBus", { handler_concurrency: "bus-serial" }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + const handler_a = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + const handler_b = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + bus.on(OptionEvent, handler_a, { handler_concurrency: "parallel" }); + bus.on(OptionEvent, handler_b, { handler_concurrency: "parallel" }); + + const event = bus.dispatch(OptionEvent({})); + await sleep(0); + resolve(); + await event.done(); + await bus.waitUntilIdle(); + + assert.ok(max_in_flight >= 2); +}); + +test("precedence: event handler_concurrency overrides handler options to parallel", async () => { + const OverrideEvent = BaseEvent.extend("OverrideEventParallelHandlers", { + handler_concurrency: z.literal("parallel") + }); + const bus = new EventBus("OverrideParallelHandlersBus", { handler_concurrency: "bus-serial" }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + const handler = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + bus.on(OverrideEvent, handler, { handler_concurrency: "bus-serial" }); + bus.on(OverrideEvent, handler, { handler_concurrency: "bus-serial" }); + + const event = bus.dispatch(OverrideEvent({ handler_concurrency: "parallel" })); + await sleep(0); + resolve(); + await event.done(); + await bus.waitUntilIdle(); + + assert.ok(max_in_flight >= 2); +}); + +test("precedence: event event_concurrency overrides bus defaults to parallel", async () => { + const OverrideEvent = BaseEvent.extend("OverrideEventParallelEvents", { + event_concurrency: z.literal("parallel"), + order: z.number() + }); + const bus = new EventBus("OverrideParallelEventsBus", { + event_concurrency: "bus-serial", + handler_concurrency: "parallel" + }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + bus.on(OverrideEvent, async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }); + + bus.dispatch(OverrideEvent({ order: 0, event_concurrency: "parallel" })); + bus.dispatch(OverrideEvent({ order: 1, event_concurrency: "parallel" })); + + await sleep(0); + resolve(); + await bus.waitUntilIdle(); + + assert.ok(max_in_flight >= 2); +}); + +test("precedence: event event_concurrency overrides bus defaults to bus-serial", async () => { + const OverrideEvent = BaseEvent.extend("OverrideEventBusSerial", { + event_concurrency: z.literal("bus-serial"), + order: z.number() + }); + const bus = new EventBus("OverrideBusSerialEventsBus", { + event_concurrency: "parallel", + handler_concurrency: "parallel" + }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + bus.on(OverrideEvent, async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }); + + bus.dispatch(OverrideEvent({ order: 0, event_concurrency: "bus-serial" })); + bus.dispatch(OverrideEvent({ order: 1, event_concurrency: "bus-serial" })); + + await sleep(0); + assert.equal(max_in_flight, 1); + resolve(); + await bus.waitUntilIdle(); +}); + +test("global-serial + handler parallel: handlers overlap but events do not across buses", async () => { + const SerialParallelEvent = BaseEvent.extend("GlobalSerialParallelHandlers", {}); + + const bus_a = new EventBus("GlobalSerialParallelA", { + event_concurrency: "global-serial", + handler_concurrency: "parallel" + }); + const bus_b = new EventBus("GlobalSerialParallelB", { + event_concurrency: "global-serial", + handler_concurrency: "parallel" + }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + const handler = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + bus_a.on(SerialParallelEvent, handler); + bus_a.on(SerialParallelEvent, handler); + bus_b.on(SerialParallelEvent, handler); + bus_b.on(SerialParallelEvent, handler); + + bus_a.dispatch(SerialParallelEvent({})); + bus_b.dispatch(SerialParallelEvent({})); + + await sleep(0); + assert.equal(max_in_flight, 2); + resolve(); + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); +}); + +test("event parallel + handler bus-serial: handlers serialize within a bus across events", async () => { + const ParallelEvent = BaseEvent.extend("ParallelEventsSerialHandlers", { order: z.number() }); + const bus = new EventBus("ParallelEventsSerialHandlersBus", { + event_concurrency: "parallel", + handler_concurrency: "bus-serial" + }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + bus.on(ParallelEvent, async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }); + + bus.dispatch(ParallelEvent({ order: 0 })); + bus.dispatch(ParallelEvent({ order: 1 })); + + await sleep(0); + assert.equal(max_in_flight, 1); + resolve(); + await bus.waitUntilIdle(); +}); + +test("event parallel + handler bus-serial: handlers overlap across buses", async () => { + const ParallelEvent = BaseEvent.extend("ParallelEventsBusHandlers", { source: z.string() }); + + const bus_a = new EventBus("ParallelBusHandlersA", { + event_concurrency: "parallel", + handler_concurrency: "bus-serial" + }); + const bus_b = new EventBus("ParallelBusHandlersB", { + event_concurrency: "parallel", + handler_concurrency: "bus-serial" + }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + const handler = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + bus_a.on(ParallelEvent, handler); + bus_b.on(ParallelEvent, handler); + + bus_a.dispatch(ParallelEvent({ source: "a" })); + bus_b.dispatch(ParallelEvent({ source: "b" })); + + await sleep(0); + assert.ok(max_in_flight >= 2); + resolve(); + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); +}); + +test("handler options can enforce global-serial even when bus defaults to parallel", async () => { + const HandlerEvent = BaseEvent.extend("HandlerOptionsGlobalSerial", { source: z.string() }); + + const bus_a = new EventBus("HandlerOptionsGlobalA", { + event_concurrency: "parallel", + handler_concurrency: "parallel" + }); + const bus_b = new EventBus("HandlerOptionsGlobalB", { + event_concurrency: "parallel", + handler_concurrency: "parallel" + }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + const handler = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + bus_a.on(HandlerEvent, handler, { handler_concurrency: "global-serial" }); + bus_b.on(HandlerEvent, handler, { handler_concurrency: "global-serial" }); + + bus_a.dispatch(HandlerEvent({ source: "a" })); + bus_b.dispatch(HandlerEvent({ source: "b" })); + + await sleep(0); + assert.equal(max_in_flight, 1); + resolve(); + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); +}); + +test("auto: event_concurrency auto resolves to bus defaults", async () => { + const AutoEvent = BaseEvent.extend("AutoEvent", { + event_concurrency: z.literal("auto") + }); + const bus = new EventBus("AutoBus", { event_concurrency: "bus-serial" }); + + let in_flight = 0; + let max_in_flight = 0; + + bus.on(AutoEvent, async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await sleep(5); + in_flight -= 1; + }); + + bus.dispatch(AutoEvent({ event_concurrency: "auto" })); + bus.dispatch(AutoEvent({ event_concurrency: "auto" })); + + await bus.waitUntilIdle(); + assert.equal(max_in_flight, 1); +}); + +test("auto: handler_concurrency auto resolves to bus defaults", async () => { + const AutoHandlerEvent = BaseEvent.extend("AutoHandlerEvent", { + handler_concurrency: z.literal("auto") + }); + const bus = new EventBus("AutoHandlerBus", { handler_concurrency: "bus-serial" }); + + let in_flight = 0; + let max_in_flight = 0; + const { promise, resolve } = withResolvers(); + + const handler = async () => { + in_flight += 1; + max_in_flight = Math.max(max_in_flight, in_flight); + await promise; + in_flight -= 1; + }; + + bus.on(AutoHandlerEvent, handler); + bus.on(AutoHandlerEvent, handler); + + const event = bus.dispatch(AutoHandlerEvent({ handler_concurrency: "auto" })); + await sleep(0); + resolve(); + await event.done(); + await bus.waitUntilIdle(); + + assert.equal(max_in_flight, 1); +}); + +test("queue-jump: awaited child preempts queued sibling on same bus", async () => { + const ParentEvent = BaseEvent.extend("QueueJumpParent", {}); + const ChildEvent = BaseEvent.extend("QueueJumpChild", {}); + const SiblingEvent = BaseEvent.extend("QueueJumpSibling", {}); + + const bus = new EventBus("QueueJumpBus", { event_concurrency: "bus-serial" }); + const order: string[] = []; + + bus.on(ChildEvent, async () => { + order.push("child_start"); + await sleep(5); + order.push("child_end"); + }); + + bus.on(SiblingEvent, async () => { + order.push("sibling_start"); + await sleep(1); + order.push("sibling_end"); + }); + + bus.on(ParentEvent, async (event) => { + order.push("parent_start"); + bus.emit(SiblingEvent({})); + const child = event.bus?.emit(ChildEvent({}))!; + order.push("child_dispatched"); + await child.done(); + order.push("child_awaited"); + order.push("parent_end"); + }); + + const parent = bus.dispatch(ParentEvent({})); + await parent.done(); + await bus.waitUntilIdle(); + + const child_start_idx = order.indexOf("child_start"); + const child_end_idx = order.indexOf("child_end"); + const sibling_start_idx = order.indexOf("sibling_start"); + + assert.ok(child_start_idx !== -1); + assert.ok(child_end_idx !== -1); + assert.ok(sibling_start_idx !== -1); + assert.ok(child_start_idx < sibling_start_idx); + assert.ok(child_end_idx < sibling_start_idx); +}); + +test("queue-jump: awaiting in-flight event does not double-run handlers", async () => { + const InFlightEvent = BaseEvent.extend("InFlightEvent", {}); + const bus = new EventBus("InFlightBus", { + event_concurrency: "parallel", + handler_concurrency: "parallel" + }); + + let handler_runs = 0; + let resolve_started: (() => void) | null = null; + const started = new Promise((resolve) => { + resolve_started = resolve; + }); + const { promise: release_child, resolve: resolve_child } = withResolvers(); + + bus.on(InFlightEvent, async () => { + handler_runs += 1; + if (resolve_started) { + resolve_started(); + resolve_started = null; + } + await release_child; + }); + + const child = bus.dispatch(InFlightEvent({})); + await started; + + let done_resolved = false; + const done_promise = child.done().then(() => { + done_resolved = true; + }); + + await sleep(0); + assert.equal(done_resolved, false); + + resolve_child(); + await done_promise; + await bus.waitUntilIdle(); + + assert.equal(handler_runs, 1); +}); + +test("edge-case: event with no handlers completes immediately", async () => { + const NoHandlerEvent = BaseEvent.extend("NoHandlerEvent", {}); + const bus = new EventBus("NoHandlerBus"); + + const event = bus.dispatch(NoHandlerEvent({})); + await event.done(); + await bus.waitUntilIdle(); + + assert.equal(event.event_status, "completed"); + assert.equal(event.event_pending_buses, 0); +}); + +test("fifo: forwarded events preserve order on target bus (bus-serial)", async () => { + const OrderedEvent = BaseEvent.extend("ForwardOrderEvent", { order: z.number() }); + + const bus_a = new EventBus("ForwardOrderA", { event_concurrency: "bus-serial" }); + const bus_b = new EventBus("ForwardOrderB", { event_concurrency: "bus-serial" }); + + const order_a: number[] = []; + const order_b: number[] = []; + + bus_a.on(OrderedEvent, async (event) => { + order_a.push(event.order); + bus_b.dispatch(event); + await sleep(2); + }); + + bus_b.on(OrderedEvent, async (event) => { + const bus_b_results = Array.from(event.event_results.values()).filter( + (result) => result.eventbus_name === "ForwardOrderB" + ); + const in_flight = bus_b_results.filter( + (result) => result.status === "pending" || result.status === "started" + ); + assert.ok(in_flight.length <= 1); + order_b.push(event.order); + await sleep(1); + }); + + for (let i = 0; i < 5; i += 1) { + bus_a.dispatch(OrderedEvent({ order: i })); + } + + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); + + const history_orders = bus_b.event_history.map((event) => (event as { order?: number }).order); + const results_sizes = bus_b.event_history.map((event) => event.event_results.size); + const bus_b_result_counts = bus_b.event_history.map((event) => + Array.from(event.event_results.values()).filter( + (result) => result.eventbus_name === "ForwardOrderB" + ).length + ); + const processed_flags = bus_b.event_history.map((event) => + Array.from(event.event_results.values()) + .filter((result) => result.eventbus_name === "ForwardOrderB") + .every((result) => result.status === "completed" || result.status === "error") + ); + const pending_counts = bus_b.event_history.map( + (event) => Array.from(event.event_results.values()).filter((result) => result.status === "pending").length + ); + assert.deepEqual(order_a, [0, 1, 2, 3, 4]); + assert.deepEqual(order_b, [0, 1, 2, 3, 4]); + assert.deepEqual(history_orders, [0, 1, 2, 3, 4]); + assert.deepEqual(results_sizes, [2, 2, 2, 2, 2]); + assert.deepEqual(bus_b_result_counts, [1, 1, 1, 1, 1]); + assert.deepEqual(processed_flags, [true, true, true, true, true]); + assert.deepEqual(pending_counts, [0, 0, 0, 0, 0]); +}); + +test("fifo: forwarded events preserve order across chained buses (bus-serial)", async () => { + const OrderedEvent = BaseEvent.extend("ForwardChainEvent", { order: z.number() }); + + const bus_a = new EventBus("ForwardChainA", { event_concurrency: "bus-serial" }); + const bus_b = new EventBus("ForwardChainB", { event_concurrency: "bus-serial" }); + const bus_c = new EventBus("ForwardChainC", { event_concurrency: "bus-serial" }); + + const order_c: number[] = []; + + bus_b.on(OrderedEvent, async () => { + await sleep(2); + }); + + bus_c.on(OrderedEvent, async (event) => { + order_c.push(event.order); + await sleep(1); + }); + + bus_a.on("*", bus_b.dispatch); + bus_b.on("*", bus_c.dispatch); + + for (let i = 0; i < 6; i += 1) { + bus_a.dispatch(OrderedEvent({ order: i })); + } + + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + await bus_c.waitUntilIdle(); + + assert.deepEqual(order_c, [0, 1, 2, 3, 4, 5]); +}); + +test("find: past returns most recent completed event (bus-scoped)", async () => { + const DebounceEvent = BaseEvent.extend("FindPastEvent", { value: z.number() }); + const bus = new EventBus("FindPastBus"); + + bus.on(DebounceEvent, async () => {}); + + bus.dispatch(DebounceEvent({ value: 1 })); + bus.dispatch(DebounceEvent({ value: 2 })); + + await bus.waitUntilIdle(); + + const found = await bus.find(DebounceEvent, { past: true, future: false }); + assert.ok(found); + assert.equal(found.value, 2); + assert.equal(found.event_status, "completed"); + assert.ok(found.bus); + assert.equal(found.bus.name, "FindPastBus"); + assert.equal(typeof found.bus.dispatch, "function"); +}); + +test("find: future returns in-flight event and done waits", async () => { + const DebounceEvent = BaseEvent.extend("FindFutureEvent", { value: z.number() }); + const bus = new EventBus("FindFutureBus"); + const { promise, resolve } = withResolvers(); + + bus.on(DebounceEvent, async () => { + await promise; + }); + + bus.dispatch(DebounceEvent({ value: 1 })); + + const found = await bus.find(DebounceEvent, { past: false, future: true }); + assert.ok(found); + assert.equal(found.value, 1); + assert.ok(found.event_status !== "completed"); + assert.ok(found.bus); + assert.equal(found.bus.name, "FindFutureBus"); + + resolve(); + const completed = await found.done(); + assert.equal(completed.event_status, "completed"); +}); + +test("find: future waits for next event when none in-flight", async () => { + const DebounceEvent = BaseEvent.extend("FindWaitEvent", { value: z.number() }); + const bus = new EventBus("FindWaitBus"); + + bus.on(DebounceEvent, async () => {}); + + setTimeout(() => { + bus.dispatch(DebounceEvent({ value: 99 })); + }, 10); + + const found = await bus.find(DebounceEvent, { past: false, future: 0.2 }); + assert.ok(found); + assert.equal(found.value, 99); + assert.ok(found.bus); + assert.equal(found.bus.name, "FindWaitBus"); + await found.done(); +}); + +test("find: most recent wins across completed and in-flight", async () => { + const DebounceEvent = BaseEvent.extend("FindMostRecentEvent", { value: z.number() }); + const bus = new EventBus("FindMostRecentBus"); + const { promise, resolve } = withResolvers(); + + bus.on(DebounceEvent, async (event) => { + if (event.value === 2) { + await promise; + } + }); + + bus.dispatch(DebounceEvent({ value: 1 })); + await bus.waitUntilIdle(); + + bus.dispatch(DebounceEvent({ value: 2 })); + + const found = await bus.find(DebounceEvent, { past: true, future: true }); + assert.ok(found); + assert.equal(found.value, 2); + assert.ok(found.event_status !== "completed"); + + resolve(); + await found.done(); +}); diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts index 3a1ed45..5d72b9f 100644 --- a/bubus-ts/tests/timeout.test.ts +++ b/bubus-ts/tests/timeout.test.ts @@ -1,7 +1,12 @@ import assert from "node:assert/strict"; import { test } from "node:test"; -import { BaseEvent, EventBus, EventHandlerTimeoutError } from "../src/index.js"; +import { + BaseEvent, + EventBus, + EventHandlerCancelledError, + EventHandlerTimeoutError +} from "../src/index.js"; const TimeoutEvent = BaseEvent.extend("TimeoutEvent", {}); @@ -41,3 +46,471 @@ test("handler completes within timeout", async () => { assert.equal(result.status, "completed"); assert.equal(result.result, "fast"); }); + +test("handler timeouts fire across concurrency modes", async () => { + const modes = ["global-serial", "bus-serial", "parallel"] as const; + + for (const event_mode of modes) { + for (const handler_mode of modes) { + const bus = new EventBus(`Timeout-${event_mode}-${handler_mode}`, { + event_concurrency: event_mode, + handler_concurrency: handler_mode + }); + + bus.on(TimeoutEvent, async () => { + await delay(50); + return "slow"; + }); + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal( + result.status, + "error", + `Expected timeout error for event=${event_mode} handler=${handler_mode}` + ); + assert.ok( + result.error instanceof EventHandlerTimeoutError, + `Expected EventHandlerTimeoutError for event=${event_mode} handler=${handler_mode}` + ); + + await bus.waitUntilIdle(); + } + } +}); + +test("timeout still marks event failed when other handlers finish", async () => { + const bus = new EventBus("TimeoutParallelHandlers", { + event_concurrency: "parallel", + handler_concurrency: "parallel" + }); + + const results: string[] = []; + + bus.on(TimeoutEvent, async () => { + await delay(1); + results.push("fast"); + return "fast"; + }); + + bus.on(TimeoutEvent, async () => { + await delay(50); + results.push("slow"); + return "slow"; + }); + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); + await event.done(); + + const statuses = Array.from(event.event_results.values()).map((result) => result.status); + assert.ok(statuses.includes("completed")); + assert.ok(statuses.includes("error")); + assert.equal(event.event_status, "completed"); + assert.ok(event.event_errors.length > 0); + assert.ok(results.includes("fast")); +}); + +test("deadlock warning triggers when event exceeds timeout", async () => { + const bus = new EventBus("DeadlockWarnBus"); + const warnings: string[] = []; + const original_warn = console.warn; + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)); + if (args.length > 0) { + warnings.push(args.map(String).join(" ")); + } + }; + + try { + bus.on(TimeoutEvent, async () => { + await new Promise(() => { + // never resolve + }); + }); + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); + await event.done(); + } finally { + console.warn = original_warn; + } + + assert.ok( + warnings.some((message) => message.includes("Possible deadlock")), + "Expected deadlock warning" + ); +}); + +test("slow handler warning fires when handler runs long", async () => { + const bus = new EventBus("SlowHandlerWarnBus"); + const warnings: string[] = []; + const original_warn = console.warn; + const original_set_timeout = global.setTimeout; + const original_clear_timeout = global.clearTimeout; + + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)); + if (args.length > 0) { + warnings.push(args.map(String).join(" ")); + } + }; + + // Force the slow-handler warning timer to fire immediately + global.setTimeout = ((callback: (...args: unknown[]) => void, delay?: number, ...args: unknown[]) => { + if (delay === 15000) { + return original_set_timeout(callback, 0, ...args); + } + return original_set_timeout(callback, delay as number, ...args); + }) as typeof setTimeout; + + global.clearTimeout = ((timeout: ReturnType) => { + return original_clear_timeout(timeout); + }) as typeof clearTimeout; + + try { + bus.on(TimeoutEvent, async () => { + await delay(5); + return "ok"; + }); + + const event = bus.dispatch(TimeoutEvent({ event_timeout: null })); + await event.done(); + } finally { + console.warn = original_warn; + global.setTimeout = original_set_timeout; + global.clearTimeout = original_clear_timeout; + } + + assert.ok( + warnings.some((message) => message.includes("Slow handler")), + "Expected slow handler warning" + ); +}); + +test("event-level concurrency overrides do not bypass timeouts", async () => { + const bus = new EventBus("TimeoutEventOverrideBus", { + event_concurrency: "global-serial", + handler_concurrency: "global-serial" + }); + + bus.on(TimeoutEvent, async () => { + await delay(50); + return "slow"; + }); + + const event = bus.dispatch( + TimeoutEvent({ + event_timeout: 0.01, + event_concurrency: "parallel", + handler_concurrency: "parallel" + }) + ); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "error"); + assert.ok(result.error instanceof EventHandlerTimeoutError); +}); + +test("handler-level concurrency overrides do not bypass timeouts", async () => { + const bus = new EventBus("TimeoutHandlerOverrideBus", { + event_concurrency: "parallel", + handler_concurrency: "global-serial" + }); + + const order: string[] = []; + + bus.on( + TimeoutEvent, + async () => { + order.push("slow_start"); + await delay(50); + order.push("slow_end"); + return "slow"; + }, + { handler_concurrency: "bus-serial" } + ); + + bus.on( + TimeoutEvent, + async () => { + order.push("fast_start"); + await delay(1); + order.push("fast_end"); + return "fast"; + }, + { handler_concurrency: "parallel" } + ); + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); + await event.done(); + + const statuses = Array.from(event.event_results.values()).map((result) => result.status); + assert.ok(statuses.includes("error")); + assert.ok(statuses.includes("completed")); + assert.ok(order.includes("fast_start")); +}); + +test("forwarded event timeouts apply across buses", async () => { + const bus_a = new EventBus("TimeoutForwardA", { event_concurrency: "bus-serial" }); + const bus_b = new EventBus("TimeoutForwardB", { event_concurrency: "bus-serial" }); + + bus_a.on(TimeoutEvent, async (event) => { + bus_b.dispatch(event); + }); + + bus_b.on(TimeoutEvent, async () => { + await delay(50); + return "slow"; + }); + + const event = bus_a.dispatch(TimeoutEvent({ event_timeout: 0.01 })); + await event.done(); + + const results = Array.from(event.event_results.values()); + const bus_b_result = results.find((result) => result.eventbus_name === "TimeoutForwardB"); + assert.ok(bus_b_result); + assert.equal(bus_b_result?.status, "error"); + assert.ok(bus_b_result?.error instanceof EventHandlerTimeoutError); +}); + +test("queue-jump awaited child timeouts still fire across buses", async () => { + const ParentEvent = BaseEvent.extend("TimeoutParentEvent", {}); + const ChildEvent = BaseEvent.extend("TimeoutChildEvent", {}); + + const bus_a = new EventBus("TimeoutQueueJumpA", { event_concurrency: "global-serial" }); + const bus_b = new EventBus("TimeoutQueueJumpB", { event_concurrency: "global-serial" }); + + let child_ref: InstanceType | null = null; + + bus_b.on(ChildEvent, async () => { + await delay(50); + return "slow"; + }); + + bus_a.on(ParentEvent, async () => { + const child = bus_b.dispatch(ChildEvent({ event_timeout: 0.01 })); + child_ref = child; + await child.done(); + }); + + const parent = bus_a.dispatch(ParentEvent({ event_timeout: 0.5 })); + await parent.done(); + + assert.ok(child_ref); + const child_results = Array.from(child_ref!.event_results.values()); + const timeout_result = child_results.find( + (result) => result.error instanceof EventHandlerTimeoutError + ); + assert.ok(timeout_result); +}); + +test("parent timeout cancels pending child handler results under serial handler limiter", async () => { + const ParentEvent = BaseEvent.extend("TimeoutCancelParentEvent", {}); + const ChildEvent = BaseEvent.extend("TimeoutCancelChildEvent", {}); + + const bus = new EventBus("TimeoutCancelBus", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + + let child_runs = 0; + + bus.on(ChildEvent, async () => { + child_runs += 1; + await delay(30); + return "first"; + }); + + bus.on(ChildEvent, async () => { + child_runs += 1; + await delay(10); + return "second"; + }); + + bus.on(ParentEvent, async (event) => { + event.bus?.emit(ChildEvent({ event_timeout: 0.2 })); + await delay(50); + }); + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })); + await parent.done(); + await bus.waitUntilIdle(); + + const child = parent.event_children[0]; + assert.ok(child); + + assert.equal(child_runs, 0); + + const cancelled_results = Array.from(child.event_results.values()).filter( + (result) => result.error instanceof EventHandlerCancelledError + ); + assert.ok(cancelled_results.length > 0); +}); + +test("event_timeout null falls back to bus default", async () => { + const bus = new EventBus("TimeoutDefaultBus", { event_timeout: 0.01 }); + + bus.on(TimeoutEvent, async () => { + await delay(50); + return "slow"; + }); + + const event = bus.dispatch(TimeoutEvent({ event_timeout: null })); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "error"); + assert.ok(result.error instanceof EventHandlerTimeoutError); +}); + +test("bus default null disables timeouts when event_timeout is null", async () => { + const bus = new EventBus("TimeoutDisabledBus", { event_timeout: null }); + + bus.on(TimeoutEvent, async () => { + await delay(20); + return "ok"; + }); + + const event = bus.dispatch(TimeoutEvent({ event_timeout: null })); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.equal(result.result, "ok"); +}); + +test("multi-level timeout cascade with mixed cancellations", async () => { + const TopEvent = BaseEvent.extend("TimeoutCascadeTop", {}); + const QueuedChildEvent = BaseEvent.extend("TimeoutCascadeQueuedChild", {}); + const AwaitedChildEvent = BaseEvent.extend("TimeoutCascadeAwaitedChild", {}); + const ImmediateGrandchildEvent = BaseEvent.extend("TimeoutCascadeImmediateGrandchild", {}); + const QueuedGrandchildEvent = BaseEvent.extend("TimeoutCascadeQueuedGrandchild", {}); + + const bus = new EventBus("TimeoutCascadeBus", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + + let queued_child: InstanceType | null = null; + let awaited_child: InstanceType | null = null; + let immediate_grandchild: InstanceType | null = null; + let queued_grandchild: InstanceType | null = null; + + let queued_child_runs = 0; + let immediate_grandchild_runs = 0; + let queued_grandchild_runs = 0; + + const queued_child_fast = async () => { + queued_child_runs += 1; + await delay(5); + return "queued_fast"; + }; + + const queued_child_slow = async () => { + queued_child_runs += 1; + await delay(50); + return "queued_slow"; + }; + + const awaited_child_fast = async () => { + await delay(5); + return "awaited_fast"; + }; + + const awaited_child_slow = async (event: BaseEvent) => { + queued_grandchild = event.bus?.emit( + QueuedGrandchildEvent({ event_timeout: 0.2 }) + )!; + immediate_grandchild = event.bus?.emit( + ImmediateGrandchildEvent({ event_timeout: 0.2 }) + )!; + await immediate_grandchild.done(); + await delay(100); + return "awaited_slow"; + }; + + const immediate_grandchild_slow = async () => { + immediate_grandchild_runs += 1; + await delay(50); + return "immediate_grandchild_slow"; + }; + + const immediate_grandchild_fast = async () => { + immediate_grandchild_runs += 1; + await delay(10); + return "immediate_grandchild_fast"; + }; + + const queued_grandchild_slow = async () => { + queued_grandchild_runs += 1; + await delay(50); + return "queued_grandchild_slow"; + }; + + const queued_grandchild_fast = async () => { + queued_grandchild_runs += 1; + await delay(10); + return "queued_grandchild_fast"; + }; + + bus.on(QueuedChildEvent, queued_child_fast); + bus.on(QueuedChildEvent, queued_child_slow); + bus.on(AwaitedChildEvent, awaited_child_fast); + bus.on(AwaitedChildEvent, awaited_child_slow); + bus.on(ImmediateGrandchildEvent, immediate_grandchild_slow); + bus.on(ImmediateGrandchildEvent, immediate_grandchild_fast); + bus.on(QueuedGrandchildEvent, queued_grandchild_slow); + bus.on(QueuedGrandchildEvent, queued_grandchild_fast); + + bus.on(TopEvent, async (event) => { + queued_child = event.bus?.emit(QueuedChildEvent({ event_timeout: 0.2 }))!; + awaited_child = event.bus?.emit(AwaitedChildEvent({ event_timeout: 0.03 }))!; + await awaited_child.done(); + await delay(80); + }); + + const top = bus.dispatch(TopEvent({ event_timeout: 0.04 })); + await top.done(); + await bus.waitUntilIdle(); + + const top_result = Array.from(top.event_results.values())[0]; + assert.equal(top_result.status, "error"); + assert.ok(top_result.error instanceof EventHandlerTimeoutError); + + assert.ok(queued_child); + const queued_results = Array.from(queued_child!.event_results.values()); + assert.equal(queued_child_runs, 0); + assert.ok(queued_results.length >= 2); + for (const result of queued_results) { + assert.equal(result.status, "error"); + assert.ok(result.error instanceof EventHandlerCancelledError); + assert.ok( + (result.error as EventHandlerCancelledError).parent_error instanceof EventHandlerTimeoutError + ); + } + + assert.ok(awaited_child); + const awaited_results = Array.from(awaited_child!.event_results.values()); + const awaited_completed = awaited_results.filter((result) => result.status === "completed"); + const awaited_timeouts = awaited_results.filter( + (result) => result.error instanceof EventHandlerTimeoutError + ); + assert.equal(awaited_completed.length, 1); + assert.equal(awaited_timeouts.length, 1); + + assert.ok(immediate_grandchild); + const immediate_results = Array.from(immediate_grandchild!.event_results.values()); + assert.equal(immediate_grandchild_runs, 2); + const immediate_completed = immediate_results.filter((result) => result.status === "completed"); + assert.equal(immediate_completed.length, 2); + + assert.ok(queued_grandchild); + const queued_grandchild_results = Array.from(queued_grandchild!.event_results.values()); + assert.equal(queued_grandchild_runs, 0); + const queued_cancelled = queued_grandchild_results.filter( + (result) => result.error instanceof EventHandlerCancelledError + ); + assert.ok(queued_cancelled.length >= 2); +}); diff --git a/bubus-ts/tests/typed_results.test.ts b/bubus-ts/tests/typed_results.test.ts new file mode 100644 index 0000000..36b568b --- /dev/null +++ b/bubus-ts/tests/typed_results.test.ts @@ -0,0 +1,195 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { z } from "zod"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const typed_result_schema = z.object({ + value: z.string(), + count: z.number() +}); + +const TypedResultEvent = BaseEvent.extend("TypedResultEvent", { + event_result_schema: typed_result_schema, + event_result_type: "TypedResult" +}); + +const StringResultEvent = BaseEvent.extend("StringResultEvent", { + event_result_schema: z.string(), + event_result_type: "string" +}); + +const NumberResultEvent = BaseEvent.extend("NumberResultEvent", { + event_result_schema: z.number(), + event_result_type: "number" +}); + +const ComplexResultEvent = BaseEvent.extend("ComplexResultEvent", { + event_result_schema: z.object({ + items: z.array(z.string()), + metadata: z.record(z.string(), z.number()) + }) +}); + +const NoSchemaEvent = BaseEvent.extend("NoSchemaEvent", {}); + +test("typed result schema validates and parses handler result", async () => { + const bus = new EventBus("TypedResultBus"); + + bus.on(TypedResultEvent, () => ({ value: "hello", count: 42 })); + + const event = bus.dispatch(TypedResultEvent({})); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.deepEqual(result.result, { value: "hello", count: 42 }); + assert.equal(event.event_result_type, "TypedResult"); +}); + +test("built-in result schemas validate handler results", async () => { + const bus = new EventBus("BuiltinResultBus"); + + bus.on(StringResultEvent, () => "42"); + bus.on(NumberResultEvent, () => 123); + + const string_event = bus.dispatch(StringResultEvent({})); + const number_event = bus.dispatch(NumberResultEvent({})); + await string_event.done(); + await number_event.done(); + + const string_result = Array.from(string_event.event_results.values())[0]; + const number_result = Array.from(number_event.event_results.values())[0]; + + assert.equal(string_result.status, "completed"); + assert.equal(string_result.result, "42"); + assert.equal(number_result.status, "completed"); + assert.equal(number_result.result, 123); +}); + +test("invalid handler result marks error when schema is defined", async () => { + const bus = new EventBus("ResultValidationErrorBus"); + + bus.on(NumberResultEvent, () => "not_a_number"); + + const event = bus.dispatch(NumberResultEvent({})); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "error"); + assert.ok(result.error instanceof Error); + assert.ok(event.event_errors.length > 0); +}); + +test("no schema leaves raw handler result untouched", async () => { + const bus = new EventBus("NoSchemaResultBus"); + + bus.on(NoSchemaEvent, () => ({ raw: true })); + + const event = bus.dispatch(NoSchemaEvent({})); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.deepEqual(result.result, { raw: true }); +}); + +test("complex result schema validates nested data", async () => { + const bus = new EventBus("ComplexResultBus"); + + bus.on(ComplexResultEvent, () => ({ + items: ["a", "b"], + metadata: { a: 1, b: 2 } + })); + + const event = bus.dispatch(ComplexResultEvent({})); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.deepEqual(result.result, { items: ["a", "b"], metadata: { a: 1, b: 2 } }); +}); + +test("fromJSON converts event_result_schema into zod schema", async () => { + const bus = new EventBus("FromJsonResultBus"); + + const original = TypedResultEvent({ + event_result_schema: typed_result_schema, + event_result_type: "TypedResult" + }); + const json = original.toJSON(); + + const restored = TypedResultEvent.fromJSON?.(json) ?? TypedResultEvent(json as never); + + assert.ok(restored.event_result_schema); + assert.equal(typeof (restored.event_result_schema as { safeParse?: unknown }).safeParse, "function"); + + bus.on(TypedResultEvent, () => ({ value: "from-json", count: 7 })); + + const dispatched = bus.dispatch(restored); + await dispatched.done(); + + const result = Array.from(dispatched.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.deepEqual(result.result, { value: "from-json", count: 7 }); +}); + +test("roundtrip preserves complex result schema types", async () => { + const bus = new EventBus("RoundtripSchemaBus"); + + const complex_schema = z.object({ + title: z.string(), + count: z.number(), + flags: z.array(z.boolean()), + active: z.boolean(), + meta: z.object({ + tags: z.array(z.string()), + rating: z.number() + }) + }); + + const ComplexRoundtripEvent = BaseEvent.extend("ComplexRoundtripEvent", { + event_result_schema: complex_schema, + event_result_type: "ComplexRoundtrip" + }); + + const original = ComplexRoundtripEvent({ + event_result_schema: complex_schema, + event_result_type: "ComplexRoundtrip" + }); + + const roundtripped = + ComplexRoundtripEvent.fromJSON?.(original.toJSON()) ?? + ComplexRoundtripEvent(original.toJSON() as never); + + const zod_any = z as unknown as { + toJSONSchema?: (schema: unknown) => unknown; + }; + if (typeof zod_any.toJSONSchema === "function") { + const original_schema_json = zod_any.toJSONSchema(complex_schema); + const roundtrip_schema_json = zod_any.toJSONSchema(roundtripped.event_result_schema); + assert.deepEqual(roundtrip_schema_json, original_schema_json); + } + + bus.on(ComplexRoundtripEvent, () => ({ + title: "ok", + count: 3, + flags: [true, false, true], + active: false, + meta: { tags: ["a", "b"], rating: 4 } + })); + + const dispatched = bus.dispatch(roundtripped); + await dispatched.done(); + + const result = Array.from(dispatched.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.deepEqual(result.result, { + title: "ok", + count: 3, + flags: [true, false, true], + active: false, + meta: { tags: ["a", "b"], rating: 4 } + }); +}); From b678e793d1e53a7a1f53b7b09e51daacee32b7b1 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Thu, 5 Feb 2026 16:19:26 -0800 Subject: [PATCH 46/79] more queue jumping fixes --- bubus-ts/src/event_bus.ts | 33 +++++++++++++++++++++++++++++---- bubus-ts/src/event_result.ts | 2 ++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index a996644..666eae0 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -368,8 +368,15 @@ export class EventBus { }); } - async _runImmediately(event: T): Promise { + async _runImmediately( + event: T, + handler_result?: EventResult + ): Promise { const original_event = event._original_event ?? event; + if (handler_result && !handler_result.queue_jump_hold) { + handler_result.queue_jump_hold = true; + this.immediate_processing_stack_depth += 1; + } if (original_event.event_status === "completed") { return event; } @@ -603,7 +610,7 @@ export class EventBus { 0, bus.immediate_processing_stack_depth - 1 ); - bus.releaseRunNowWaiters(); + bus.releaseImmediateProcessingWaiters(); } } } @@ -638,7 +645,7 @@ export class EventBus { return ordered; } - private releaseRunNowWaiters(): void { + private releaseImmediateProcessingWaiters(): void { if ( this.immediate_processing_stack_depth !== 0 || this.immediate_processing_waiters.length === 0 @@ -882,6 +889,14 @@ export class EventBus { 0, EventBus.global_inside_handler_depth - 1 ); + if (result.queue_jump_hold) { + result.queue_jump_hold = false; + this.immediate_processing_stack_depth = Math.max( + 0, + this.immediate_processing_stack_depth - 1 + ); + this.releaseImmediateProcessingWaiters(); + } } }); } @@ -993,6 +1008,15 @@ export class EventBus { const handler_id = handler_result?.handler_id; const bus_proxy = new Proxy(bus, { get(target, prop, receiver) { + if (prop === "_runImmediately") { + return (child_event: BaseEvent) => { + const runner = Reflect.get(target, prop, receiver) as ( + event: BaseEvent, + handler_result?: EventResult + ) => Promise; + return runner.call(target, child_event, handler_result); + }; + } if (prop === "dispatch" || prop === "emit") { return (child_event: BaseEvent, event_key?: EventKey) => { const original_child = child_event._original_event ?? child_event; @@ -1006,7 +1030,8 @@ export class EventBus { event: BaseEvent, event_key?: EventKey ) => BaseEvent; - return dispatcher.call(target, original_child, event_key); + const dispatched = dispatcher.call(target, original_child, event_key); + return target._getBusScopedEvent(dispatched, handler_result); }; } return Reflect.get(target, prop, receiver); diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts index cc74016..3b34402 100644 --- a/bubus-ts/src/event_result.ts +++ b/bubus-ts/src/event_result.ts @@ -17,6 +17,7 @@ export class EventResult { result?: unknown; error?: unknown; event_children: BaseEvent[]; + queue_jump_hold: boolean; constructor(params: { event_id: string; @@ -33,6 +34,7 @@ export class EventResult { this.handler_file_path = params.handler_file_path; this.eventbus_name = params.eventbus_name; this.event_children = []; + this.queue_jump_hold = false; } markStarted(): void { From 2e0a9d2dad13a232f1888d01a8d075ee457076b7 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Thu, 5 Feb 2026 16:20:36 -0800 Subject: [PATCH 47/79] add debug logging --- bubus-ts/tests/comprehensive_patterns.test.ts | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index fc319bf..be89814 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -163,6 +163,7 @@ test("race condition stress", async () => { test("awaited child jumps queue without overshoot", async () => { const bus = new EventBus("TestBus", { max_history_size: 100 }); const execution_order: string[] = []; + const debug_order: Array<{ label: string; at: string }> = []; const Event1 = BaseEvent.extend("Event1", {}); const Event2 = BaseEvent.extend("Event2", {}); @@ -171,29 +172,39 @@ test("awaited child jumps queue without overshoot", async () => { const event1_handler = async (_event: BaseEvent): Promise => { execution_order.push("Event1_start"); + debug_order.push({ label: "Event1_start", at: new Date().toISOString() }); const child = _event.bus?.emit(LocalChildEvent({}))!; execution_order.push("Child_dispatched"); + debug_order.push({ label: "Child_dispatched", at: new Date().toISOString() }); await child.done(); execution_order.push("Child_await_returned"); + debug_order.push({ label: "Child_await_returned", at: new Date().toISOString() }); execution_order.push("Event1_end"); + debug_order.push({ label: "Event1_end", at: new Date().toISOString() }); return "event1_done"; }; const event2_handler = async (): Promise => { execution_order.push("Event2_start"); + debug_order.push({ label: "Event2_start", at: new Date().toISOString() }); execution_order.push("Event2_end"); + debug_order.push({ label: "Event2_end", at: new Date().toISOString() }); return "event2_done"; }; const event3_handler = async (): Promise => { execution_order.push("Event3_start"); + debug_order.push({ label: "Event3_start", at: new Date().toISOString() }); execution_order.push("Event3_end"); + debug_order.push({ label: "Event3_end", at: new Date().toISOString() }); return "event3_done"; }; const child_handler = async (): Promise => { execution_order.push("Child_start"); + debug_order.push({ label: "Child_start", at: new Date().toISOString() }); execution_order.push("Child_end"); + debug_order.push({ label: "Child_end", at: new Date().toISOString() }); return "child_done"; }; @@ -207,8 +218,11 @@ test("awaited child jumps queue without overshoot", async () => { const event_3 = bus.dispatch(Event3({})); await delay(0); + debug_order.push({ label: "after_delay_0", at: new Date().toISOString() }); await event_1.done(); + debug_order.push({ label: "after_event1_done", at: new Date().toISOString() }); + console.log("debug_order", debug_order); assert.ok(execution_order.includes("Child_start")); assert.ok(execution_order.includes("Child_end")); From 413d0cf3d4661b5a2165aca8625a7d4b8b1c522c Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Thu, 5 Feb 2026 22:07:25 -0800 Subject: [PATCH 48/79] wip --- bubus-ts/package.json | 2 +- bubus-ts/src/base_event.ts | 56 +- bubus-ts/src/event_bus.ts | 370 ++++++---- bubus-ts/src/event_result.ts | 23 + bubus-ts/src/semaphores.ts | 19 + bubus-ts/tests/comprehensive_patterns.test.ts | 638 +++++++++++++++++- bubus-ts/tests/debounce.test.ts | 2 +- bubus-ts/tests/error_handling.test.ts | 228 +++++++ bubus-ts/tests/event_bus_proxy.test.ts | 241 +++++++ bubus-ts/tests/eventbus_basics.test.ts | 513 ++++++++++++++ bubus-ts/tests/forwarding.test.ts | 63 ++ bubus-ts/tests/handlers.test.ts | 6 +- bubus-ts/tests/locking.test.ts | 15 +- bubus-ts/tests/log_tree.test.ts | 27 +- bubus-ts/tests/parent_child.test.ts | 6 +- bubus-ts/tests/performance.test.ts | 2 +- bubus-ts/tests/timeout.test.ts | 498 ++++++++++++++ 17 files changed, 2488 insertions(+), 221 deletions(-) create mode 100644 bubus-ts/tests/error_handling.test.ts create mode 100644 bubus-ts/tests/event_bus_proxy.test.ts create mode 100644 bubus-ts/tests/eventbus_basics.test.ts diff --git a/bubus-ts/package.json b/bubus-ts/package.json index e229ce8..441aa89 100644 --- a/bubus-ts/package.json +++ b/bubus-ts/package.json @@ -18,7 +18,7 @@ "lint": "eslint .", "format": "prettier --write .", "format:check": "prettier --check .", - "test": "node --test --import tsx tests/**/*.test.ts" + "test": "NODE_OPTIONS='--expose-gc' node --expose-gc --test --import tsx tests/**/*.test.ts" }, "keywords": [], "author": "", diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index c762999..4c8c1d8 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -3,8 +3,8 @@ import { v7 as uuidv7 } from "uuid"; import type { EventBus } from "./event_bus.js"; import { EventResult } from "./event_result.js"; -import type { ConcurrencyMode } from "./semaphores.js"; -import { CONCURRENCY_MODES } from "./semaphores.js"; +import type { ConcurrencyMode, Deferred } from "./semaphores.js"; +import { CONCURRENCY_MODES, withResolvers } from "./semaphores.js"; export const BaseEventSchema = z @@ -82,7 +82,6 @@ export class BaseEvent { event_result_schema?: z.ZodTypeAny; event_result_type?: string; event_results: Map; - event_children: BaseEvent[]; event_emitted_by_handler_id?: string; event_pending_buses: number; event_status: "pending" | "started" | "completed"; @@ -99,9 +98,7 @@ export class BaseEvent { static schema = BaseEventSchema; static event_type?: string; - _done_promise: Promise | null; - _done_resolve: ((event: this) => void) | null; - _done_reject: ((reason: unknown) => void) | null; + _done: Deferred | null; constructor(data: BaseEventInit> = {}) { const ctor = this.constructor as typeof BaseEvent & { @@ -143,11 +140,8 @@ export class BaseEvent { this.event_result_schema = event_result_schema; this.event_result_type = event_result_type; this.event_results = new Map(); - this.event_children = []; - this._done_promise = null; - this._done_resolve = null; - this._done_reject = null; + this._done = null; this._dispatch_context = undefined; } @@ -259,26 +253,39 @@ export class BaseEvent { return this.event_type; } + get event_children(): BaseEvent[] { + const children: BaseEvent[] = []; + const seen = new Set(); + for (const result of this.event_results.values()) { + for (const child of result.event_children) { + if (!seen.has(child.event_id)) { + seen.add(child.event_id); + children.push(child); + } + } + } + return children; + } + done(): Promise { if (!this.bus) { return Promise.reject(new Error("event has no bus attached")); } - const runner_bus = this.bus as { - _runImmediately: (event: BaseEvent) => Promise; - isInsideHandler: () => boolean; - }; if (this.event_status === "completed") { return Promise.resolve(this); } - if (runner_bus.isInsideHandler()) { - return runner_bus._runImmediately(this) as Promise; - } - return this.waitForCompletion(); + // Always delegate to _runImmediately β€” it walks up the parent event tree + // to determine whether we're inside a handler (works cross-bus). If no + // ancestor handler is in-flight, it falls back to waitForCompletion(). + const runner_bus = this.bus as { + _runImmediately: (event: BaseEvent) => Promise; + }; + return runner_bus._runImmediately(this) as Promise; } waitForCompletion(): Promise { this.ensureDonePromise(); - return this._done_promise as Promise; + return this._done!.promise; } markStarted(): void { @@ -296,9 +303,7 @@ export class BaseEvent { this.event_status = "completed"; this.event_completed_at = BaseEvent.nextIsoTimestamp(); this.ensureDonePromise(); - if (this._done_resolve) { - this._done_resolve(this as this); - } + this._done!.resolve(this); } markFailed(error: unknown): void { @@ -343,13 +348,10 @@ export class BaseEvent { } ensureDonePromise(): void { - if (this._done_promise) { + if (this._done) { return; } - this._done_promise = new Promise((resolve, reject) => { - this._done_resolve = resolve; - this._done_reject = reject; - }); + this._done = withResolvers(); } } diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index 666eae0..10f379a 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -7,7 +7,8 @@ import { type ConcurrencyMode, limiterForMode, resolveConcurrencyMode, - runWithLimiter + runWithLimiter, + withResolvers } from "./semaphores.js"; @@ -45,20 +46,6 @@ export class EventHandlerCancelledError extends Error { } } -const withResolvers = () => { - if (typeof Promise.withResolvers === "function") { - return Promise.withResolvers(); - } - - let resolve!: (value: T | PromiseLike) => void; - let reject!: (reason?: unknown) => void; - const promise = new Promise((resolve_fn, reject_fn) => { - resolve = resolve_fn; - reject = reject_fn; - }); - - return { promise, resolve, reject }; -}; import type { EventHandler, EventKey, FindOptions, HandlerOptions } from "./types.js"; type FindWaiter = { @@ -87,14 +74,53 @@ type EventBusOptions = { event_timeout?: number | null; }; +class EventBusInstanceRegistry { + private _refs = new Set>(); + private _lookup = new WeakMap>(); + private _gc = typeof FinalizationRegistry !== "undefined" + ? new FinalizationRegistry>((ref) => { this._refs.delete(ref); }) + : null; + + add(bus: EventBus): void { + const ref = new WeakRef(bus); + this._refs.add(ref); + this._lookup.set(bus, ref); + this._gc?.register(bus, ref, bus); + } + + delete(bus: EventBus): void { + const ref = this._lookup.get(bus); + if (!ref) return; + this._refs.delete(ref); + this._lookup.delete(bus); + this._gc?.unregister(bus); + } + + has(bus: EventBus): boolean { + return this._lookup.get(bus)?.deref() !== undefined; + } + + get size(): number { + let n = 0; + for (const ref of this._refs) ref.deref() ? n++ : this._refs.delete(ref); + return n; + } + + *[Symbol.iterator](): Iterator { + for (const ref of this._refs) { + const bus = ref.deref(); + if (bus) yield bus; else this._refs.delete(ref); + } + } +} + export class EventBus { - static instances: Set = new Set(); + static instances = new EventBusInstanceRegistry(); static global_event_limiter = new AsyncLimiter(1); static global_handler_limiter = new AsyncLimiter(1); - static global_inside_handler_depth = 0; static findEventById(event_id: string): BaseEvent | null { for (const bus of EventBus.instances) { - const event = bus.event_history_by_id.get(event_id); + const event = bus.event_history.get(event_id); if (event) { return event; } @@ -109,10 +135,8 @@ export class EventBus { event_timeout_default: number | null; bus_event_limiter: AsyncLimiter; bus_handler_limiter: AsyncLimiter; - handlers_by_key: Map>; - handlers_by_id: Map; - event_history: BaseEvent[]; - event_history_by_id: Map; + handlers: Map; + event_history: Map; pending_event_queue: BaseEvent[]; in_flight_event_ids: Set; runloop_running: boolean; @@ -129,6 +153,10 @@ export class EventBus { immediate_processing_stack_depth: number; // Runloop waiters that resume once immediate_processing_stack_depth returns to 0. immediate_processing_waiters: Array<() => void>; + // Stack of EventResults for handlers currently executing on this bus. + // Enables per-bus isInsideHandler() and gives _runImmediately access to the + // calling handler's result even when called on raw (non-proxied) events. + _event_result_stack: EventResult[]; constructor(name: string = "EventBus", options: EventBusOptions = {}) { this.name = name; @@ -140,10 +168,8 @@ export class EventBus { options.event_timeout === undefined ? 60 : options.event_timeout; this.bus_event_limiter = new AsyncLimiter(1); this.bus_handler_limiter = new AsyncLimiter(1); - this.handlers_by_key = new Map(); - this.handlers_by_id = new Map(); - this.event_history = []; - this.event_history_by_id = new Map(); + this.handlers = new Map(); + this.event_history = new Map(); this.pending_event_queue = []; this.in_flight_event_ids = new Set(); this.runloop_running = false; @@ -153,6 +179,7 @@ export class EventBus { this.find_waiters = new Set(); this.immediate_processing_stack_depth = 0; this.immediate_processing_waiters = []; + this._event_result_stack = []; EventBus.instances.add(this); @@ -160,6 +187,17 @@ export class EventBus { this.emit = this.emit.bind(this); } + destroy(): void { + EventBus.instances.delete(this); + this.handlers.clear(); + this.event_history.clear(); + this.pending_event_queue.length = 0; + this.in_flight_event_ids.clear(); + this.find_waiters.clear(); + this.idle_waiters.length = 0; + this.immediate_processing_waiters.length = 0; + } + on( event_key: EventKey | "*", handler: EventHandler, @@ -176,14 +214,7 @@ export class EventBus { handler_registered_at ); - let handler_ids = this.handlers_by_key.get(normalized_key); - if (!handler_ids) { - handler_ids = new Set(); - this.handlers_by_key.set(normalized_key, handler_ids); - } - handler_ids.add(handler_id); - - this.handlers_by_id.set(handler_id, { + this.handlers.set(handler_id, { id: handler_id, handler: handler as EventHandler, handler_name, @@ -194,26 +225,17 @@ export class EventBus { }); } - off(event_key: EventKey | "*", handler: EventHandler): void { + off(event_key: EventKey | "*", handler?: EventHandler | string): void { const normalized_key = this.normalizeEventKey(event_key); - const handler_ids = this.handlers_by_key.get(normalized_key); - if (!handler_ids || handler_ids.size === 0) { - return; - } - for (const handler_id of Array.from(handler_ids)) { - const entry = this.handlers_by_id.get(handler_id); - if (!entry) { - handler_ids.delete(handler_id); + const match_by_id = typeof handler === "string"; + for (const [handler_id, entry] of this.handlers) { + if (entry.event_key !== normalized_key) { continue; } - if (entry.handler === (handler as EventHandler)) { - handler_ids.delete(handler_id); - this.handlers_by_id.delete(handler_id); + if (handler === undefined || (match_by_id ? handler_id === handler : entry.handler === (handler as EventHandler))) { + this.handlers.delete(handler_id); } } - if (handler_ids.size === 0) { - this.handlers_by_key.delete(normalized_key); - } } private computeHandlerId( @@ -252,7 +274,7 @@ export class EventBus { } if (original_event.event_parent_id) { - const parent_event = this.event_history_by_id.get(original_event.event_parent_id); + const parent_event = this.event_history.get(original_event.event_parent_id); if (parent_event) { this.recordChildEvent( parent_event.event_id, @@ -262,8 +284,7 @@ export class EventBus { } } - this.event_history.push(original_event); - this.event_history_by_id.set(original_event.event_id, original_event); + this.event_history.set(original_event.event_id, original_event); this.trimHistory(); original_event.event_pending_buses += 1; @@ -325,8 +346,9 @@ export class EventBus { const cutoff_ms = past === true ? null : now_ms - Math.max(0, Number(past)) * 1000; - for (let i = this.event_history.length - 1; i >= 0; i -= 1) { - const event = this.event_history[i]; + const history_values = Array.from(this.event_history.values()); + for (let i = history_values.length - 1; i >= 0; i -= 1) { + const event = history_values[i]; if (!matches(event)) { continue; } @@ -368,30 +390,70 @@ export class EventBus { }); } + // Called when a handler does `await child.done()` β€” processes the child event + // immediately ("queue-jump") instead of waiting for the runloop to pick it up. + // + // Yield-and-reacquire: if the calling handler holds a handler concurrency limiter, + // we temporarily release it so child handlers on the same bus can acquire it + // (preventing deadlock for bus-serial/global-serial modes). We re-acquire after + // the child completes so the parent handler can continue with the limiter held. async _runImmediately( event: T, handler_result?: EventResult ): Promise { const original_event = event._original_event ?? event; - if (handler_result && !handler_result.queue_jump_hold) { - handler_result.queue_jump_hold = true; + // Find the parent handler's result: prefer the proxy-provided one (only if + // the handler is still running), then this bus's stack, then walk up the + // parent event tree (cross-bus case). If none found, we're not inside a + // handler and should fall back to waitForCompletion. + const proxy_result = handler_result?.status === "started" ? handler_result : undefined; + const effective_result = proxy_result + ?? this._event_result_stack[this._event_result_stack.length - 1] + ?? this._findInFlightAncestorResult(original_event) + ?? undefined; + if (!effective_result) { + // Not inside any handler β€” fall back to normal completion waiting + await original_event.waitForCompletion(); + return event; + } + if (!effective_result.queue_jump_hold) { + effective_result.queue_jump_hold = true; this.immediate_processing_stack_depth += 1; } if (original_event.event_status === "completed") { return event; } - if (original_event.event_status === "started") { - await this.runImmediatelyAcrossBuses(original_event); - return event; - } - const index = this.pending_event_queue.indexOf(original_event); - if (index >= 0) { - this.pending_event_queue.splice(index, 1); + // Yield the parent handler's limiter so child handlers can use it. + // Null out _held_handler_limiter so concurrent calls from the same handler + // (e.g. Promise.all([child1.done(), child2.done()])) don't double-release. + const limiter_to_yield = effective_result?._held_handler_limiter ?? null; + if (limiter_to_yield) { + effective_result!._held_handler_limiter = null; + limiter_to_yield.release(); } - await this.runImmediatelyAcrossBuses(original_event); - return event; + try { + if (original_event.event_status === "started") { + await this.runImmediatelyAcrossBuses(original_event); + return event; + } + + const index = this.pending_event_queue.indexOf(original_event); + if (index >= 0) { + this.pending_event_queue.splice(index, 1); + } + + await this.runImmediatelyAcrossBuses(original_event); + return event; + } finally { + // Re-acquire the parent handler's limiter before returning control. + // Only the call that actually released it will re-acquire. + if (limiter_to_yield) { + await limiter_to_yield.acquire(); + effective_result!._held_handler_limiter = limiter_to_yield; + } + } } async waitUntilIdle(): Promise { @@ -448,7 +510,7 @@ export class EventBus { } private hasPendingResults(): boolean { - for (const event of this.event_history) { + for (const event of this.event_history.values()) { for (const result of event.event_results.values()) { if (result.eventbus_name !== this.name) { continue; @@ -471,7 +533,7 @@ export class EventBus { if (current_parent_id === ancestor.event_id) { return true; } - const parent = this.event_history_by_id.get(current_parent_id); + const parent = this.event_history.get(current_parent_id); if (!parent) { return false; } @@ -490,12 +552,7 @@ export class EventBus { handler_id?: string ): void { const original_child = child_event._original_event ?? child_event; - const parent_event = this.event_history_by_id.get(parent_event_id); - if (parent_event) { - if (!parent_event.event_children.some((child) => child.event_id === original_child.event_id)) { - parent_event.event_children.push(original_child); - } - } + const parent_event = this.event_history.get(parent_event_id); const target_handler_id = handler_id ?? original_child.event_emitted_by_handler_id ?? undefined; @@ -519,7 +576,7 @@ export class EventBus { parent_to_children.set(parent_id, existing); }; - for (const event of this.event_history) { + for (const event of this.event_history.values()) { add_child(event.event_parent_id ?? null, event); } @@ -530,9 +587,9 @@ export class EventBus { const root_events: BaseEvent[] = []; const seen = new Set(); - for (const event of this.event_history) { + for (const event of this.event_history.values()) { const parent_id = event.event_parent_id; - if (!parent_id || parent_id === event.event_id || !this.event_history_by_id.has(parent_id)) { + if (!parent_id || parent_id === event.event_id || !this.event_history.has(parent_id)) { if (!seen.has(event.event_id)) { root_events.push(event); seen.add(event.event_id); @@ -567,10 +624,42 @@ export class EventBus { return lines.join("\n"); } + // Per-bus check: true only if this specific bus has a handler on its stack. + // For cross-bus queue-jumping, done() uses the _is_handler_scoped flag on + // the bus proxy instead (set by _getBusScopedEvent when handler_result exists). isInsideHandler(): boolean { - return EventBus.global_inside_handler_depth > 0; + return this._event_result_stack.length > 0; + } + + // Walk up the parent event chain to find an in-flight ancestor handler result. + // Returns the result if found, null otherwise. Used by _runImmediately to detect + // cross-bus queue-jump scenarios where the calling handler is on a different bus. + _findInFlightAncestorResult(event: BaseEvent): EventResult | null { + const original = event._original_event ?? event; + let current_parent_id = original.event_parent_id; + let current_handler_id = original.event_emitted_by_handler_id; + while (current_handler_id && current_parent_id) { + const parent = EventBus.findEventById(current_parent_id); + if (!parent) break; + const handler_result = parent.event_results.get(current_handler_id); + if (handler_result && handler_result.status === "started") return handler_result; + current_parent_id = parent.event_parent_id; + current_handler_id = parent.event_emitted_by_handler_id; + } + return null; } + // Processes a queue-jumped event across all buses that have it dispatched. + // Called from _runImmediately after the parent handler's limiter has been yielded. + // + // Event limiter bypass: the initiating bus (this) always bypasses its event limiter + // since we're inside a handler that already holds it. Other buses only bypass if + // they resolve to the same limiter instance (i.e. global-serial mode where all + // buses share EventBus.global_event_limiter). + // + // Handler limiters are NOT bypassed β€” child handlers must acquire the handler + // limiter normally. This works because _runImmediately already released the + // parent's handler limiter via yield-and-reacquire. private async runImmediatelyAcrossBuses(event: BaseEvent): Promise { const buses = this.getBusesForImmediateRun(event); if (buses.length === 0) { @@ -582,6 +671,10 @@ export class EventBus { bus.immediate_processing_stack_depth += 1; } + // Determine which event limiter the initiating bus resolves to, so we can + // detect when other buses share the same instance (global-serial). + const initiating_event_limiter = this.resolveEventLimiter(event); + try { for (const bus of buses) { const index = bus.pending_event_queue.indexOf(event); @@ -595,9 +688,18 @@ export class EventBus { continue; } bus.in_flight_event_ids.add(event.event_id); + + // Bypass event limiter on the initiating bus (we're already inside a handler + // that acquired it). For other buses, only bypass if they resolve to the same + // limiter instance (global-serial shares one limiter across all buses). + const bus_event_limiter = bus.resolveEventLimiter(event); + const should_bypass_event_limiter = + bus === this || + (initiating_event_limiter !== null && + bus_event_limiter === initiating_event_limiter); + await bus.scheduleEventProcessing(event, { - bypass_event_limiters: true, - bypass_handler_limiters: true + bypass_event_limiters: should_bypass_event_limiter }); } @@ -625,7 +727,7 @@ export class EventBus { if (bus.name !== name) { continue; } - if (!bus.event_history_by_id.has(event.event_id)) { + if (!bus.event_history.has(event.event_id)) { continue; } if (bus.eventHasVisited(event)) { @@ -638,7 +740,7 @@ export class EventBus { } } - if (!seen.has(this) && this.event_history_by_id.has(event.event_id)) { + if (!seen.has(this) && this.event_history.has(event.event_id)) { ordered.push(this); } @@ -681,7 +783,6 @@ export class EventBus { event: BaseEvent, options: { bypass_event_limiters?: boolean; - bypass_handler_limiters?: boolean; pre_acquired_limiter?: AsyncLimiter | null; } = {} ): Promise { @@ -689,10 +790,10 @@ export class EventBus { const limiter = options.bypass_event_limiters ? null : this.resolveEventLimiter(event); const pre_acquired_limiter = options.pre_acquired_limiter ?? null; if (pre_acquired_limiter) { - await this.processEvent(event, { bypass_handler_limiters: options.bypass_handler_limiters }); + await this.processEvent(event); } else { await runWithLimiter(limiter, async () => { - await this.processEvent(event, { bypass_handler_limiters: options.bypass_handler_limiters }); + await this.processEvent(event); }); } } finally { @@ -753,10 +854,7 @@ export class EventBus { } } - private async processEvent( - event: BaseEvent, - options: { bypass_handler_limiters?: boolean } = {} - ): Promise { + private async processEvent(event: BaseEvent): Promise { if (this.eventHasVisited(event)) { return; } @@ -779,12 +877,10 @@ export class EventBus { }, event.event_timeout * 1000); try { - const handler_entries = this.createPendingHandlerResults(event); + const handler_entries = this.createPendingHandlerResults(event); const handler_promises = handler_entries.map((entry) => - this.runHandlerEntry(event, entry.handler, entry.result, entry.options, { - bypass_handler_limiters: options.bypass_handler_limiters - }) + this.runHandlerEntry(event, entry.handler, entry.result, entry.options) ); await Promise.all(handler_promises); @@ -832,24 +928,24 @@ export class EventBus { event: BaseEvent, handler: EventHandler, result: EventResult, - options?: HandlerOptions, - run_options: { bypass_handler_limiters?: boolean } = {} + options?: HandlerOptions ): Promise { if (result.status === "error" && result.error instanceof EventHandlerCancelledError) { return; } const handler_event = this._getBusScopedEvent(event, result); - const limiter = run_options.bypass_handler_limiters - ? null - : this.resolveHandlerLimiter(event, options); + const limiter = this.resolveHandlerLimiter(event, options); await runWithLimiter(limiter, async () => { if (result.status === "error" && result.error instanceof EventHandlerCancelledError) { return; } - EventBus.global_inside_handler_depth += 1; + // Track which limiter this handler holds so _runImmediately can yield it + // (release before child processing, re-acquire after) to prevent deadlock. + result._held_handler_limiter = limiter; + this._event_result_stack.push(result); try { result.markStarted(); const handler_result = await this.runHandlerWithTimeout(event, handler, handler_event); @@ -885,10 +981,11 @@ export class EventBus { event.markFailed(error); } } finally { - EventBus.global_inside_handler_depth = Math.max( - 0, - EventBus.global_inside_handler_depth - 1 - ); + result._held_handler_limiter = null; + const stack_idx = this._event_result_stack.indexOf(result); + if (stack_idx >= 0) { + this._event_result_stack.splice(stack_idx, 1); + } if (result.queue_jump_hold) { result.queue_jump_hold = false; this.immediate_processing_stack_depth = Math.max( @@ -1437,38 +1534,17 @@ export class EventBus { options?: HandlerOptions; }> = []; - const keyed_handlers = this.handlers_by_key.get(event.event_type); - if (keyed_handlers) { - for (const handler_id of keyed_handlers.values()) { - const entry = this.handlers_by_id.get(handler_id); - if (!entry) { - continue; - } - handlers.push({ - handler_id, - handler: entry.handler, - handler_name: entry.handler_name, - handler_file_path: entry.handler_file_path, - options: entry.options - }); - } - } - - const wildcard_handlers = this.handlers_by_key.get("*"); - if (wildcard_handlers) { - for (const handler_id of wildcard_handlers.values()) { - const entry = this.handlers_by_id.get(handler_id); - if (!entry) { - continue; - } - handlers.push({ - handler_id, - handler: entry.handler, - handler_name: entry.handler_name, - handler_file_path: entry.handler_file_path, - options: entry.options - }); + for (const [handler_id, entry] of this.handlers) { + if (entry.event_key !== event.event_type && entry.event_key !== "*") { + continue; } + handlers.push({ + handler_id, + handler: entry.handler, + handler_name: entry.handler_name, + handler_file_path: entry.handler_file_path, + options: entry.options + }); } return handlers; @@ -1505,29 +1581,33 @@ export class EventBus { if (this.max_history_size === null) { return; } - if (this.event_history.length <= this.max_history_size) { + if (this.event_history.size <= this.max_history_size) { return; } - let remaining_overage = this.event_history.length - this.max_history_size; + let remaining_overage = this.event_history.size - this.max_history_size; - for (let i = 0; i < this.event_history.length && remaining_overage > 0; i += 1) { - const event = this.event_history[i]; + // First pass: remove completed events (oldest first, Map iterates in insertion order) + for (const [event_id, event] of this.event_history) { + if (remaining_overage <= 0) { + break; + } if (event.event_status !== "completed") { continue; } - this.event_history_by_id.delete(event.event_id); - this.event_history.splice(i, 1); - i -= 1; + this.event_history.delete(event_id); remaining_overage -= 1; } - while (remaining_overage > 0 && this.event_history.length > 0) { - const event = this.event_history.shift(); - if (event) { - this.event_history_by_id.delete(event.event_id); + // Second pass: force-remove oldest events regardless of status + if (remaining_overage > 0) { + for (const event_id of this.event_history.keys()) { + if (remaining_overage <= 0) { + break; + } + this.event_history.delete(event_id); + remaining_overage -= 1; } - remaining_overage -= 1; } } } diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts index 3b34402..d62e213 100644 --- a/bubus-ts/src/event_result.ts +++ b/bubus-ts/src/event_result.ts @@ -1,6 +1,7 @@ import { v7 as uuidv7 } from "uuid"; import type { BaseEvent } from "./base_event.js"; +import type { AsyncLimiter } from "./semaphores.js"; export type EventResultStatus = "pending" | "started" | "completed" | "error"; @@ -17,7 +18,28 @@ export class EventResult { result?: unknown; error?: unknown; event_children: BaseEvent[]; + // Tracks whether this handler's execution has triggered a queue-jump via done(). + // + // Lifecycle: + // 1. Starts as `false` when the EventResult is created. + // 2. Set to `true` in _runImmediately() when the handler (or its raw event's + // done()) triggers immediate processing. At the same time, + // immediate_processing_stack_depth is incremented by 1 on the bus. + // The guard (!queue_jump_hold) prevents double-incrementing if the + // handler calls done() on multiple children. + // 3. Checked in runHandlerEntry()'s finally block: if true, decrements + // immediate_processing_stack_depth and releases runloop waiters. + // This keeps the runloop paused between when runImmediatelyAcrossBuses() + // returns (its own try/finally decrements) and when the handler itself + // finishes β€” without this hold, the runloop would resume prematurely + // while the handler is still executing after `await child.done()`. + // 4. Reset to `false` in the same finally block after decrementing. queue_jump_hold: boolean; + // The handler concurrency limiter currently held by this handler execution. + // Set by runHandlerEntry so that _runImmediately can temporarily release it + // (yield-and-reacquire) to let child event handlers use the same limiter + // without deadlocking. + _held_handler_limiter: AsyncLimiter | null; constructor(params: { event_id: string; @@ -35,6 +57,7 @@ export class EventResult { this.eventbus_name = params.eventbus_name; this.event_children = []; this.queue_jump_hold = false; + this._held_handler_limiter = null; } markStarted(): void { diff --git a/bubus-ts/src/semaphores.ts b/bubus-ts/src/semaphores.ts index ab2693b..eb90805 100644 --- a/bubus-ts/src/semaphores.ts +++ b/bubus-ts/src/semaphores.ts @@ -1,3 +1,22 @@ +export type Deferred = { + promise: Promise; + resolve: (value: T | PromiseLike) => void; + reject: (reason?: unknown) => void; +}; + +export const withResolvers = (): Deferred => { + if (typeof Promise.withResolvers === "function") { + return Promise.withResolvers(); + } + let resolve!: (value: T | PromiseLike) => void; + let reject!: (reason?: unknown) => void; + const promise = new Promise((resolve_fn, reject_fn) => { + resolve = resolve_fn; + reject = reject_fn; + }); + return { promise, resolve, reject }; +}; + export const CONCURRENCY_MODES = ["global-serial", "bus-serial", "parallel", "auto"] as const; export type ConcurrencyMode = (typeof CONCURRENCY_MODES)[number]; diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index be89814..3f36e74 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -64,7 +64,7 @@ test("comprehensive patterns: forwarding, async/sync dispatch, parent tracking", await bus_1.waitUntilIdle(); await bus_2.waitUntilIdle(); - const event_children = bus_1.event_history.filter( + const event_children = Array.from(bus_1.event_history.values()).filter( (event) => event.event_type === "ImmediateChildEvent" || event.event_type === "QueuedChildEvent" ); @@ -217,37 +217,37 @@ test("awaited child jumps queue without overshoot", async () => { const event_2 = bus.dispatch(Event2({})); const event_3 = bus.dispatch(Event3({})); - await delay(0); - debug_order.push({ label: "after_delay_0", at: new Date().toISOString() }); - + // Wait for everything to complete await event_1.done(); - debug_order.push({ label: "after_event1_done", at: new Date().toISOString() }); - console.log("debug_order", debug_order); + await bus.waitUntilIdle(); + // Core assertion: child jumped the queue and ran DURING Event1's handler assert.ok(execution_order.includes("Child_start")); assert.ok(execution_order.includes("Child_end")); const child_start_idx = execution_order.indexOf("Child_start"); const child_end_idx = execution_order.indexOf("Child_end"); const event1_end_idx = execution_order.indexOf("Event1_end"); - assert.ok(child_start_idx < event1_end_idx); - assert.ok(child_end_idx < event1_end_idx); - - assert.ok(!execution_order.includes("Event2_start")); - assert.ok(!execution_order.includes("Event3_start")); - - assert.equal(event_2.event_status, "pending"); - assert.equal(event_3.event_status, "pending"); - - await bus.waitUntilIdle(); + assert.ok(child_start_idx < event1_end_idx, "child must start before Event1 handler returns"); + assert.ok(child_end_idx < event1_end_idx, "child must end before Event1 handler returns"); + // No overshoot: Event2 and Event3 must only start AFTER Event1's handler fully completes. + // In JS, the microtask-based runloop processes them after Event1 completes (so they may + // already be done by this point), but the key guarantee is ordering, not timing. const event2_start_idx = execution_order.indexOf("Event2_start"); const event3_start_idx = execution_order.indexOf("Event3_start"); - assert.ok(event2_start_idx < event3_start_idx); + assert.ok(event2_start_idx > event1_end_idx, "Event2 must not start until Event1 handler returns"); + assert.ok(event3_start_idx > event1_end_idx, "Event3 must not start until Event1 handler returns"); + + // FIFO preserved among queued events + assert.ok(event2_start_idx < event3_start_idx, "Event2 must start before Event3 (FIFO)"); + // All events completed + assert.equal(event_1.event_status, "completed"); assert.equal(event_2.event_status, "completed"); assert.equal(event_3.event_status, "completed"); - const history_list = bus.event_history; + // Timestamp ordering confirms the same + const history_list = Array.from(bus.event_history.values()); const child_event = history_list.find((event) => event.event_type === "ChildEvent"); const event2_from_history = history_list.find((event) => event.event_type === "Event2"); const event3_from_history = history_list.find((event) => event.event_type === "Event3"); @@ -260,6 +260,151 @@ test("awaited child jumps queue without overshoot", async () => { assert.ok(child_event!.event_started_at! < event3_from_history!.event_started_at!); }); +test("done() on non-proxied event still holds immediate_processing_stack_depth", async () => { + const bus = new EventBus("RawDoneBus", { max_history_size: 100 }); + const Event1 = BaseEvent.extend("Event1", {}); + const ChildEvent = BaseEvent.extend("RawChild", {}); + + let depth_after_done = -1; + + bus.on(ChildEvent, () => {}); + + bus.on(Event1, async (event) => { + // Dispatch child via the raw bus (not the proxied event.bus) + const child = bus.dispatch(ChildEvent({})); + // Get the raw (non-proxied) event + const raw_child = child._original_event ?? child; + // done() on raw event bypasses handler_result injection from proxy + await raw_child.done(); + // After done() returns, depth should still be > 0 because + // we're still inside a handler doing queue-jump processing + depth_after_done = bus.immediate_processing_stack_depth; + }); + + bus.dispatch(Event1({})); + await bus.waitUntilIdle(); + + assert.ok( + depth_after_done > 0, + `immediate_processing_stack_depth should be > 0 after raw done() ` + + `but before handler returns, got ${depth_after_done}` + ); +}); + +test("immediate_processing_stack_depth returns to 0 after queue-jump completes", async () => { + const bus = new EventBus("DepthBalanceBus", { max_history_size: 100 }); + const Event1 = BaseEvent.extend("DepthEvent1", {}); + const ChildA = BaseEvent.extend("DepthChildA", {}); + const ChildB = BaseEvent.extend("DepthChildB", {}); + + let depth_during_handler = -1; + let depth_between_dones = -1; + let depth_after_second_done = -1; + + bus.on(ChildA, () => {}); + bus.on(ChildB, () => {}); + + bus.on(Event1, async (event) => { + // First queue-jump + const child_a = event.bus?.emit(ChildA({}))!; + await child_a.done(); + depth_during_handler = bus.immediate_processing_stack_depth; + + // Second queue-jump β€” should NOT double-increment (queue_jump_hold guard) + const child_b = event.bus?.emit(ChildB({}))!; + depth_between_dones = bus.immediate_processing_stack_depth; + await child_b.done(); + depth_after_second_done = bus.immediate_processing_stack_depth; + }); + + bus.dispatch(Event1({})); + await bus.waitUntilIdle(); + + // During handler, depth should be > 0 (held by queue_jump_hold) + assert.ok( + depth_during_handler > 0, + `depth should be > 0 after first done(), got ${depth_during_handler}` + ); + + // Between done() calls, depth should still be held + assert.ok( + depth_between_dones > 0, + `depth should be > 0 between done() calls, got ${depth_between_dones}` + ); + + // After second done(), still held until handler returns + assert.ok( + depth_after_second_done > 0, + `depth should be > 0 after second done(), got ${depth_after_second_done}` + ); + + // After handler finishes and bus is idle, depth must be exactly 0 + assert.equal( + bus.immediate_processing_stack_depth, + 0, + `depth should return to 0 after handler completes, got ${bus.immediate_processing_stack_depth}` + ); +}); + +test("isInsideHandler() is per-bus, not global", async () => { + const bus_a = new EventBus("InsideHandlerA", { max_history_size: 100 }); + const bus_b = new EventBus("InsideHandlerB", { max_history_size: 100 }); + + const EventA = BaseEvent.extend("InsideHandlerEventA", {}); + const EventB = BaseEvent.extend("InsideHandlerEventB", {}); + + let bus_a_inside_during_a_handler = false; + let bus_b_inside_during_a_handler = false; + let bus_a_inside_during_b_handler = false; + let bus_b_inside_during_b_handler = false; + + bus_a.on(EventA, () => { + bus_a_inside_during_a_handler = bus_a.isInsideHandler(); + bus_b_inside_during_a_handler = bus_b.isInsideHandler(); + }); + + bus_b.on(EventB, () => { + bus_a_inside_during_b_handler = bus_a.isInsideHandler(); + bus_b_inside_during_b_handler = bus_b.isInsideHandler(); + }); + + // Dispatch to bus_a first, wait for completion so bus_b has no active handlers + await bus_a.dispatch(EventA({})).done(); + await bus_a.waitUntilIdle(); + + // Then dispatch to bus_b so bus_a has no active handlers + await bus_b.dispatch(EventB({})).done(); + await bus_b.waitUntilIdle(); + + // During bus_a's handler: bus_a should report inside, bus_b should not + assert.equal( + bus_a_inside_during_a_handler, + true, + "bus_a.isInsideHandler() should be true during bus_a handler" + ); + assert.equal( + bus_b_inside_during_a_handler, + false, + "bus_b.isInsideHandler() should be false during bus_a handler" + ); + + // During bus_b's handler: bus_b should report inside, bus_a should not + assert.equal( + bus_b_inside_during_b_handler, + true, + "bus_b.isInsideHandler() should be true during bus_b handler" + ); + assert.equal( + bus_a_inside_during_b_handler, + false, + "bus_a.isInsideHandler() should be false during bus_b handler" + ); + + // After all handlers complete, neither bus should report inside + assert.equal(bus_a.isInsideHandler(), false, "bus_a.isInsideHandler() should be false after idle"); + assert.equal(bus_b.isInsideHandler(), false, "bus_b.isInsideHandler() should be false after idle"); +}); + test("dispatch multiple, await one skips others until after handler completes", async () => { const bus = new EventBus("MultiDispatchBus", { max_history_size: 100 }); const execution_order: string[] = []; @@ -622,3 +767,460 @@ test("deeply nested awaited children", async () => { const event2_start_idx = execution_order.indexOf("Event2_start"); assert.ok(event2_start_idx > event1_end_idx); }); + +// ============================================================================= +// Queue-Jump Concurrency Tests (Two-Bus) +// +// BUG: runImmediatelyAcrossBuses passes { bypass_handler_limiters: true, +// bypass_event_limiters: true } for ALL buses. This causes: +// 1. Handlers to run in parallel regardless of configured concurrency +// 2. Event limiters on remote buses to be skipped +// +// The fix requires "yield-and-reacquire": +// - Before processing the child, temporarily RELEASE the limiter the parent +// handler holds (the parent is suspended in `await child.done()` and isn't +// using it). +// - Process the child event NORMALLY β€” handlers acquire/release the real +// limiter, serializing among themselves as configured. +// - After the child completes, RE-ACQUIRE the limiter for the parent handler +// before it resumes. +// +// For event limiters, only bypass on the initiating bus (where the parent holds +// the limiter). On other buses, respect their event concurrency β€” bypass only +// if they resolve to the SAME limiter instance (i.e. global-serial). +// +// All tests use two buses. The pattern is: +// bus_a: origin bus where TriggerEvent handler dispatches a child +// bus_b: forward bus that also handles the child event +// The trigger handler dispatches the child on bus_a and also to bus_b, +// then awaits child.done(), which queue-jumps the child on both buses. +// ============================================================================= + +test("BUG: queue-jump two-bus bus-serial handlers should serialize on each bus", async () => { + const TriggerEvent = BaseEvent.extend("QJ2BS_Trigger", {}); + const ChildEvent = BaseEvent.extend("QJ2BS_Child", {}); + + const bus_a = new EventBus("QJ2BS_A", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + const bus_b = new EventBus("QJ2BS_B", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + + const log: string[] = []; + + // Two handlers per bus. handler_1 is slow (15ms), handler_2 is fast (5ms). + // With bus-serial, handler_1 must finish before handler_2 starts ON EACH BUS. + // With buggy parallel, both start simultaneously and handler_2 finishes first. + const a_handler_1 = async () => { log.push("a1_start"); await delay(15); log.push("a1_end"); }; + const a_handler_2 = async () => { log.push("a2_start"); await delay(5); log.push("a2_end"); }; + const b_handler_1 = async () => { log.push("b1_start"); await delay(15); log.push("b1_end"); }; + const b_handler_2 = async () => { log.push("b2_start"); await delay(5); log.push("b2_end"); }; + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; + bus_b.dispatch(child); + await child.done(); + }); + bus_a.on(ChildEvent, a_handler_1); + bus_a.on(ChildEvent, a_handler_2); + bus_b.on(ChildEvent, b_handler_1); + bus_b.on(ChildEvent, b_handler_2); + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); + await top.done(); + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + + // Bus A: handlers must serialize (a1 finishes before a2 starts) + const a1_end = log.indexOf("a1_end"); + const a2_start = log.indexOf("a2_start"); + assert.ok(a1_end >= 0 && a2_start >= 0, "bus_a handlers should have run"); + assert.ok( + a1_end < a2_start, + `bus_a (bus-serial): a1 should finish before a2 starts. Got: [${log.join(", ")}]` + ); + + // Bus B: handlers must serialize (b1 finishes before b2 starts) + const b1_end = log.indexOf("b1_end"); + const b2_start = log.indexOf("b2_start"); + assert.ok(b1_end >= 0 && b2_start >= 0, "bus_b handlers should have run"); + assert.ok( + b1_end < b2_start, + `bus_b (bus-serial): b1 should finish before b2 starts. Got: [${log.join(", ")}]` + ); +}); + +test("BUG: queue-jump two-bus global-serial handlers should serialize across both buses", async () => { + const TriggerEvent = BaseEvent.extend("QJ2GS_Trigger", {}); + const ChildEvent = BaseEvent.extend("QJ2GS_Child", {}); + + // Global-serial means ONE handler at a time GLOBALLY, across all buses. + const bus_a = new EventBus("QJ2GS_A", { + event_concurrency: "bus-serial", + handler_concurrency: "global-serial" + }); + const bus_b = new EventBus("QJ2GS_B", { + event_concurrency: "bus-serial", + handler_concurrency: "global-serial" + }); + + const log: string[] = []; + + const a_handler_1 = async () => { log.push("a1_start"); await delay(15); log.push("a1_end"); }; + const a_handler_2 = async () => { log.push("a2_start"); await delay(5); log.push("a2_end"); }; + const b_handler_1 = async () => { log.push("b1_start"); await delay(15); log.push("b1_end"); }; + const b_handler_2 = async () => { log.push("b2_start"); await delay(5); log.push("b2_end"); }; + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; + bus_b.dispatch(child); + await child.done(); + }); + bus_a.on(ChildEvent, a_handler_1); + bus_a.on(ChildEvent, a_handler_2); + bus_b.on(ChildEvent, b_handler_1); + bus_b.on(ChildEvent, b_handler_2); + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); + await top.done(); + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + + // With global-serial, no two handlers should overlap anywhere. + // runImmediatelyAcrossBuses processes buses sequentially (bus_a first, + // then bus_b), so the expected order is strictly serial: + // a1_start, a1_end, a2_start, a2_end, b1_start, b1_end, b2_start, b2_end + // + // With the bug (bypass), all handlers on a bus run in parallel: + // a1_start, a2_start, a2_end, a1_end, b1_start, b2_start, b2_end, b1_end + + // Check: within bus_a, handlers are serial + const a1_end = log.indexOf("a1_end"); + const a2_start = log.indexOf("a2_start"); + assert.ok( + a1_end < a2_start, + `global-serial: a1 should finish before a2 starts. Got: [${log.join(", ")}]` + ); + + // Check: within bus_b, handlers are serial + const b1_end = log.indexOf("b1_end"); + const b2_start = log.indexOf("b2_start"); + assert.ok( + b1_end < b2_start, + `global-serial: b1 should finish before b2 starts. Got: [${log.join(", ")}]` + ); + + // Check: bus_a handlers all finish before bus_b handlers start + // (because runImmediatelyAcrossBuses processes sequentially and + // all share the global handler limiter) + const a2_end = log.indexOf("a2_end"); + const b1_start = log.indexOf("b1_start"); + assert.ok( + a2_end < b1_start, + `global-serial: bus_a should finish before bus_b starts. Got: [${log.join(", ")}]` + ); +}); + +test("BUG: queue-jump two-bus mixed: bus_a bus-serial, bus_b parallel", async () => { + const TriggerEvent = BaseEvent.extend("QJ2Mix1_Trigger", {}); + const ChildEvent = BaseEvent.extend("QJ2Mix1_Child", {}); + + const bus_a = new EventBus("QJ2Mix1_A", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + const bus_b = new EventBus("QJ2Mix1_B", { + event_concurrency: "bus-serial", + handler_concurrency: "parallel" // bus_b handlers should run in parallel + }); + + const log: string[] = []; + + const a_handler_1 = async () => { log.push("a1_start"); await delay(15); log.push("a1_end"); }; + const a_handler_2 = async () => { log.push("a2_start"); await delay(5); log.push("a2_end"); }; + const b_handler_1 = async () => { log.push("b1_start"); await delay(15); log.push("b1_end"); }; + const b_handler_2 = async () => { log.push("b2_start"); await delay(5); log.push("b2_end"); }; + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; + bus_b.dispatch(child); + await child.done(); + }); + bus_a.on(ChildEvent, a_handler_1); + bus_a.on(ChildEvent, a_handler_2); + bus_b.on(ChildEvent, b_handler_1); + bus_b.on(ChildEvent, b_handler_2); + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); + await top.done(); + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + + // Bus A (bus-serial): a1 must finish before a2 starts + const a1_end = log.indexOf("a1_end"); + const a2_start = log.indexOf("a2_start"); + assert.ok( + a1_end < a2_start, + `bus_a (bus-serial): a1 should finish before a2 starts. Got: [${log.join(", ")}]` + ); + + // Bus B (parallel): both handlers should start before the slower one finishes. + // b2 (5ms) starts and finishes before b1 (15ms) finishes. + const b1_end = log.indexOf("b1_end"); + const b2_start = log.indexOf("b2_start"); + assert.ok( + b2_start < b1_end, + `bus_b (parallel): b2 should start before b1 finishes. Got: [${log.join(", ")}]` + ); +}); + +test("BUG: queue-jump two-bus mixed: bus_a parallel, bus_b bus-serial", async () => { + const TriggerEvent = BaseEvent.extend("QJ2Mix2_Trigger", {}); + const ChildEvent = BaseEvent.extend("QJ2Mix2_Child", {}); + + const bus_a = new EventBus("QJ2Mix2_A", { + event_concurrency: "bus-serial", + handler_concurrency: "parallel" // bus_a handlers should run in parallel + }); + const bus_b = new EventBus("QJ2Mix2_B", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + + const log: string[] = []; + + const a_handler_1 = async () => { log.push("a1_start"); await delay(15); log.push("a1_end"); }; + const a_handler_2 = async () => { log.push("a2_start"); await delay(5); log.push("a2_end"); }; + const b_handler_1 = async () => { log.push("b1_start"); await delay(15); log.push("b1_end"); }; + const b_handler_2 = async () => { log.push("b2_start"); await delay(5); log.push("b2_end"); }; + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; + bus_b.dispatch(child); + await child.done(); + }); + bus_a.on(ChildEvent, a_handler_1); + bus_a.on(ChildEvent, a_handler_2); + bus_b.on(ChildEvent, b_handler_1); + bus_b.on(ChildEvent, b_handler_2); + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); + await top.done(); + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + + // Bus A (parallel): handlers should overlap + const a1_end = log.indexOf("a1_end"); + const a2_start = log.indexOf("a2_start"); + assert.ok( + a2_start < a1_end, + `bus_a (parallel): a2 should start before a1 finishes. Got: [${log.join(", ")}]` + ); + + // Bus B (bus-serial): b1 must finish before b2 starts + const b1_end = log.indexOf("b1_end"); + const b2_start = log.indexOf("b2_start"); + assert.ok( + b1_end < b2_start, + `bus_b (bus-serial): b1 should finish before b2 starts. Got: [${log.join(", ")}]` + ); +}); + +// ============================================================================= +// Event-level concurrency on the forward bus. +// +// When the forward bus (bus_b) has bus-serial event concurrency and is already +// processing an event, a queue-jumped child should WAIT for bus_b's in-flight +// event to finish. The current code bypasses event limiters for ALL buses, +// causing the child to cut in front of the in-flight event. +// +// The fix should only bypass event limiters on the INITIATING bus (where the +// parent event holds the limiter). On other buses, bypass only if they resolve +// to the SAME limiter instance (global-serial shares one global limiter). +// ============================================================================= + +test("BUG: queue-jump should respect bus-serial event concurrency on forward bus", async () => { + const TriggerEvent = BaseEvent.extend("QJEvt_Trigger", {}); + const ChildEvent = BaseEvent.extend("QJEvt_Child", {}); + const SlowEvent = BaseEvent.extend("QJEvt_Slow", {}); + + const bus_a = new EventBus("QJEvt_A", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + const bus_b = new EventBus("QJEvt_B", { + event_concurrency: "bus-serial", // only one event at a time on bus_b + handler_concurrency: "bus-serial" + }); + + const log: string[] = []; + + // SlowEvent handler: occupies bus_b's event limiter for 40ms + bus_b.on(SlowEvent, async () => { + log.push("slow_start"); + await delay(40); + log.push("slow_end"); + }); + + // ChildEvent handler on bus_b: should only run after SlowEvent finishes + bus_b.on(ChildEvent, async () => { + log.push("child_b_start"); + await delay(5); + log.push("child_b_end"); + }); + + // ChildEvent handler on bus_a (so bus_a also processes the child) + bus_a.on(ChildEvent, async () => { + log.push("child_a_start"); + await delay(5); + log.push("child_a_end"); + }); + + // TriggerEvent handler: dispatches child to both buses, awaits completion + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; + bus_b.dispatch(child); + await child.done(); + }); + + // Step 1: Start a slow event on bus_b so it's busy + bus_b.dispatch(SlowEvent({ event_timeout: null })); + await delay(5); // let slow_handler start + + // Step 2: Trigger the queue-jump on bus_a + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); + await top.done(); + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + + // The child on bus_b should start AFTER the slow event finishes, + // because bus_b has bus-serial event concurrency. + const slow_end = log.indexOf("slow_end"); + const child_b_start = log.indexOf("child_b_start"); + assert.ok(slow_end >= 0, "slow event should have completed"); + assert.ok(child_b_start >= 0, "child on bus_b should have run"); + assert.ok( + slow_end < child_b_start, + `bus_b (bus-serial events): child should wait for slow event to finish. ` + + `Got: [${log.join(", ")}]` + ); + + // The child on bus_a should have processed (queue-jumped, bypasses bus_a's event limiter) + assert.ok(log.includes("child_a_start"), "child on bus_a should have run"); + assert.ok(log.includes("child_a_end"), "child on bus_a should have completed"); +}); + +test("queue-jump with fully-parallel forward bus starts immediately", async () => { + // When bus_b uses parallel event AND handler concurrency, the queue-jumped + // child should start immediately even while another event's handler is running. + + const TriggerEvent = BaseEvent.extend("QJFullPar_Trigger", {}); + const ChildEvent = BaseEvent.extend("QJFullPar_Child", {}); + const SlowEvent = BaseEvent.extend("QJFullPar_Slow", {}); + + const bus_a = new EventBus("QJFullPar_A", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + const bus_b = new EventBus("QJFullPar_B", { + event_concurrency: "parallel", + handler_concurrency: "parallel" + }); + + const log: string[] = []; + + bus_b.on(SlowEvent, async () => { + log.push("slow_start"); + await delay(40); + log.push("slow_end"); + }); + + bus_b.on(ChildEvent, async () => { + log.push("child_b_start"); + await delay(5); + log.push("child_b_end"); + }); + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; + bus_b.dispatch(child); + await child.done(); + }); + + bus_b.dispatch(SlowEvent({ event_timeout: null })); + await delay(5); + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); + await top.done(); + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + + const slow_end = log.indexOf("slow_end"); + const child_b_start = log.indexOf("child_b_start"); + assert.ok(child_b_start >= 0, "child on bus_b should have run"); + assert.ok( + child_b_start < slow_end, + `bus_b (fully parallel): child should start before slow finishes. ` + + `Got: [${log.join(", ")}]` + ); +}); + +test("queue-jump with parallel events but bus-serial handlers on forward bus serializes handlers", async () => { + // When bus_b has parallel event concurrency but bus-serial handler concurrency, + // the child event can start processing immediately (event limiter is parallel), + // but its handler must wait for the slow handler to release the handler limiter. + + const TriggerEvent = BaseEvent.extend("QJEvtParHSer_Trigger", {}); + const ChildEvent = BaseEvent.extend("QJEvtParHSer_Child", {}); + const SlowEvent = BaseEvent.extend("QJEvtParHSer_Slow", {}); + + const bus_a = new EventBus("QJEvtParHSer_A", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + const bus_b = new EventBus("QJEvtParHSer_B", { + event_concurrency: "parallel", // events can start concurrently + handler_concurrency: "bus-serial" // but handlers serialize + }); + + const log: string[] = []; + + bus_b.on(SlowEvent, async () => { + log.push("slow_start"); + await delay(40); + log.push("slow_end"); + }); + + bus_b.on(ChildEvent, async () => { + log.push("child_b_start"); + await delay(5); + log.push("child_b_end"); + }); + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; + bus_b.dispatch(child); + await child.done(); + }); + + bus_b.dispatch(SlowEvent({ event_timeout: null })); + await delay(5); + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); + await top.done(); + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + + // With bus-serial handler concurrency, child handler must wait for slow handler + const slow_end = log.indexOf("slow_end"); + const child_b_start = log.indexOf("child_b_start"); + assert.ok(child_b_start >= 0, "child on bus_b should have run"); + assert.ok( + child_b_start > slow_end, + `bus_b (bus-serial handlers): child handler should wait for slow handler. ` + + `Got: [${log.join(", ")}]` + ); +}); diff --git a/bubus-ts/tests/debounce.test.ts b/bubus-ts/tests/debounce.test.ts index 39ee4eb..d45de1f 100644 --- a/bubus-ts/tests/debounce.test.ts +++ b/bubus-ts/tests/debounce.test.ts @@ -98,7 +98,7 @@ test("debounce dispatches new when existing is stale", async () => { )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); assert.ok(result); - const screenshots = bus.event_history.filter( + const screenshots = Array.from(bus.event_history.values()).filter( (event) => event.event_type === "ScreenshotEvent" ); assert.equal(screenshots.length, 2); diff --git a/bubus-ts/tests/error_handling.test.ts b/bubus-ts/tests/error_handling.test.ts new file mode 100644 index 0000000..b014703 --- /dev/null +++ b/bubus-ts/tests/error_handling.test.ts @@ -0,0 +1,228 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const TestEvent = BaseEvent.extend("TestEvent", {}); + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + +test("handler error is captured and does not prevent other handlers from running", async () => { + const bus = new EventBus("ErrorIsolationBus"); + const results: string[] = []; + + const failing_handler = (): string => { + throw new Error("Expected to fail - testing error handling"); + }; + + const working_handler = (): string => { + results.push("success"); + return "worked"; + }; + + bus.on(TestEvent, failing_handler); + bus.on(TestEvent, working_handler); + + const event = bus.dispatch(TestEvent({})); + await event.done(); + + // Both handlers should have run and produced results + assert.equal(event.event_results.size, 2); + + const failing_result = Array.from(event.event_results.values()).find( + (r) => r.handler_name === "failing_handler" + ); + assert.ok(failing_result, "failing_handler result should exist"); + assert.equal(failing_result.status, "error"); + assert.ok(failing_result.error instanceof Error); + assert.ok( + (failing_result.error as Error).message.includes("Expected to fail"), + "error message should contain the thrown message" + ); + + const working_result = Array.from(event.event_results.values()).find( + (r) => r.handler_name === "working_handler" + ); + assert.ok(working_result, "working_handler result should exist"); + assert.equal(working_result.status, "completed"); + assert.equal(working_result.result, "worked"); + + // The working handler actually ran + assert.deepEqual(results, ["success"]); +}); + +test("event.event_errors collects handler errors", async () => { + const bus = new EventBus("ErrorCollectionBus"); + + const handler_a = (): void => { + throw new Error("error_a"); + }; + + const handler_b = (): void => { + throw new TypeError("error_b"); + }; + + const handler_c = (): string => { + return "ok"; + }; + + bus.on(TestEvent, handler_a); + bus.on(TestEvent, handler_b); + bus.on(TestEvent, handler_c); + + const event = bus.dispatch(TestEvent({})); + await event.done(); + + // Two errors should be collected + assert.equal(event.event_errors.length, 2); + const error_messages = event.event_errors.map((e) => (e as Error).message); + assert.ok(error_messages.includes("error_a")); + assert.ok(error_messages.includes("error_b")); +}); + +test("handler error does not prevent event completion", async () => { + const bus = new EventBus("ErrorCompletionBus"); + + bus.on(TestEvent, () => { + throw new Error("handler failed"); + }); + + const event = bus.dispatch(TestEvent({})); + await event.done(); + + // Event should still complete even though handler errored + assert.equal(event.event_status, "completed"); + assert.ok(event.event_completed_at, "event_completed_at should be set"); + assert.equal(event.event_errors.length, 1); +}); + +test("error in one event does not affect subsequent queued events", async () => { + const bus = new EventBus("ErrorQueueBus"); + const Event1 = BaseEvent.extend("Event1", {}); + const Event2 = BaseEvent.extend("Event2", {}); + + bus.on(Event1, () => { + throw new Error("event1 handler failed"); + }); + + bus.on(Event2, () => { + return "event2 ok"; + }); + + const event_1 = bus.dispatch(Event1({})); + const event_2 = bus.dispatch(Event2({})); + + await bus.waitUntilIdle(); + + // Event1 completed with error + assert.equal(event_1.event_status, "completed"); + assert.equal(event_1.event_errors.length, 1); + + // Event2 completed successfully and was not affected by Event1's error + assert.equal(event_2.event_status, "completed"); + assert.equal(event_2.event_errors.length, 0); + const result = Array.from(event_2.event_results.values())[0]; + assert.equal(result.status, "completed"); + assert.equal(result.result, "event2 ok"); +}); + +test("async handler rejection is captured as error", async () => { + const bus = new EventBus("AsyncErrorBus"); + + const async_failing_handler = async (): Promise => { + await delay(1); + throw new Error("async rejection"); + }; + + bus.on(TestEvent, async_failing_handler); + + const event = bus.dispatch(TestEvent({})); + await event.done(); + + assert.equal(event.event_status, "completed"); + assert.equal(event.event_errors.length, 1); + assert.ok((event.event_errors[0] as Error).message.includes("async rejection")); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "error"); +}); + +test("error in forwarded event handler does not block source bus", async () => { + const bus_a = new EventBus("ErrorForwardA"); + const bus_b = new EventBus("ErrorForwardB"); + + const ForwardEvent = BaseEvent.extend("ForwardEvent", {}); + + // Forward from A to B + bus_a.on("*", bus_b.dispatch); + + // Handler on bus_b throws + bus_b.on(ForwardEvent, () => { + throw new Error("bus_b handler failed"); + }); + + // Handler on bus_a succeeds + bus_a.on(ForwardEvent, () => { + return "bus_a ok"; + }); + + const event = bus_a.dispatch(ForwardEvent({})); + await event.done(); + + assert.equal(event.event_status, "completed"); + + // bus_a's handler succeeded + const bus_a_result = Array.from(event.event_results.values()).find( + (r) => r.eventbus_name === "ErrorForwardA" && r.handler_name !== "dispatch" + ); + assert.ok(bus_a_result); + assert.equal(bus_a_result.status, "completed"); + assert.equal(bus_a_result.result, "bus_a ok"); + + // bus_b's handler errored + const bus_b_result = Array.from(event.event_results.values()).find( + (r) => r.eventbus_name === "ErrorForwardB" && r.handler_name !== "dispatch" + ); + assert.ok(bus_b_result); + assert.equal(bus_b_result.status, "error"); + + // Both errors tracked + assert.ok(event.event_errors.length >= 1); +}); + +test("event with no handlers completes without errors", async () => { + const bus = new EventBus("NoHandlerBus"); + const OrphanEvent = BaseEvent.extend("OrphanEvent", {}); + + const event = bus.dispatch(OrphanEvent({})); + await event.done(); + + assert.equal(event.event_status, "completed"); + assert.equal(event.event_results.size, 0); + assert.equal(event.event_errors.length, 0); +}); + +test("error handler result fields are populated correctly", async () => { + const bus = new EventBus("ErrorFieldsBus"); + + const my_handler = (): void => { + throw new RangeError("out of range"); + }; + + bus.on(TestEvent, my_handler); + + const event = bus.dispatch(TestEvent({})); + await event.done(); + + const result = Array.from(event.event_results.values())[0]; + assert.equal(result.status, "error"); + assert.equal(result.handler_name, "my_handler"); + assert.equal(result.eventbus_name, "ErrorFieldsBus"); + assert.ok(result.error instanceof RangeError); + assert.equal((result.error as RangeError).message, "out of range"); + assert.ok(result.started_at, "started_at should be set"); + assert.ok(result.completed_at, "completed_at should be set even on error"); +}); diff --git a/bubus-ts/tests/event_bus_proxy.test.ts b/bubus-ts/tests/event_bus_proxy.test.ts new file mode 100644 index 0000000..eba95e3 --- /dev/null +++ b/bubus-ts/tests/event_bus_proxy.test.ts @@ -0,0 +1,241 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { BaseEvent, EventBus } from "../src/index.js"; + +const MainEvent = BaseEvent.extend("MainEvent", {}); +const ChildEvent = BaseEvent.extend("ChildEvent", {}); +const GrandchildEvent = BaseEvent.extend("GrandchildEvent", {}); + +test("event.bus inside handler returns the dispatching bus", async () => { + const bus = new EventBus("TestBus"); + + let handler_called = false; + let handler_bus_name: string | undefined; + let child_event: BaseEvent | undefined; + + bus.on(MainEvent, (event) => { + handler_called = true; + handler_bus_name = event.bus?.name; + + // Should be able to dispatch child events using event.bus + child_event = event.bus?.emit(ChildEvent({})); + }); + + bus.on(ChildEvent, () => {}); + + bus.dispatch(MainEvent({})); + await bus.waitUntilIdle(); + + assert.equal(handler_called, true); + assert.equal(handler_bus_name, "TestBus"); + assert.ok(child_event, "child event should have been dispatched via event.bus"); + assert.equal(child_event!.event_type, "ChildEvent"); +}); + +test("event.bus returns correct bus when multiple buses exist", async () => { + const bus1 = new EventBus("Bus1"); + const bus2 = new EventBus("Bus2"); + + let handler1_bus_name: string | undefined; + let handler2_bus_name: string | undefined; + + bus1.on(MainEvent, (event) => { + handler1_bus_name = event.bus?.name; + }); + + bus2.on(MainEvent, (event) => { + handler2_bus_name = event.bus?.name; + }); + + bus1.dispatch(MainEvent({})); + await bus1.waitUntilIdle(); + + bus2.dispatch(MainEvent({})); + await bus2.waitUntilIdle(); + + assert.equal(handler1_bus_name, "Bus1"); + assert.equal(handler2_bus_name, "Bus2"); +}); + +test("event.bus reflects the currently-processing bus when forwarded", async () => { + const bus1 = new EventBus("Bus1"); + const bus2 = new EventBus("Bus2"); + + // Forward all events from bus1 to bus2 + bus1.on("*", bus2.dispatch); + + let bus2_handler_bus_name: string | undefined; + + bus2.on(MainEvent, (event) => { + bus2_handler_bus_name = event.bus?.name; + }); + + const event = bus1.dispatch(MainEvent({})); + await bus1.waitUntilIdle(); + await bus2.waitUntilIdle(); + + // The handler on bus2 should see bus2 as event.bus, not bus1 + assert.equal(bus2_handler_bus_name, "Bus2"); + assert.deepEqual(event.event_path, ["Bus1", "Bus2"]); +}); + +test("event.bus in nested handlers sees the same bus", async () => { + const bus = new EventBus("MainBus"); + + let outer_bus_name: string | undefined; + let inner_bus_name: string | undefined; + + bus.on(MainEvent, async (event) => { + outer_bus_name = event.bus?.name; + + // Dispatch child using event.bus + const child = event.bus!.emit(ChildEvent({})); + await child.done(); + }); + + bus.on(ChildEvent, (event) => { + inner_bus_name = event.bus?.name; + }); + + const parent = bus.dispatch(MainEvent({})); + await parent.done(); + + assert.equal(outer_bus_name, "MainBus"); + assert.equal(inner_bus_name, "MainBus"); +}); + +test("event.bus.dispatch sets parent-child relationships through 3 levels", async () => { + const bus = new EventBus("MainBus"); + + const execution_order: string[] = []; + let child_ref: BaseEvent | undefined; + let grandchild_ref: BaseEvent | undefined; + + bus.on(MainEvent, async (event) => { + execution_order.push("parent_start"); + assert.equal(event.bus?.name, "MainBus"); + + child_ref = event.bus!.emit(ChildEvent({})); + await child_ref.done(); + + execution_order.push("parent_end"); + }); + + bus.on(ChildEvent, async (event) => { + execution_order.push("child_start"); + assert.equal(event.bus?.name, "MainBus"); + + grandchild_ref = event.bus!.emit(GrandchildEvent({})); + await grandchild_ref.done(); + + execution_order.push("child_end"); + }); + + bus.on(GrandchildEvent, (event) => { + execution_order.push("grandchild_start"); + assert.equal(event.bus?.name, "MainBus"); + execution_order.push("grandchild_end"); + }); + + const parent_event = bus.dispatch(MainEvent({})); + await parent_event.done(); + + // Child events should queue-jump and complete before their parents return + assert.deepEqual(execution_order, [ + "parent_start", + "child_start", + "grandchild_start", + "grandchild_end", + "child_end", + "parent_end" + ]); + + // All events completed + assert.equal(parent_event.event_status, "completed"); + assert.ok(child_ref); + assert.equal(child_ref!.event_status, "completed"); + assert.ok(grandchild_ref); + assert.equal(grandchild_ref!.event_status, "completed"); + + // Parent-child relationships are set correctly + assert.equal(child_ref!.event_parent_id, parent_event.event_id); + assert.equal(grandchild_ref!.event_parent_id, child_ref!.event_id); +}); + +test("event.bus with forwarding: child dispatched via event.bus goes to the correct bus", async () => { + const bus1 = new EventBus("Bus1"); + const bus2 = new EventBus("Bus2"); + + // Forward all events from bus1 to bus2 + bus1.on("*", bus2.dispatch); + + let child_handler_bus_name: string | undefined; + + // Handlers only on bus2 + bus2.on(MainEvent, async (event) => { + // Handler runs on bus2 (forwarded from bus1) + assert.equal(event.bus?.name, "Bus2"); + + // Child dispatched via event.bus should go to bus2 + const child = event.bus!.emit(ChildEvent({})); + await child.done(); + }); + + bus2.on(ChildEvent, (event) => { + child_handler_bus_name = event.bus?.name; + }); + + const parent_event = bus1.dispatch(MainEvent({})); + await bus1.waitUntilIdle(); + await bus2.waitUntilIdle(); + + // Child handler should have seen bus2 + assert.equal(child_handler_bus_name, "Bus2"); +}); + +test("event.bus is set on the event after dispatch (outside handler)", async () => { + const bus = new EventBus("TestBus"); + + // Before dispatch, bus is not set + const raw_event = MainEvent({}); + assert.equal(raw_event.bus, undefined); + + // After dispatch, bus is set on the original event + const dispatched = bus.dispatch(raw_event); + assert.ok(dispatched.bus, "event.bus should be set after dispatch"); + + await bus.waitUntilIdle(); +}); + +test("event.bus.dispatch from handler correctly attributes event_emitted_by_handler_id", async () => { + const bus = new EventBus("TestBus"); + + let child_emitted_by_handler_id: string | undefined; + + bus.on(MainEvent, (event) => { + event.bus?.emit(ChildEvent({})); + }); + + bus.on(ChildEvent, () => {}); + + const parent = bus.dispatch(MainEvent({})); + await bus.waitUntilIdle(); + + // Find the child event in history + const child = Array.from(bus.event_history.values()).find((e) => e.event_type === "ChildEvent"); + assert.ok(child, "child event should be in history"); + assert.equal(child!.event_parent_id, parent.event_id); + + // The child should have event_emitted_by_handler_id set to the handler that emitted it + assert.ok( + child!.event_emitted_by_handler_id, + "event_emitted_by_handler_id should be set on child events dispatched via event.bus" + ); + + // The handler id should correspond to a handler result on the parent event + const parent_from_history = Array.from(bus.event_history.values()).find((e) => e.event_type === "MainEvent"); + assert.ok(parent_from_history); + const handler_result = parent_from_history!.event_results.get(child!.event_emitted_by_handler_id!); + assert.ok(handler_result, "handler_id on child should match a handler result on the parent"); +}); diff --git a/bubus-ts/tests/eventbus_basics.test.ts b/bubus-ts/tests/eventbus_basics.test.ts new file mode 100644 index 0000000..dd6753f --- /dev/null +++ b/bubus-ts/tests/eventbus_basics.test.ts @@ -0,0 +1,513 @@ +import assert from "node:assert/strict"; +import { test } from "node:test"; + +import { BaseEvent, EventBus } from "../src/index.js"; +import { z } from "zod"; + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + +// ─── Constructor defaults ──────────────────────────────────────────────────── + +test("EventBus initializes with correct defaults", () => { + const bus = new EventBus("DefaultsBus"); + + assert.equal(bus.name, "DefaultsBus"); + assert.equal(bus.max_history_size, 100); + assert.equal(bus.event_concurrency_default, "bus-serial"); + assert.equal(bus.handler_concurrency_default, "bus-serial"); + assert.equal(bus.event_timeout_default, 60); + assert.equal(bus.event_history.size, 0); + assert.equal(bus.pending_event_queue.length, 0); + assert.equal(bus.in_flight_event_ids.size, 0); + assert.ok(EventBus.instances.has(bus)); +}); + +test("EventBus applies custom options", () => { + const bus = new EventBus("CustomBus", { + max_history_size: 500, + event_concurrency: "parallel", + handler_concurrency: "global-serial", + event_timeout: 30 + }); + + assert.equal(bus.max_history_size, 500); + assert.equal(bus.event_concurrency_default, "parallel"); + assert.equal(bus.handler_concurrency_default, "global-serial"); + assert.equal(bus.event_timeout_default, 30); +}); + +test("EventBus with null max_history_size means unlimited", () => { + const bus = new EventBus("UnlimitedBus", { max_history_size: null }); + assert.equal(bus.max_history_size, null); +}); + +test("EventBus with null event_timeout disables timeouts", () => { + const bus = new EventBus("NoTimeoutBus", { event_timeout: null }); + assert.equal(bus.event_timeout_default, null); +}); + +test("EventBus auto-generates name when not provided", () => { + const bus = new EventBus(); + assert.equal(bus.name, "EventBus"); +}); + +// ─── Event dispatch and status lifecycle ───────────────────────────────────── + +test("dispatch returns pending event with correct initial state", async () => { + const bus = new EventBus("LifecycleBus", { max_history_size: 100 }); + const TestEvent = BaseEvent.extend("TestEvent", { data: z.string() }); + + const event = bus.dispatch(TestEvent({ data: "hello" })); + + // Immediate state after dispatch (before any microtask runs) + assert.equal(event.event_type, "TestEvent"); + assert.ok(event.event_id); + assert.ok(event.event_created_at); + assert.equal((event as any).data, "hello"); + + // event_path should include the bus name + const original = event._original_event ?? event; + assert.ok(original.event_path.includes("LifecycleBus")); + + await bus.waitUntilIdle(); +}); + +test("event transitions through pending -> started -> completed", async () => { + const bus = new EventBus("StatusBus", { max_history_size: 100 }); + const TestEvent = BaseEvent.extend("TestEvent", {}); + let status_during_handler: string | undefined; + + bus.on(TestEvent, (event: BaseEvent) => { + status_during_handler = event.event_status; + return "done"; + }); + + const event = bus.dispatch(TestEvent({})); + const original = event._original_event ?? event; + + await event.done(); + + assert.equal(status_during_handler, "started"); + assert.equal(original.event_status, "completed"); + assert.ok(original.event_started_at, "event_started_at should be set"); + assert.ok(original.event_completed_at, "event_completed_at should be set"); +}); + +test("event with no handlers completes immediately", async () => { + const bus = new EventBus("NoHandlerBus", { max_history_size: 100 }); + const OrphanEvent = BaseEvent.extend("OrphanEvent", {}); + + const event = bus.dispatch(OrphanEvent({})); + await event.done(); + + const original = event._original_event ?? event; + assert.equal(original.event_status, "completed"); + assert.equal(original.event_results.size, 0); +}); + +// ─── Event history tracking ────────────────────────────────────────────────── + +test("dispatched events appear in event_history", async () => { + const bus = new EventBus("HistoryBus", { max_history_size: 100 }); + const EventA = BaseEvent.extend("EventA", {}); + const EventB = BaseEvent.extend("EventB", {}); + + bus.dispatch(EventA({})); + bus.dispatch(EventB({})); + await bus.waitUntilIdle(); + + assert.equal(bus.event_history.size, 2); + const history = Array.from(bus.event_history.values()); + assert.equal(history[0].event_type, "EventA"); + assert.equal(history[1].event_type, "EventB"); + + // All events are accessible by id + for (const event of bus.event_history.values()) { + assert.ok(bus.event_history.has(event.event_id)); + } +}); + +// ─── History trimming (max_history_size) ───────────────────────────────────── + +test("history is trimmed to max_history_size, completed events removed first", async () => { + const bus = new EventBus("TrimBus", { max_history_size: 5 }); + const TrimEvent = BaseEvent.extend("TrimEvent", { seq: z.number() }); + + bus.on(TrimEvent, () => "ok"); + + // Dispatch 10 events; they'll process and complete in FIFO order + for (let i = 0; i < 10; i++) { + bus.dispatch(TrimEvent({ seq: i })); + } + await bus.waitUntilIdle(); + + // History should be trimmed to at most max_history_size + assert.ok(bus.event_history.size <= 5, `expected <= 5, got ${bus.event_history.size}`); + + // The remaining events should be the MOST RECENT ones (oldest completed removed first) + const seqs = Array.from(bus.event_history.values()).map((e) => (e as any).seq as number); + for (let i = 1; i < seqs.length; i++) { + assert.ok(seqs[i] > seqs[i - 1], "remaining history should be in order"); + } +}); + +test("unlimited history (max_history_size: null) keeps all events", async () => { + const bus = new EventBus("UnlimitedHistBus", { max_history_size: null }); + const PingEvent = BaseEvent.extend("PingEvent", {}); + + bus.on(PingEvent, () => "pong"); + + for (let i = 0; i < 150; i++) { + bus.dispatch(PingEvent({})); + } + await bus.waitUntilIdle(); + + assert.equal(bus.event_history.size, 150); + + // All completed + for (const event of bus.event_history.values()) { + assert.equal(event.event_status, "completed"); + } +}); + +// ─── Event type derivation ─────────────────────────────────────────────────── + +test("event_type is derived from extend() name argument", () => { + const MyCustomEvent = BaseEvent.extend("MyCustomEvent", { val: z.number() }); + const event = MyCustomEvent({ val: 42 }); + assert.equal(event.event_type, "MyCustomEvent"); +}); + +test("event_type can be overridden at instantiation", () => { + const FlexEvent = BaseEvent.extend("FlexEvent", {}); + const event = FlexEvent({ event_type: "OverriddenType" }); + assert.equal(event.event_type, "OverriddenType"); +}); + +test("handler registration by string matches extend() name", async () => { + const bus = new EventBus("StringMatchBus", { max_history_size: 100 }); + const NamedEvent = BaseEvent.extend("NamedEvent", {}); + const received: string[] = []; + + bus.on("NamedEvent", () => { + received.push("string_handler"); + }); + + bus.dispatch(NamedEvent({})); + await bus.waitUntilIdle(); + + assert.equal(received.length, 1); + assert.equal(received[0], "string_handler"); +}); + +test("wildcard handler receives all events", async () => { + const bus = new EventBus("WildcardBus", { max_history_size: 100 }); + const EventA = BaseEvent.extend("EventA", {}); + const EventB = BaseEvent.extend("EventB", {}); + const types: string[] = []; + + bus.on("*", (event: BaseEvent) => { + types.push(event.event_type); + }); + + bus.dispatch(EventA({})); + bus.dispatch(EventB({})); + await bus.waitUntilIdle(); + + assert.deepEqual(types, ["EventA", "EventB"]); +}); + +// ─── Error handling and isolation ──────────────────────────────────────────── + +test("handler error is captured without crashing the bus", async () => { + const bus = new EventBus("ErrorBus", { max_history_size: 100 }); + const ErrorEvent = BaseEvent.extend("ErrorEvent", {}); + + bus.on(ErrorEvent, () => { + throw new Error("handler blew up"); + }); + + const event = bus.dispatch(ErrorEvent({})); + await event.done(); + + const original = event._original_event ?? event; + assert.equal(original.event_status, "completed"); + assert.ok(original.event_errors.length > 0, "event should record the error"); + + // The handler result should have error status + const results = Array.from(original.event_results.values()); + assert.equal(results.length, 1); + assert.equal(results[0].status, "error"); + assert.ok(results[0].error instanceof Error); + assert.equal((results[0].error as Error).message, "handler blew up"); +}); + +test("one handler error does not prevent other handlers from running", async () => { + const bus = new EventBus("IsolationBus", { + max_history_size: 100, + handler_concurrency: "parallel" + }); + const MultiEvent = BaseEvent.extend("MultiEvent", {}); + + const results_seen: string[] = []; + + bus.on(MultiEvent, () => { + results_seen.push("handler_1_ok"); + return "result_1"; + }); + bus.on(MultiEvent, () => { + throw new Error("handler_2_fails"); + }); + bus.on(MultiEvent, () => { + results_seen.push("handler_3_ok"); + return "result_3"; + }); + + const event = bus.dispatch(MultiEvent({})); + await event.done(); + + const original = event._original_event ?? event; + assert.equal(original.event_status, "completed"); + + // Both non-erroring handlers should have run + assert.ok(results_seen.includes("handler_1_ok")); + assert.ok(results_seen.includes("handler_3_ok")); + + // Check individual results + const all_results = Array.from(original.event_results.values()); + const completed_results = all_results.filter((r) => r.status === "completed"); + const error_results = all_results.filter((r) => r.status === "error"); + assert.equal(completed_results.length, 2); + assert.equal(error_results.length, 1); +}); + +// ─── Concurrent dispatch ───────────────────────────────────────────────────── + +test("many events dispatched concurrently all complete", async () => { + const bus = new EventBus("ConcurrentBus", { max_history_size: null }); + const BatchEvent = BaseEvent.extend("BatchEvent", { idx: z.number() }); + let processed = 0; + + bus.on(BatchEvent, () => { + processed += 1; + return "ok"; + }); + + const events: BaseEvent[] = []; + for (let i = 0; i < 100; i++) { + events.push(bus.dispatch(BatchEvent({ idx: i }))); + } + + // Wait for all to complete + await Promise.all(events.map((e) => e.done())); + await bus.waitUntilIdle(); + + assert.equal(processed, 100); + assert.equal(bus.event_history.size, 100); + + for (const event of bus.event_history.values()) { + assert.equal(event.event_status, "completed"); + } +}); + +// ─── event_timeout default application ─────────────────────────────────────── + +test("dispatch applies bus event_timeout_default when event has null timeout", async () => { + const bus = new EventBus("TimeoutDefaultBus", { + max_history_size: 100, + event_timeout: 42 + }); + const TEvent = BaseEvent.extend("TEvent", {}); + + const event = bus.dispatch(TEvent({})); + const original = event._original_event ?? event; + + // The bus should have applied its default timeout + assert.equal(original.event_timeout, 42); + + await bus.waitUntilIdle(); +}); + +test("event with explicit timeout is not overridden by bus default", async () => { + const bus = new EventBus("TimeoutOverrideBus", { + max_history_size: 100, + event_timeout: 42 + }); + const TEvent = BaseEvent.extend("TEvent", {}); + + const event = bus.dispatch(TEvent({ event_timeout: 10 })); + const original = event._original_event ?? event; + + assert.equal(original.event_timeout, 10); + + await bus.waitUntilIdle(); +}); + +// ─── EventBus.instances tracking ───────────────────────────────────────────── + +test("EventBus.instances tracks all created buses", () => { + const initial_count = EventBus.instances.size; + const bus_a = new EventBus("TrackA"); + const bus_b = new EventBus("TrackB"); + + assert.ok(EventBus.instances.has(bus_a)); + assert.ok(EventBus.instances.has(bus_b)); + assert.equal(EventBus.instances.size, initial_count + 2); +}); + +// ─── Circular forwarding prevention ────────────────────────────────────────── + +test("circular forwarding does not cause infinite loop", async () => { + const bus_a = new EventBus("CircA", { max_history_size: 100 }); + const bus_b = new EventBus("CircB", { max_history_size: 100 }); + const bus_c = new EventBus("CircC", { max_history_size: 100 }); + + // A -> B -> C -> A (circular) + bus_a.on("*", bus_b.dispatch); + bus_b.on("*", bus_c.dispatch); + bus_c.on("*", bus_a.dispatch); + + const CircEvent = BaseEvent.extend("CircEvent", {}); + const handler_calls: string[] = []; + + // Register real handlers on each bus + bus_a.on(CircEvent, () => { handler_calls.push("A"); return "a"; }); + bus_b.on(CircEvent, () => { handler_calls.push("B"); return "b"; }); + bus_c.on(CircEvent, () => { handler_calls.push("C"); return "c"; }); + + const event = bus_a.dispatch(CircEvent({})); + await event.done(); + await bus_a.waitUntilIdle(); + await bus_b.waitUntilIdle(); + await bus_c.waitUntilIdle(); + + // Each bus should process the event exactly once (loop prevention via event_path) + assert.equal(handler_calls.filter((h) => h === "A").length, 1); + assert.equal(handler_calls.filter((h) => h === "B").length, 1); + assert.equal(handler_calls.filter((h) => h === "C").length, 1); + + // event_path should contain all three buses + const original = event._original_event ?? event; + assert.ok(original.event_path.includes("CircA")); + assert.ok(original.event_path.includes("CircB")); + assert.ok(original.event_path.includes("CircC")); +}); + +// ─── EventBus GC / memory leak ─────────────────────────────────────────────── + +test("unreferenced EventBus can be garbage collected (not retained by instances)", async () => { + // This test requires --expose-gc to force garbage collection + const gc = globalThis.gc as (() => void) | undefined; + if (typeof gc !== "function") { + // Can't test GC without --expose-gc; skip gracefully + return; + } + + let weak_ref: WeakRef; + + // Create a bus inside an IIFE so the only reference is the WeakRef + (() => { + const bus = new EventBus("GCTestBus"); + weak_ref = new WeakRef(bus); + })(); + + // Force garbage collection + gc(); + await delay(50); + gc(); + + // If EventBus.instances holds a strong reference (Set), + // the bus will NOT be collected β€” proving the memory leak. + // After the fix (WeakRef-based storage), the bus should be collected. + assert.equal( + weak_ref!.deref(), + undefined, + "bus should be garbage collected when no external references remain β€” " + + "EventBus.instances is holding a strong reference (memory leak)" + ); +}); + +// ─── off() handler deregistration ──────────────────────────────────────────── + +test("off() removes a handler so it no longer fires", async () => { + const bus = new EventBus("OffBus", { max_history_size: 100 }); + const OffEvent = BaseEvent.extend("OffEvent", {}); + let call_count = 0; + + const handler = () => { + call_count += 1; + }; + + bus.on(OffEvent, handler); + bus.dispatch(OffEvent({})); + await bus.waitUntilIdle(); + assert.equal(call_count, 1); + + bus.off(OffEvent, handler); + bus.dispatch(OffEvent({})); + await bus.waitUntilIdle(); + assert.equal(call_count, 1, "handler should not fire after off()"); +}); + +test("off() removes a handler by handler_id string", async () => { + const bus = new EventBus("OffByIdBus", { max_history_size: 100 }); + const OffIdEvent = BaseEvent.extend("OffIdEvent", {}); + let call_count = 0; + + bus.on(OffIdEvent, function my_handler() { + call_count += 1; + }); + + // Dispatch once so we can find the handler_id from the event results + const event1 = bus.dispatch(OffIdEvent({})); + await bus.waitUntilIdle(); + assert.equal(call_count, 1); + + // Get the handler_id from the event's results + const results = Array.from(event1.event_results.values()); + assert.equal(results.length, 1, "should have exactly one handler result"); + const handler_id = results[0].handler_id; + assert.ok(handler_id, "handler_id should exist"); + + // Remove by handler_id string + bus.off(OffIdEvent, handler_id); + + // Dispatch again β€” handler should NOT fire + bus.dispatch(OffIdEvent({})); + await bus.waitUntilIdle(); + assert.equal(call_count, 1, "handler should not fire after off() by handler_id"); +}); + +test("off() with no handler removes all handlers for that event", async () => { + const bus = new EventBus("OffAllBus", { max_history_size: 100 }); + const OffAllEvent = BaseEvent.extend("OffAllEvent", {}); + const OtherEvent = BaseEvent.extend("OffAllOther", {}); + let call_count_a = 0; + let call_count_b = 0; + let other_count = 0; + + bus.on(OffAllEvent, () => { call_count_a += 1; }); + bus.on(OffAllEvent, () => { call_count_b += 1; }); + bus.on(OtherEvent, () => { other_count += 1; }); + + bus.dispatch(OffAllEvent({})); + await bus.waitUntilIdle(); + assert.equal(call_count_a, 1); + assert.equal(call_count_b, 1); + + // Remove ALL handlers for OffAllEvent + bus.off(OffAllEvent); + + bus.dispatch(OffAllEvent({})); + bus.dispatch(OtherEvent({})); + await bus.waitUntilIdle(); + + // Neither OffAllEvent handler should fire + assert.equal(call_count_a, 1, "handler A should not fire after off(event)"); + assert.equal(call_count_b, 1, "handler B should not fire after off(event)"); + // OtherEvent handler should still work + assert.equal(other_count, 1, "unrelated handler should still fire"); +}); diff --git a/bubus-ts/tests/forwarding.test.ts b/bubus-ts/tests/forwarding.test.ts index b03884d..a380ecf 100644 --- a/bubus-ts/tests/forwarding.test.ts +++ b/bubus-ts/tests/forwarding.test.ts @@ -86,6 +86,69 @@ test("await event.done waits for handlers on forwarded buses", async () => { assert.equal(event.event_pending_buses, 0); }); +test("circular forwarding A->B->C->A does not loop", async () => { + const peer1 = new EventBus("Peer1"); + const peer2 = new EventBus("Peer2"); + const peer3 = new EventBus("Peer3"); + + const events_at_peer1: string[] = []; + const events_at_peer2: string[] = []; + const events_at_peer3: string[] = []; + + peer1.on(PingEvent, (event) => { + events_at_peer1.push(event.event_id); + }); + peer2.on(PingEvent, (event) => { + events_at_peer2.push(event.event_id); + }); + peer3.on(PingEvent, (event) => { + events_at_peer3.push(event.event_id); + }); + + // Create a full cycle: Peer1 -> Peer2 -> Peer3 -> Peer1 + peer1.on("*", peer2.dispatch); + peer2.on("*", peer3.dispatch); + peer3.on("*", peer1.dispatch); // completes the circle + + const event = peer1.dispatch(PingEvent({ value: 42 })); + + await peer1.waitUntilIdle(); + await peer2.waitUntilIdle(); + await peer3.waitUntilIdle(); + + // Each peer must see the event exactly once (no infinite loop) + assert.equal(events_at_peer1.length, 1); + assert.equal(events_at_peer2.length, 1); + assert.equal(events_at_peer3.length, 1); + + // All saw the same event + assert.equal(events_at_peer1[0], event.event_id); + assert.equal(events_at_peer2[0], event.event_id); + assert.equal(events_at_peer3[0], event.event_id); + + // event_path shows propagation order without looping back + assert.deepEqual(event.event_path, ["Peer1", "Peer2", "Peer3"]); + + // --- Start from a different peer in the same cycle --- + events_at_peer1.length = 0; + events_at_peer2.length = 0; + events_at_peer3.length = 0; + + const event2 = peer2.dispatch(PingEvent({ value: 99 })); + + await peer1.waitUntilIdle(); + await peer2.waitUntilIdle(); + await peer3.waitUntilIdle(); + + // Each peer sees it exactly once + assert.equal(events_at_peer1.length, 1); + assert.equal(events_at_peer2.length, 1); + assert.equal(events_at_peer3.length, 1); + + // Path starts at Peer2, goes to Peer3, then Peer1 (stops before looping back to Peer2) + assert.deepEqual(event2.event_path, ["Peer2", "Peer3", "Peer1"]); +}); + test("await event.done waits when forwarding handler is async-delayed", async () => { const bus_a = new EventBus("BusA"); const bus_b = new EventBus("BusB"); diff --git a/bubus-ts/tests/handlers.test.ts b/bubus-ts/tests/handlers.test.ts index 3fc0fa0..6599427 100644 --- a/bubus-ts/tests/handlers.test.ts +++ b/bubus-ts/tests/handlers.test.ts @@ -61,8 +61,10 @@ test("handlers can be sync or async", async () => { bus.on("TestEvent", sync_handler); bus.on("TestEvent", async_handler); - const handlers = bus.handlers_by_key.get("TestEvent"); - assert.equal(handlers?.size ?? 0, 2); + const handler_count = Array.from(bus.handlers.values()).filter( + (entry) => entry.event_key === "TestEvent" + ).length; + assert.equal(handler_count, 2); const event = bus.dispatch(BaseEvent.extend("TestEvent", {})({})); await event.done(); diff --git a/bubus-ts/tests/locking.test.ts b/bubus-ts/tests/locking.test.ts index bc9e84b..87b9e46 100644 --- a/bubus-ts/tests/locking.test.ts +++ b/bubus-ts/tests/locking.test.ts @@ -155,7 +155,10 @@ test("global-serial: awaited child jumps ahead of queued events across buses", a bus_a.on(ParentEvent, async (event) => { order.push("parent_start"); bus_b.emit(QueuedEvent({})); - const child = bus_b.emit(ChildEvent({})); + // Emit through the scoped proxy so parent tracking is set up, + // then also dispatch to bus_b for cross-bus processing. + const child = event.bus?.emit(ChildEvent({}))!; + bus_b.dispatch(child); order.push("child_dispatched"); await child.done(); order.push("child_awaited"); @@ -920,19 +923,19 @@ test("fifo: forwarded events preserve order on target bus (bus-serial)", async ( await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); - const history_orders = bus_b.event_history.map((event) => (event as { order?: number }).order); - const results_sizes = bus_b.event_history.map((event) => event.event_results.size); - const bus_b_result_counts = bus_b.event_history.map((event) => + const history_orders = Array.from(bus_b.event_history.values()).map((event) => (event as { order?: number }).order); + const results_sizes = Array.from(bus_b.event_history.values()).map((event) => event.event_results.size); + const bus_b_result_counts = Array.from(bus_b.event_history.values()).map((event) => Array.from(event.event_results.values()).filter( (result) => result.eventbus_name === "ForwardOrderB" ).length ); - const processed_flags = bus_b.event_history.map((event) => + const processed_flags = Array.from(bus_b.event_history.values()).map((event) => Array.from(event.event_results.values()) .filter((result) => result.eventbus_name === "ForwardOrderB") .every((result) => result.status === "completed" || result.status === "error") ); - const pending_counts = bus_b.event_history.map( + const pending_counts = Array.from(bus_b.event_history.values()).map( (event) => Array.from(event.event_results.values()).filter((result) => result.status === "pending").length ); assert.deepEqual(order_a, [0, 1, 2, 3, 4]); diff --git a/bubus-ts/tests/log_tree.test.ts b/bubus-ts/tests/log_tree.test.ts index 51f0e34..42e578f 100644 --- a/bubus-ts/tests/log_tree.test.ts +++ b/bubus-ts/tests/log_tree.test.ts @@ -23,8 +23,7 @@ test("logTree: single event", () => { event.event_status = "completed"; event.event_completed_at = event.event_created_at; - bus.event_history.push(event); - bus.event_history_by_id.set(event.event_id, event); + bus.event_history.set(event.event_id, event); const output = bus.logTree(); @@ -50,8 +49,7 @@ test("logTree: with handler results", () => { result.markCompleted("status: success"); event.event_results.set(handler_id, result); - bus.event_history.push(event); - bus.event_history_by_id.set(event.event_id, event); + bus.event_history.set(event.event_id, event); const output = bus.logTree(); @@ -78,8 +76,7 @@ test("logTree: with handler errors", () => { result.markError(new ValueError("Test error message")); event.event_results.set(handler_id, result); - bus.event_history.push(event); - bus.event_history_by_id.set(event.event_id, event); + bus.event_history.set(event.event_id, event); const output = bus.logTree(); @@ -139,10 +136,9 @@ test("logTree: complex nested", () => { grandchild_result.markCompleted(null); grandchild.event_results.set(grandchild_handler_id, grandchild_result); - bus.event_history.push(root, child, grandchild); - bus.event_history_by_id.set(root.event_id, root); - bus.event_history_by_id.set(child.event_id, child); - bus.event_history_by_id.set(grandchild.event_id, grandchild); + bus.event_history.set(root.event_id, root); + bus.event_history.set(child.event_id, child); + bus.event_history.set(grandchild.event_id, grandchild); const output = bus.logTree(); @@ -168,9 +164,8 @@ test("logTree: multiple roots", () => { root2.event_status = "completed"; root2.event_completed_at = root2.event_created_at; - bus.event_history.push(root1, root2); - bus.event_history_by_id.set(root1.event_id, root1); - bus.event_history_by_id.set(root2.event_id, root2); + bus.event_history.set(root1.event_id, root1); + bus.event_history.set(root2.event_id, root2); const output = bus.logTree(); @@ -196,8 +191,7 @@ test("logTree: timing info", () => { result.markCompleted("done"); event.event_results.set(handler_id, result); - bus.event_history.push(event); - bus.event_history_by_id.set(event.event_id, event); + bus.event_history.set(event.event_id, event); const output = bus.logTree(); @@ -221,8 +215,7 @@ test("logTree: running handler", () => { result.markStarted(); event.event_results.set(handler_id, result); - bus.event_history.push(event); - bus.event_history_by_id.set(event.event_id, event); + bus.event_history.set(event.event_id, event); const output = bus.logTree(); diff --git a/bubus-ts/tests/parent_child.test.ts b/bubus-ts/tests/parent_child.test.ts index f95b700..0a7c0d7 100644 --- a/bubus-ts/tests/parent_child.test.ts +++ b/bubus-ts/tests/parent_child.test.ts @@ -18,7 +18,7 @@ test("eventIsChildOf and eventIsParentOf work for direct children", async () => const parent_event = bus.dispatch(ParentEvent({})); await bus.waitUntilIdle(); - const child_event = bus.event_history.find((event) => event.event_type === "ChildEvent"); + const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === "ChildEvent"); assert.ok(child_event); assert.equal(child_event.event_parent_id, parent_event.event_id); @@ -40,8 +40,8 @@ test("eventIsChildOf works for grandchildren", async () => { const parent_event = bus.dispatch(ParentEvent({})); await bus.waitUntilIdle(); - const child_event = bus.event_history.find((event) => event.event_type === "ChildEvent"); - const grandchild_event = bus.event_history.find((event) => event.event_type === "GrandchildEvent"); + const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === "ChildEvent"); + const grandchild_event = Array.from(bus.event_history.values()).find((event) => event.event_type === "GrandchildEvent"); assert.ok(child_event); assert.ok(grandchild_event); diff --git a/bubus-ts/tests/performance.test.ts b/bubus-ts/tests/performance.test.ts index 8901986..043b910 100644 --- a/bubus-ts/tests/performance.test.ts +++ b/bubus-ts/tests/performance.test.ts @@ -31,6 +31,6 @@ test( assert.equal(processed_count, total_events); assert.ok(duration_ms < 120_000, `Processing took ${duration_ms}ms`); - assert.ok(bus.event_history.length <= bus.max_history_size); + assert.ok(bus.event_history.size <= bus.max_history_size); } ); diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts index 5d72b9f..1bfe24a 100644 --- a/bubus-ts/tests/timeout.test.ts +++ b/bubus-ts/tests/timeout.test.ts @@ -514,3 +514,501 @@ test("multi-level timeout cascade with mixed cancellations", async () => { ); assert.ok(queued_cancelled.length >= 2); }); + +// ============================================================================= +// Three-level timeout cascade (mirrors Python test_handler_timeout.py) +// +// This test creates a deep event hierarchy: +// TopEvent (250ms timeout) +// β”œβ”€β”€ ChildEvent (80ms timeout) β€” awaited by top_handler_main +// β”‚ β”œβ”€β”€ GrandchildEvent (35ms timeout) β€” awaited by child_handler +// β”‚ β”‚ └── 5 handlers (parallel): 3 slow (timeout), 2 fast (complete) +// β”‚ └── QueuedGrandchildEvent β€” emitted but NOT awaited, stays in queue +// β”‚ └── 1 handler: never runs, CANCELLED when child_handler times out +// └── SiblingEvent β€” emitted but NOT awaited, stays in queue +// └── 1 handler: never runs, CANCELLED when top_handler_main times out +// +// KEY MECHANIC: When a child event is awaited via event.done() inside a handler, +// it triggers "queue-jumping" via _runImmediately β†’ runImmediatelyAcrossBuses. +// Queue-jumped events bypass the handler limiter (bypass_handler_limiters: true), +// so all handlers for that event run in PARALLEL, even on a bus-serial bus. +// Non-awaited child events stay in the pending_event_queue and are blocked by +// immediate_processing_stack_depth > 0 (runloop is paused during queue-jump). +// +// TIMEOUT BEHAVIOR: Each handler gets its OWN timeout window starting from when +// that handler begins execution β€” NOT from when the event was dispatched. +// So with parallel handlers, all timeouts start at roughly the same time. +// With serial handlers, each timeout starts when the handler acquires the limiter. +// +// CANCELLATION CASCADE: When a handler times out, cancelPendingChildProcessing() +// walks the event's children tree and marks any "pending" handler results as +// EventHandlerCancelledError. Only "pending" results are cancelled β€” handlers +// that already started ("started" status) continue running in the background. +// ============================================================================= + +test("three-level timeout cascade with per-level timeouts and cascading cancellation", async () => { + const TopEvent = BaseEvent.extend("Cascade3LTop", {}); + const ChildEvent = BaseEvent.extend("Cascade3LChild", {}); + const GrandchildEvent = BaseEvent.extend("Cascade3LGrandchild", {}); + const QueuedGrandchildEvent = BaseEvent.extend("Cascade3LQueuedGC", {}); + const SiblingEvent = BaseEvent.extend("Cascade3LSibling", {}); + + const bus = new EventBus("Cascade3LevelBus", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + + const execution_log: string[] = []; + let child_ref: InstanceType | null = null; + let grandchild_ref: InstanceType | null = null; + let queued_grandchild_ref: InstanceType | null = null; + let sibling_ref: InstanceType | null = null; + + // ── GrandchildEvent handlers ────────────────────────────────────────── + // These run in PARALLEL because GrandchildEvent is queue-jumped + // (bypass_handler_limiters: true). Each handler gets its own 35ms timeout + // window starting from approximately the same moment. + // + // Handlers a, c, e sleep 200ms β†’ each times out individually at 35ms + // Handler b is synchronous β†’ completes immediately + // Handler d sleeps 10ms β†’ completes within its 35ms window + + const gc_handler_a = async () => { + execution_log.push("gc_a_start"); + await delay(200); // will be interrupted by 35ms timeout + execution_log.push("gc_a_end"); // should never reach here + return "gc_a_done"; + }; + + const gc_handler_b = () => { + execution_log.push("gc_b_complete"); + return "gc_b_done"; + }; + + const gc_handler_c = async () => { + execution_log.push("gc_c_start"); + await delay(200); // will be interrupted by 35ms timeout + execution_log.push("gc_c_end"); // should never reach here + return "gc_c_done"; + }; + + const gc_handler_d = async () => { + execution_log.push("gc_d_start"); + await delay(10); // fast enough to complete within 35ms + execution_log.push("gc_d_complete"); + return "gc_d_done"; + }; + + const gc_handler_e = async () => { + execution_log.push("gc_e_start"); + await delay(200); // will be interrupted by 35ms timeout + execution_log.push("gc_e_end"); // should never reach here + return "gc_e_done"; + }; + + // ── QueuedGrandchildEvent handler ───────────────────────────────────── + // This event is emitted by child_handler but NOT awaited, so it sits in + // pending_event_queue. When child_handler times out at 80ms, + // cancelPendingChildProcessing walks ChildEvent.event_children and finds + // this event still pending β†’ its handler results are marked as cancelled. + const queued_gc_handler = () => { + execution_log.push("queued_gc_start"); // should never reach here + return "queued_gc_done"; + }; + + // ── ChildEvent handler ──────────────────────────────────────────────── + // Emits GrandchildEvent (awaited β†’ queue-jump, ~35ms to complete) + // Emits QueuedGrandchildEvent (NOT awaited β†’ stays in queue) + // After grandchild completes, sleeps 300ms β†’ times out at 80ms total + const child_handler = async (event: InstanceType) => { + execution_log.push("child_start"); + grandchild_ref = event.bus?.emit(GrandchildEvent({ event_timeout: 0.035 }))!; + queued_grandchild_ref = event.bus?.emit(QueuedGrandchildEvent({ event_timeout: 0.5 }))!; + // Queue-jump: processes GrandchildEvent immediately, bypassing handler limiter. + // All 5 GC handlers run in parallel. Completes in ~35ms. + await grandchild_ref.done(); + execution_log.push("child_after_grandchild"); + await delay(300); // will be interrupted: child started at ~t=0, timeout at 80ms + execution_log.push("child_end"); // should never reach here + return "child_done"; + }; + + // ── SiblingEvent handler ────────────────────────────────────────────── + // This event is emitted by top_handler_main but NOT awaited. Stays in + // pending_event_queue until top_handler_main times out at 250ms β†’ + // cancelled by cancelPendingChildProcessing. + const sibling_handler = () => { + execution_log.push("sibling_start"); // should never reach here + return "sibling_done"; + }; + + // ── TopEvent handlers ───────────────────────────────────────────────── + // These run SERIALLY (via bus handler limiter) because TopEvent is + // processed by the normal runloop (not queue-jumped). top_handler_fast + // goes first, completes quickly, then top_handler_main starts. + + const top_handler_fast = async () => { + execution_log.push("top_fast_start"); + await delay(2); + execution_log.push("top_fast_complete"); + return "top_fast_done"; + }; + + const top_handler_main = async (event: InstanceType) => { + execution_log.push("top_main_start"); + child_ref = event.bus?.emit(ChildEvent({ event_timeout: 0.08 }))!; + sibling_ref = event.bus?.emit(SiblingEvent({ event_timeout: 0.5 }))!; + // Queue-jump: processes ChildEvent immediately (which in turn queue-jumps + // GrandchildEvent). This entire subtree resolves in ~80ms (child timeout). + await child_ref.done(); + execution_log.push("top_main_after_child"); + await delay(300); // will be interrupted: top_handler_main started at ~t=2, timeout at 250ms + execution_log.push("top_main_end"); // should never reach here + return "top_main_done"; + }; + + // Register handlers (registration order = execution order for serial) + bus.on(TopEvent, top_handler_fast); + bus.on(TopEvent, top_handler_main); + bus.on(ChildEvent, child_handler); + bus.on(GrandchildEvent, gc_handler_a); + bus.on(GrandchildEvent, gc_handler_b); + bus.on(GrandchildEvent, gc_handler_c); + bus.on(GrandchildEvent, gc_handler_d); + bus.on(GrandchildEvent, gc_handler_e); + bus.on(QueuedGrandchildEvent, queued_gc_handler); + bus.on(SiblingEvent, sibling_handler); + + // ── Dispatch and wait ───────────────────────────────────────────────── + const top = bus.dispatch(TopEvent({ event_timeout: 0.25 })); + await top.done(); + await bus.waitUntilIdle(); + + // ═══════════════════════════════════════════════════════════════════════ + // ASSERTIONS + // ═══════════════════════════════════════════════════════════════════════ + + // ── TopEvent: 2 handler results (1 completed, 1 timed out) ────────── + assert.equal(top.event_status, "completed"); + assert.ok(top.event_errors.length >= 1, "TopEvent should have at least 1 error"); + + const top_results = Array.from(top.event_results.values()); + assert.equal(top_results.length, 2, "TopEvent should have 2 handler results"); + + const top_fast_result = top_results.find((r) => r.handler_name === "top_handler_fast"); + assert.ok(top_fast_result, "top_handler_fast result should exist"); + assert.equal(top_fast_result!.status, "completed"); + assert.equal(top_fast_result!.result, "top_fast_done"); + + const top_main_result = top_results.find((r) => r.handler_name === "top_handler_main"); + assert.ok(top_main_result, "top_handler_main result should exist"); + assert.equal(top_main_result!.status, "error"); + assert.ok( + top_main_result!.error instanceof EventHandlerTimeoutError, + "top_handler_main should have timed out" + ); + + // ── ChildEvent: 1 handler result (timed out at 80ms) ──────────────── + assert.ok(child_ref, "ChildEvent should have been emitted"); + assert.equal(child_ref!.event_status, "completed"); + + const child_results = Array.from(child_ref!.event_results.values()); + assert.equal(child_results.length, 1, "ChildEvent should have 1 handler result"); + assert.equal(child_results[0].handler_name, "child_handler"); + assert.equal(child_results[0].status, "error"); + assert.ok( + child_results[0].error instanceof EventHandlerTimeoutError, + "child_handler should have timed out" + ); + + // ── GrandchildEvent: 5 handler results (2 completed, 3 timed out) ── + assert.ok(grandchild_ref, "GrandchildEvent should have been emitted"); + assert.equal(grandchild_ref!.event_status, "completed"); + + const gc_results = Array.from(grandchild_ref!.event_results.values()); + assert.equal(gc_results.length, 5, "GrandchildEvent should have 5 handler results"); + + // Handlers a, c, e: slow β†’ individually timed out + for (const name of ["gc_handler_a", "gc_handler_c", "gc_handler_e"]) { + const result = gc_results.find((r) => r.handler_name === name); + assert.ok(result, `${name} result should exist`); + assert.equal(result!.status, "error", `${name} should have status error`); + assert.ok( + result!.error instanceof EventHandlerTimeoutError, + `${name} should be EventHandlerTimeoutError` + ); + } + + // Handlers b, d: fast β†’ completed successfully + const gc_b_result = gc_results.find((r) => r.handler_name === "gc_handler_b"); + assert.ok(gc_b_result, "gc_handler_b result should exist"); + assert.equal(gc_b_result!.status, "completed"); + assert.equal(gc_b_result!.result, "gc_b_done"); + + const gc_d_result = gc_results.find((r) => r.handler_name === "gc_handler_d"); + assert.ok(gc_d_result, "gc_handler_d result should exist"); + assert.equal(gc_d_result!.status, "completed"); + assert.equal(gc_d_result!.result, "gc_d_done"); + + // ── QueuedGrandchildEvent: CANCELLED by child_handler timeout ─────── + // This event was emitted but never awaited. It sat in pending_event_queue + // until child_handler timed out, which triggered cancelPendingChildProcessing + // to walk ChildEvent.event_children and cancel all pending handlers. + assert.ok(queued_grandchild_ref, "QueuedGrandchildEvent should have been emitted"); + assert.equal(queued_grandchild_ref!.event_status, "completed"); + + const queued_gc_results = Array.from(queued_grandchild_ref!.event_results.values()); + assert.equal(queued_gc_results.length, 1, "QueuedGC should have 1 handler result"); + assert.equal(queued_gc_results[0].status, "error"); + assert.ok( + queued_gc_results[0].error instanceof EventHandlerCancelledError, + "QueuedGC handler should be EventHandlerCancelledError (not timeout β€” it never ran)" + ); + // Verify the cancellation error chain: CancelledError.parent_error β†’ TimeoutError + assert.ok( + (queued_gc_results[0].error as EventHandlerCancelledError).parent_error instanceof + EventHandlerTimeoutError, + "QueuedGC cancellation should reference the child_handler's timeout as parent_error" + ); + + // ── SiblingEvent: CANCELLED by top_handler_main timeout ───────────── + // Same pattern: emitted but never awaited, stays in queue, cancelled when + // top_handler_main times out and cancelPendingChildProcessing runs. + assert.ok(sibling_ref, "SiblingEvent should have been emitted"); + assert.equal(sibling_ref!.event_status, "completed"); + + const sibling_results = Array.from(sibling_ref!.event_results.values()); + assert.equal(sibling_results.length, 1, "SiblingEvent should have 1 handler result"); + assert.equal(sibling_results[0].status, "error"); + assert.ok( + sibling_results[0].error instanceof EventHandlerCancelledError, + "SiblingEvent handler should be EventHandlerCancelledError" + ); + assert.ok( + (sibling_results[0].error as EventHandlerCancelledError).parent_error instanceof + EventHandlerTimeoutError, + "SiblingEvent cancellation should reference top_handler_main's timeout as parent_error" + ); + + // ── Execution log: verify what ran and what didn't ────────────────── + // These handlers started AND completed: + assert.ok(execution_log.includes("top_fast_start"), "top_fast should have started"); + assert.ok(execution_log.includes("top_fast_complete"), "top_fast should have completed"); + assert.ok(execution_log.includes("gc_b_complete"), "gc_b (sync) should have completed"); + assert.ok(execution_log.includes("gc_d_start"), "gc_d should have started"); + assert.ok(execution_log.includes("gc_d_complete"), "gc_d should have completed"); + + // These handlers started but were interrupted by their own timeout: + assert.ok(execution_log.includes("gc_a_start"), "gc_a should have started"); + assert.ok(!execution_log.includes("gc_a_end"), "gc_a should NOT have finished (timed out)"); + assert.ok(execution_log.includes("gc_c_start"), "gc_c should have started"); + assert.ok(!execution_log.includes("gc_c_end"), "gc_c should NOT have finished (timed out)"); + assert.ok(execution_log.includes("gc_e_start"), "gc_e should have started"); + assert.ok(!execution_log.includes("gc_e_end"), "gc_e should NOT have finished (timed out)"); + + // These handlers started and progressed, then parent timeout interrupted: + assert.ok(execution_log.includes("top_main_start"), "top_main should have started"); + assert.ok(execution_log.includes("child_start"), "child should have started"); + assert.ok( + execution_log.includes("child_after_grandchild"), + "child should have continued after grandchild completed" + ); + assert.ok( + execution_log.includes("top_main_after_child"), + "top_main should have continued after child completed" + ); + assert.ok(!execution_log.includes("child_end"), "child should NOT have finished (timed out)"); + assert.ok(!execution_log.includes("top_main_end"), "top_main should NOT have finished (timed out)"); + + // These handlers never ran at all (cancelled before starting): + assert.ok(!execution_log.includes("queued_gc_start"), "queued_gc should never have started"); + assert.ok(!execution_log.includes("sibling_start"), "sibling should never have started"); + + // ── Parent-child tree structure ───────────────────────────────────── + assert.ok( + top.event_children.some((c) => c.event_id === child_ref!.event_id), + "ChildEvent should be in TopEvent.event_children" + ); + assert.ok( + top.event_children.some((c) => c.event_id === sibling_ref!.event_id), + "SiblingEvent should be in TopEvent.event_children" + ); + assert.ok( + child_ref!.event_children.some((c) => c.event_id === grandchild_ref!.event_id), + "GrandchildEvent should be in ChildEvent.event_children" + ); + assert.ok( + child_ref!.event_children.some((c) => c.event_id === queued_grandchild_ref!.event_id), + "QueuedGrandchildEvent should be in ChildEvent.event_children" + ); + + // ── Timing invariants ────────────────────────────────────────────── + // All events should have completion timestamps + for (const evt of [top, child_ref!, grandchild_ref!, queued_grandchild_ref!, sibling_ref!]) { + assert.ok(evt.event_completed_at, `${evt.event_type} should have event_completed_at`); + } + // All handler results should have started_at and completed_at + for (const result of top_results) { + assert.ok(result.started_at, `${result.handler_name} should have started_at`); + assert.ok(result.completed_at, `${result.handler_name} should have completed_at`); + } + for (const result of gc_results) { + assert.ok(result.started_at, `${result.handler_name} should have started_at`); + assert.ok(result.completed_at, `${result.handler_name} should have completed_at`); + } +}); + +// ============================================================================= +// Verify the timeoutβ†’cancellation error chain is intact at every level. +// When a parent handler times out and cancels a child's pending handlers, +// the EventHandlerCancelledError.parent_error must reference the specific +// EventHandlerTimeoutError that caused the cascade. This test creates a +// 2-level chain where each level's cancellation error can be inspected. +// ============================================================================= + +test("cancellation error chain preserves parent_error references through hierarchy", async () => { + const OuterEvent = BaseEvent.extend("ErrorChainOuter", {}); + const InnerEvent = BaseEvent.extend("ErrorChainInner", {}); + const DeepEvent = BaseEvent.extend("ErrorChainDeep", {}); + + const bus = new EventBus("ErrorChainBus", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial" + }); + + let inner_ref: InstanceType | null = null; + let deep_ref: InstanceType | null = null; + + // DeepEvent handler: sleeps long, will be still pending when inner times out + // Because DeepEvent is emitted but NOT awaited, it stays in the queue. + const deep_handler = async () => { + await delay(200); + return "deep_done"; + }; + + // InnerEvent handler: emits DeepEvent (not awaited), then sleeps long β†’ times out + const inner_handler = async (event: InstanceType) => { + deep_ref = event.bus?.emit(DeepEvent({ event_timeout: 0.5 }))!; + await delay(200); // interrupted by inner timeout + return "inner_done"; + }; + + // OuterEvent handler: emits InnerEvent (awaited), then sleeps long β†’ times out + const outer_handler = async (event: InstanceType) => { + inner_ref = event.bus?.emit(InnerEvent({ event_timeout: 0.04 }))!; + await inner_ref.done(); + await delay(200); // interrupted by outer timeout + return "outer_done"; + }; + + bus.on(OuterEvent, outer_handler); + bus.on(InnerEvent, inner_handler); + bus.on(DeepEvent, deep_handler); + + const outer = bus.dispatch(OuterEvent({ event_timeout: 0.15 })); + await outer.done(); + await bus.waitUntilIdle(); + + // Outer handler timed out + const outer_result = Array.from(outer.event_results.values())[0]; + assert.equal(outer_result.status, "error"); + assert.ok(outer_result.error instanceof EventHandlerTimeoutError); + const outer_timeout = outer_result.error as EventHandlerTimeoutError; + + // Inner handler timed out (its own 40ms timeout, not outer's) + assert.ok(inner_ref); + const inner_result = Array.from(inner_ref!.event_results.values())[0]; + assert.equal(inner_result.status, "error"); + assert.ok(inner_result.error instanceof EventHandlerTimeoutError); + const inner_timeout = inner_result.error as EventHandlerTimeoutError; + + // Inner's timeout is from InnerEvent's own event_timeout (40ms), + // not inherited from outer + assert.ok( + inner_timeout.message.includes("inner_handler"), + "Inner timeout should name inner_handler" + ); + + // DeepEvent was cancelled when inner_handler timed out. + // The cancellation error should reference inner_handler's timeout (not outer's). + assert.ok(deep_ref); + const deep_result = Array.from(deep_ref!.event_results.values())[0]; + assert.equal(deep_result.status, "error"); + assert.ok( + deep_result.error instanceof EventHandlerCancelledError, + "DeepEvent handler should be cancelled, not timed out (it never started)" + ); + const deep_cancel = deep_result.error as EventHandlerCancelledError; + assert.ok( + deep_cancel.parent_error instanceof EventHandlerTimeoutError, + "Cancellation should reference parent timeout" + ); + // The parent_error should be the INNER handler's timeout, because that's + // the handler whose cancelPendingChildProcessing actually cancelled DeepEvent. + assert.ok( + deep_cancel.parent_error.message.includes("inner_handler") || + deep_cancel.parent_error.message.includes("child_handler"), + "parent_error should reference the handler that directly caused cancellation" + ); +}); + +// ============================================================================= +// When a parent has a timeout but a child has event_timeout: null (no timeout), +// the child's handlers run indefinitely on their own β€” but if the PARENT times +// out, cancelPendingChildProcessing still cancels any pending child handlers. +// This tests that cancellation works across timeout/no-timeout boundaries. +// ============================================================================= + +test("parent timeout cancels children that have no timeout of their own", async () => { + const ParentEvent = BaseEvent.extend("TimeoutBoundaryParent", {}); + const NoTimeoutChild = BaseEvent.extend("TimeoutBoundaryChild", {}); + + const bus = new EventBus("TimeoutBoundaryBus", { + event_concurrency: "bus-serial", + handler_concurrency: "bus-serial", + event_timeout: null // no bus-level default + }); + + let child_ref: InstanceType | null = null; + let child_handler_ran = false; + + // Child handler: would run forever but should be cancelled + const child_slow_handler = async () => { + child_handler_ran = true; + await delay(500); + return "child_done"; + }; + + // Parent handler: emits child (not awaited), then sleeps β†’ parent times out + const parent_handler = async (event: InstanceType) => { + // event_timeout: null means the child has no timeout of its own. + // It would run forever if the parent didn't cancel it. + child_ref = event.bus?.emit(NoTimeoutChild({ event_timeout: null }))!; + await delay(200); + return "parent_done"; + }; + + bus.on(ParentEvent, parent_handler); + bus.on(NoTimeoutChild, child_slow_handler); + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.03 })); + await parent.done(); + await bus.waitUntilIdle(); + + // Parent timed out + const parent_result = Array.from(parent.event_results.values())[0]; + assert.equal(parent_result.status, "error"); + assert.ok(parent_result.error instanceof EventHandlerTimeoutError); + + // Child should exist and be cancelled (it was in the queue, never started) + assert.ok(child_ref, "Child event should have been emitted"); + assert.equal(child_ref!.event_status, "completed"); + assert.equal(child_handler_ran, false, "Child handler should never have started"); + + const child_results = Array.from(child_ref!.event_results.values()); + assert.equal(child_results.length, 1); + assert.ok( + child_results[0].error instanceof EventHandlerCancelledError, + "Child handler should be cancelled by parent timeout, even though it has no timeout" + ); +}); From 2acac40096f39f858a4294b7bfcada5a79603195 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Thu, 5 Feb 2026 23:55:09 -0800 Subject: [PATCH 49/79] all tests passing --- bubus-ts/README.md | 66 +- bubus-ts/TODOS.txt | 296 +++ bubus-ts/eslint.config.js | 24 +- bubus-ts/examples/log_tree_demo.ts | 106 +- bubus-ts/pnpm-lock.yaml | 495 +++-- bubus-ts/prettier.config.js | 8 +- bubus-ts/src/async_context.ts | 44 +- bubus-ts/src/base_event.ts | 449 ++--- bubus-ts/src/event_bus.ts | 1649 ++++++++-------- bubus-ts/src/event_result.ts | 80 +- bubus-ts/src/index.ts | 18 +- bubus-ts/src/semaphores.ts | 114 +- bubus-ts/src/types.ts | 28 +- bubus-ts/tests/_perf_profile.ts | 52 + bubus-ts/tests/comprehensive_patterns.test.ts | 1689 ++++++++--------- bubus-ts/tests/context_propagation.test.ts | 636 +++---- bubus-ts/tests/debounce.test.ts | 162 +- bubus-ts/tests/error_handling.test.ts | 295 ++- bubus-ts/tests/event_bus_proxy.test.ts | 290 ++- bubus-ts/tests/event_results.test.ts | 96 +- bubus-ts/tests/eventbus_basics.test.ts | 687 +++---- bubus-ts/tests/fifo.test.ts | 45 +- bubus-ts/tests/find.test.ts | 812 ++++---- bubus-ts/tests/forwarding.test.ts | 242 +-- bubus-ts/tests/handlers.test.ts | 190 +- bubus-ts/tests/locking.test.ts | 1580 ++++++++------- bubus-ts/tests/log_tree.test.ts | 332 ++-- bubus-ts/tests/parent_child.test.ts | 88 +- bubus-ts/tests/performance.test.ts | 348 +++- bubus-ts/tests/timeout.test.ts | 1293 ++++++------- bubus-ts/tests/typed_results.test.ts | 238 ++- bubus-ts/tsconfig.json | 2 +- 32 files changed, 6381 insertions(+), 6073 deletions(-) create mode 100644 bubus-ts/TODOS.txt create mode 100644 bubus-ts/tests/_perf_profile.ts diff --git a/bubus-ts/README.md b/bubus-ts/README.md index b967cc2..09ed50b 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -6,35 +6,42 @@ gotchas we uncovered while matching behavior. It intentionally does **not** re-d ## Key Differences vs Python ### 1) Awaiting events: `event.done()` instead of `await event` + - Python: `await event` waits for handlers and can jump the queue when awaited inside a handler. - TS: use `await event.done()` for the same behavior. - Outside a handler, `done()` just waits for completion (it does not jump the queue). - Inside a handler, `done()` triggers immediate processing (queue jump) on **all buses** where the event is queued. ### 2) Cross-bus queue jump (forwarding) + - Python uses a global re-entrant lock to let awaited events process immediately on every bus where they appear. -- TS does **not** use AsyncLocalStorage or a global lock (browser support). -- Instead, `EventBus.instances` + `immediate_processing_stack_depth` pauses each runloop and processes the same event immediately across buses. +- TS optionally uses `AsyncLocalStorage` on Node.js (auto-detected) to capture dispatch context, but falls back gracefully in browsers. +- `EventBus.instances` + `immediate_processing_stack_depth` pauses each runloop and processes the same event immediately across buses. ### 3) `event.bus` is a BusScopedEvent view + - In Python, `event.event_bus` is dynamic (contextvars). - In TS, `event.bus` is provided by a **BusScopedEvent** (a Proxy over the original event). - That proxy injects a bus-bound `emit/dispatch` to ensure correct parent/child tracking. ### 4) Monotonic timestamps + - JS `Date.now()` is not strictly monotonic at millisecond granularity. - To keep FIFO tests stable, we generate strictly increasing ISO timestamps via `BaseEvent.nextIsoTimestamp()`. ### 5) No middleware, no WAL, no SQLite mirrors + - Those Python features were intentionally dropped for the JS version. ### 6) Default timeouts come from the EventBus + - `BaseEvent.event_timeout` defaults to `null`. - When dispatched, `EventBus` applies its default `event_timeout` (60s unless configured). - You can set `{ event_timeout: null }` on the bus to disable timeouts entirely. - Handlers that exceed 15s emit a warning (deadlock detection signal); the event still continues unless a timeout is hit. ## EventBus Options + All options are passed to `new EventBus(name, options)`. - `max_history_size?: number | null` (default: `100`) @@ -57,22 +64,23 @@ All options are passed to `new EventBus(name, options)`. You can override concurrency per event and per handler: ```ts -const FastEvent = BaseEvent.extend("FastEvent", { - payload: z.string() -}); +const FastEvent = BaseEvent.extend('FastEvent', { + payload: z.string(), +}) // Per-event override (highest precedence) const event = FastEvent({ - payload: "x", - event_concurrency: "parallel", - handler_concurrency: "parallel" -}); + payload: 'x', + event_concurrency: 'parallel', + handler_concurrency: 'parallel', +}) // Per-handler override (lower precedence) -bus.on(FastEvent, handler, { handler_concurrency: "parallel" }); +bus.on(FastEvent, handler, { handler_concurrency: 'parallel' }) ``` Precedence order (highest β†’ lowest): + 1. Event instance overrides (`event_concurrency`, `handler_concurrency`) 2. Handler options (`handler_concurrency`) 3. Bus defaults (`event_concurrency`, `handler_concurrency`) @@ -86,8 +94,8 @@ Handlers can be configured with `HandlerOptions`: ```ts bus.on(SomeEvent, handler, { order: -10, // serial ordering (lower runs earlier) - handler_concurrency: "parallel" -}); + handler_concurrency: 'parallel', +}) ``` - `order: number` runs handlers in ascending order (serial). @@ -116,16 +124,18 @@ under different `event_concurrency` / `handler_concurrency` configurations. ### 1) Base execution flow (applies to all modes) **Dispatch (non-awaited):** + 1. `dispatch()` normalizes to `original_event`, sets `bus` if missing. 2. Captures `_dispatch_context` (AsyncLocalStorage if available). 3. Applies `event_timeout_default` if `event.event_timeout === null`. 4. If this bus is already in `event_path` (or `eventHasVisited()`), return a BusScopedEvent without queueing. 5. Append bus name to `event_path`, record child relationship (if `event_parent_id` is set). -6. Add to `event_history` + `event_history_by_id`. +6. Add to `event_history` (a `Map` keyed by event id). 7. Increment `event_pending_buses`. 8. Push to `pending_event_queue` and `startRunloop()`. **Runloop + processing:** + 1. `runloop()` drains `pending_event_queue`. 2. Adds event id to `in_flight_event_ids`. 3. Calls `scheduleEventProcessing()` (async). @@ -139,7 +149,7 @@ under different `event_concurrency` / `handler_concurrency` configurations. ### 2) Event concurrency modes (`event_concurrency`) -- **`global-serial`**: events are serialized across *all* buses using the global event limiter. +- **`global-serial`**: events are serialized across _all_ buses using the global event limiter. - **`bus-serial`**: events are serialized per bus; different buses can overlap. - **`parallel`**: no event limiter; events can run concurrently on the same bus. - **`auto`**: resolves to the bus default. @@ -162,6 +172,7 @@ Even if events are parallel, handlers can still be serialized: ### 4) Forwarding across buses (non-awaited) When a handler on Bus A calls `bus_b.dispatch(event)` without awaiting: + - Bus A continues running its handler. - Bus B queues and processes the event according to **Bus B’s** concurrency settings. - No coupling unless both buses use the global limiters. @@ -170,18 +181,23 @@ When a handler on Bus A calls `bus_b.dispatch(event)` without awaiting: When `event.done()` is awaited inside a handler, **queue-jump** happens: -1. `BaseEvent.done()` detects it’s inside a handler and calls `_runImmediately()`. -2. `_runImmediately()` removes the event from the pending queue (if present). -3. `runImmediatelyAcrossBuses()` processes the event immediately on all buses where it is queued. -4. While immediate processing is active, each affected bus increments `immediate_processing_stack_depth`, +1. `BaseEvent.done()` detects it's inside a handler and calls `_runImmediately()`. +2. `_runImmediately()` **yields** the parent handler's concurrency limiter (if held) so child handlers can acquire it. +3. `_runImmediately()` removes the event from the pending queue (if present). +4. `runImmediatelyAcrossBuses()` processes the event immediately on all buses where it is queued. +5. While immediate processing is active, each affected bus increments `immediate_processing_stack_depth`, and its `runloop()` pauses to prevent unrelated events from running. -5. Once immediate processing completes, `immediate_processing_waiters` resume the paused runloops. +6. Once immediate processing completes, `_runImmediately()` **re-acquires** the parent handler's limiter + (unless the parent timed out while the child was processing). +7. `immediate_processing_waiters` resume the paused runloops. -**Important:** queue-jump bypasses all event and handler limiters to guarantee correctness and FIFO semantics. +**Important:** queue-jump bypasses event limiters but **respects** handler limiters via yield-and-reacquire. +This means queue-jumped handlers run serially on a `bus-serial` bus, not in parallel. ### 6) Precedence recap Highest β†’ lowest: + 1. Event instance fields (`event_concurrency`, `handler_concurrency`) 2. Handler options (`handler_concurrency`) 3. Bus defaults @@ -191,7 +207,9 @@ Highest β†’ lowest: ## Gotchas and Design Choices (What surprised us) ### A) Handler attribution without AsyncLocalStorage + We need to know **which handler emitted a child** to correctly assign: + - `event_parent_id` - `event_emitted_by_handler_id` - and to attach child events under the correct handler in the tree. @@ -200,6 +218,7 @@ In TS we do this by injecting a **BusScopedEvent** into handlers, which captures propagates it via `event_emitted_by_handler_id`. This keeps parentage deterministic even with nested awaits. ### B) Why `immediate_processing_stack_depth` exists + When an event is awaited inside a handler, the event must **jump the queue**. If the runloop continues normally, it could process unrelated events ("overshoot"), breaking FIFO guarantees. @@ -207,17 +226,21 @@ it could process unrelated events ("overshoot"), breaking FIFO guarantees. the runloop resumes in FIFO order. This matches the Python behavior. ### C) BusScopedEvent: why it exists and how it works + Forwarding exposes a subtle bug: if you pass the **same event object** to another bus, a naive implementation can mutate `event.bus` mid-handler and break parent-child tracking. To prevent that: + - Handlers always receive a **BusScopedEvent** (Proxy of the original event). - Its `bus` property is a proxy over the real `EventBus`. - That proxy intercepts `emit/dispatch` to set `event_parent_id` and attach children to the correct handler. - The original event object is still the canonical one stored in history. ### D) Cross-bus immediate processing (forwarding + awaiting) + When you `await event.done()` inside a handler: + - the system finds all buses that have this event queued (using `EventBus.instances` + `event_path`) - pauses their runloops - processes the event immediately on each bus @@ -226,11 +249,14 @@ When you `await event.done()` inside a handler: This gives the same "awaited events jump the queue" semantics as Python, but without a global lock. ### E) Why `event.bus` is required for `done()` + `done()` is the signal to run an event immediately when called inside a handler. Without a bus, we can't perform the queue jump, so `done()` throws if no bus is attached. ## Summary + The core contract is preserved: + - FIFO order - child event tracking - forwarding diff --git a/bubus-ts/TODOS.txt b/bubus-ts/TODOS.txt new file mode 100644 index 0000000..2e166b1 --- /dev/null +++ b/bubus-ts/TODOS.txt @@ -0,0 +1,296 @@ +Coordination Refactoring Plan +============================= +(Updated after timeout/limiter fixes and data-model cleanup landed) + +Code Quality Goals +------------------ +- Minimum unique abstractions +- Minimum fields that are directly mutated +- Fewest flags, state variables, stacks, sets, counters, callbacks, and signals +- Unified interfaces for concurrency decisions at handler, event, and bus level +- All locking-related logic in semaphores.ts and gates.ts, encapsulated from main flow +- Derive everything possible from event_results, event, bus.event_history, bus.handlers + (use getters rather than adding state variables) +- Clear, descriptive naming + +Files touched +------------- +- NEW: `src/gates.ts` +- EDIT: `src/semaphores.ts` (add HandlerLimiterLease) +- EDIT: `src/event_result.ts` +- EDIT: `src/base_event.ts` +- EDIT: `src/event_bus.ts` +- EDIT: `tests/comprehensive_patterns.test.ts` +- EDIT: `tests/timeout.test.ts` (limiter leak regression tests) + +No new exports from `index.ts` (all helpers remain internal). + +================================================================================ +COMPLETED +================================================================================ + +Done: Deferred migration + - `Deferred` type + `withResolvers()` live in `semaphores.ts` + - BaseEvent uses `_done: Deferred | null` + - `ensureDonePromise()` uses `withResolvers()` + - No separate `src/deferred.ts` needed + +Done: Limiter leak mitigation (partial) + - `runHandlerEntry` manually manages limiter acquire/release (no `runWithLimiter`) + - `handler_still_owns_limiter` check prevents double-release when limiter was yielded + - `_runImmediately` guards reacquire with `effective_result.status === "started"` + - IMPORTANT: race still exists and is NOT fully fixed yet + - window: `_runImmediately` checks `status === "started"` and then awaits + `limiter.acquire()`; handler may finish during that await, causing a leaked + reacquire token + - do not treat this as solved until Step 1 gate-based lease state machine is implemented + +Done: BaseEvent data-model cleanup + - Removed `event_created_at_ms` (redundant; use `Date.parse(event_created_at)`) + - Removed mutable `event_errors: unknown[]` array and `markFailed()` method + - `event_errors` is now a getter derived from `event_results` + - `event_children` is now a getter derived from `event_results` + +Done: Handler map consolidation + - `handlers_by_key` + `handlers_by_id` β†’ single `handlers: Map` + - `collectHandlers` uses two-pass ordering (exact-match first, wildcards second) + +Done: Event history consolidation + - `event_history: BaseEvent[]` + `event_history_by_id` β†’ single `event_history: Map` + +================================================================================ +REMAINING WORK +================================================================================ + +================================================================================ +0. Define gate namespaces and ownership boundaries (required design step) +================================================================================ + +Adopt a single naming model: +- `event_result.gate.*` for handler-execution transitions +- `event.gate.*` for event lifecycle transitions +- `event_bus.gate.*` for bus-level queue/idle/limiter coordination + +Required method names (locked): +- `event_result.gate`: + - `enterHandlerRun` + - `yieldPermitForChildRun` + - `reclaimPermitIfRunning` + - `exitHandlerRun` + - `getExecutionState` + - `runQueueJump` +- `event.gate`: + - `enterEventStarted` + - `completeIfDoneHandling` + - `waitForCompletion` + - `cancelPendingDescendants` +- `event_bus.gate`: + - `requestPause` + - `waitUntilResumed` + - `isPaused` + - `waitForIdle` + - `notifyIdleListeners` + - `getLimiterForEvent` + - `getLimiterForHandler` + +Important: do not re-introduce scattered fields like +`_runloop_hold_release`, `queue_jump_hold`, `_held_handler_limiter`, +`idle_waiters`, `idle_check_pending`, `idle_check_streak`, +`immediate_processing_stack_depth`, `immediate_processing_waiters`. + +================================================================================ +1. Implement `event_result.gate` and race-safe limiter ownership (required correctness work) +================================================================================ + +The current manual tracking (`handler_still_owns_limiter` + `status === "started"`) +still has a race and can leak permits. This is mandatory to fix first. + +Implementation shape: +- Add internal lease state machine in `src/semaphores.ts` + (or in `src/gates.ts` if colocated with other gate internals): + - state: `"held" | "yielded" | "closed"` + - race-safe reacquire behavior: if state becomes closed while awaiting acquire, + immediately release to avoid leaking a permit. +- `event_result.gate.enterHandlerRun(limiter)` claims execution ownership +- `event_result.gate.yieldPermitForChildRun()` releases permit only when currently held +- `event_result.gate.reclaimPermitIfRunning()` reacquires only when still running +- `event_result.gate.exitHandlerRun()` idempotently closes and releases if held +- `event_result.gate.runQueueJump(fn)` wraps yield β†’ run β†’ reclaim as one transition API +- `event_result.gate.getExecutionState()` is read-only debug/inspection + +Storage rule: +- keep gate-private mutable state off public EventResult fields +- use private state managed by gate internals (closure/private class/WeakMap) + +================================================================================ +2. Implement flat `event_bus.gate.*` coordination methods +================================================================================ + +Create `src/gates.ts` and move bus coordination internals behind `event_bus.gate`. + +`event_bus.gate.requestPause()`: +- increments pause depth +- returns idempotent release closure + +`event_bus.gate.waitUntilResumed()`: +- fast path if pause depth is 0 +- otherwise await waiter queue + +`event_bus.gate.isPaused()`: +- true while pause depth > 0 + +`event_bus.gate.waitForIdle()` + `event_bus.gate.notifyIdleListeners()`: +- encapsulate the existing two-snapshot idle confirmation pattern +- keep idle check scheduling private to the gate + +`event_bus.gate.getLimiterForEvent(event)` and +`event_bus.gate.getLimiterForHandler(event, options)`: +- move effective limiter resolution behind gate accessor methods +- preserve current precedence behavior + +Storage rule: +- bus coordination state (pause depth/waiters, idle waiters/check flags) is private to gate + +================================================================================ +3. Wire `event_bus.gate` into EventBus call sites +================================================================================ + +`src/event_bus.ts` call-site migration: +- `_runImmediately()`: + - replace direct queue-jump flag/field mutation with `result.gate.runQueueJump(...)` + - pause via `bus.gate.requestPause()` on each participating bus +- `runImmediatelyAcrossBuses()`: + - use `requestPause()` releases in `finally` +- `runloop()`: + - `if (this.gate.isPaused()) await this.gate.waitUntilResumed()` +- `waitUntilIdle()`: + - delegate to `this.gate.waitForIdle()` +- `scheduleEventProcessing().finally` and runloop exit path: + - call `this.gate.notifyIdleListeners()` +- `resolveEventLimiter` and `resolveHandlerLimiter`: + - fold into `this.gate.getLimiterForEvent(...)` and + `this.gate.getLimiterForHandler(...)` + +After migration remove old EventBus fields/methods: +- `idle_waiters`, `idle_check_pending`, `idle_check_streak` +- `immediate_processing_stack_depth`, `immediate_processing_waiters` +- `scheduleIdleCheck`, `resolveIdleWaitersIfDone`, + `releaseImmediateProcessingWaiters`, + `resolveEventLimiter`, `resolveHandlerLimiter` + +================================================================================ +4. Add `event.gate.*` lifecycle facade +================================================================================ + +In `src/base_event.ts`: +- add `event.gate.enterEventStarted()` as lifecycle transition wrapper +- add `event.gate.completeIfDoneHandling()` wrapper for completion checks +- add `event.gate.waitForCompletion()` wrapper around done promise +- add `event.gate.cancelPendingDescendants(reason)` lifecycle entry point + +Keep these as normal top-level getters on event (not gate methods): +- `event.event_children` (derived getter) +- `event.event_errors` (derived getter) + +Migration call sites: +- EventBus `processEvent()` uses `event.gate.enterEventStarted()` and + `event.gate.completeIfDoneHandling()` +- completion and parent notification paths use gate wrappers +- timeout cancellation paths call `event.gate.cancelPendingDescendants(reason)` + +================================================================================ +5. Tests and invariants update +================================================================================ + +Update queue-jump depth tests away from direct field access: +- remove assertions tied to `immediate_processing_stack_depth` +- assert pause semantics via `bus.gate.isPaused()` at equivalent checkpoints + +Add explicit limiter-race regressions: +- timeout during awaited `child.done()` does not leak permit +- next event still runs on same bus after timeout path +- nested queue-jump under timeout/cancellation remains permit-safe + +Keep/expand coverage for: +- cross-bus queue-jump ordering +- idle wait semantics +- forwarding + `event.bus` scoped behavior + +================================================================================ +6. Verification +================================================================================ + +Focused first: +- `node --expose-gc --test --import tsx tests/locking.test.ts` +- `node --expose-gc --test --import tsx tests/comprehensive_patterns.test.ts` +- `node --expose-gc --test --import tsx tests/timeout.test.ts` +- `node --expose-gc --test --import tsx tests/event_bus_proxy.test.ts` +- `node --expose-gc --test --import tsx tests/forwarding.test.ts` + +Then full suite: +- `pnpm test` + +================================================================================ +7. Implementation sequence (execution order) +================================================================================ + +1) Add gate surfaces first (no behavior change): +- Add `gate` accessors on EventBus/EventResult/BaseEvent. +- Keep internals on current logic temporarily so call sites can migrate safely. + +2) Implement `event_result.gate` with private execution state: +- Move permit ownership to gate-private state (`held/yielded/closed`). +- Route `_runImmediately` + `runHandlerEntry` permit transitions through: + `enterHandlerRun`, `yieldPermitForChildRun`, `reclaimPermitIfRunning`, + `exitHandlerRun`, `runQueueJump`. + +3) Migrate runloop pause to `event_bus.gate`: +- Replace queue-jump pause/depth fields with `requestPause`, + `waitUntilResumed`, `isPaused`. +- Keep release callbacks gate-internal; no public flag fields on EventResult. + +4) Migrate idle waiting to `event_bus.gate`: +- Replace idle waiters/check flags + scheduling methods with: + `waitForIdle`, `notifyIdleListeners`. +- Preserve two-snapshot confirmation semantics. + +5) Move limiter resolution behind `event_bus.gate`: +- Replace direct resolver call sites with: + `getLimiterForEvent`, `getLimiterForHandler`. +- Keep existing concurrency precedence behavior unchanged. + +6) Add `event.gate` lifecycle wrappers and switch call sites: +- Use `enterEventStarted`, `completeIfDoneHandling`, `waitForCompletion`, + `cancelPendingDescendants`. +- Keep `event.event_children` + `event.event_errors` as non-gate getters. + +7) Remove old scattered fields/methods: +- Delete queue-jump/idle/permit legacy fields and helper methods only after + all call sites use gates. + +8) Update tests in two passes: +- First migrate assertions from raw internal fields to gate semantics. +- Then add explicit limiter-race regressions (timeout + queue-jump leak checks). + +9) Verify after each phase: +- Run focused suites after each migration phase. +- Run full `pnpm test` after legacy field/method removal lands. + +================================================================================ +Net effect +================================================================================ + +API shape becomes explicit and namespaced: +- `event_result.gate.*` owns handler execution/permit transitions +- `event.gate.*` owns lifecycle transitions +- `event_bus.gate.*` owns runloop pause, idle waiting, and limiter resolution + +State ownership becomes centralized: +- no scattered coordination flags on EventResult/EventBus +- private mutable coordination state lives inside gate internals + +Correctness target after Step 1: +- impossible to double-release or leak handler permits on timeout + queue-jump races + +No new public package exports required: +- gate internals remain project-internal (`src/gates.ts`, `src/semaphores.ts`) diff --git a/bubus-ts/eslint.config.js b/bubus-ts/eslint.config.js index 3d89e0b..4783e2a 100644 --- a/bubus-ts/eslint.config.js +++ b/bubus-ts/eslint.config.js @@ -1,22 +1,22 @@ -import ts_parser from "@typescript-eslint/parser"; -import ts_eslint_plugin from "@typescript-eslint/eslint-plugin"; +import ts_parser from '@typescript-eslint/parser' +import ts_eslint_plugin from '@typescript-eslint/eslint-plugin' export default [ { - files: ["**/*.ts"], + files: ['**/*.ts'], languageOptions: { parser: ts_parser, parserOptions: { - sourceType: "module", - ecmaVersion: "latest" - } + sourceType: 'module', + ecmaVersion: 'latest', + }, }, plugins: { - "@typescript-eslint": ts_eslint_plugin + '@typescript-eslint': ts_eslint_plugin, }, rules: { - "no-unused-vars": "off", - "@typescript-eslint/no-unused-vars": ["error", { "argsIgnorePattern": "^_" }] - } - } -]; + 'no-unused-vars': 'off', + '@typescript-eslint/no-unused-vars': ['error', { argsIgnorePattern: '^_' }], + }, + }, +] diff --git a/bubus-ts/examples/log_tree_demo.ts b/bubus-ts/examples/log_tree_demo.ts index 192346a..a4aaef0 100644 --- a/bubus-ts/examples/log_tree_demo.ts +++ b/bubus-ts/examples/log_tree_demo.ts @@ -1,106 +1,98 @@ -import { z } from "zod"; +import { z } from 'zod' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const RootEvent = BaseEvent.extend("RootEvent", { +const RootEvent = BaseEvent.extend('RootEvent', { url: z.string(), event_result_schema: z.string(), - event_result_type: "string" -}); + event_result_type: 'string', +}) -const ChildEvent = BaseEvent.extend("ChildEvent", { +const ChildEvent = BaseEvent.extend('ChildEvent', { tab_id: z.string(), event_result_schema: z.string(), - event_result_type: "string" -}); + event_result_type: 'string', +}) -const GrandchildEvent = BaseEvent.extend("GrandchildEvent", { +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', { status: z.string(), event_result_schema: z.string(), - event_result_type: "string" -}); + event_result_type: 'string', +}) const delay = (ms: number): Promise => new Promise((resolve) => { - setTimeout(resolve, ms); - }); + setTimeout(resolve, ms) + }) async function main(): Promise { - const bus_a = new EventBus("BusA"); - const bus_b = new EventBus("BusB"); + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') async function forward_to_bus_b(event: InstanceType): Promise { - await delay(20); - bus_b.dispatch(event); - return "forwarded_to_bus_b"; + await delay(20) + bus_b.dispatch(event) + return 'forwarded_to_bus_b' } - bus_a.on("*", forward_to_bus_b); + bus_a.on('*', forward_to_bus_b) async function root_fast_handler(event: InstanceType): Promise { - await delay(10); - const child = event.bus?.emit( - ChildEvent({ tab_id: "tab-123", event_timeout: 0.1 }) - ); + await delay(10) + const child = event.bus?.emit(ChildEvent({ tab_id: 'tab-123', event_timeout: 0.1 })) if (child) { - await child.done(); + await child.done() } - return "root_fast_handler_ok"; + return 'root_fast_handler_ok' } async function root_slow_handler(event: InstanceType): Promise { - event.bus?.emit( - ChildEvent({ tab_id: "tab-timeout", event_timeout: 0.1 }) - ); - await delay(400); - return "root_slow_handler_timeout"; + event.bus?.emit(ChildEvent({ tab_id: 'tab-timeout', event_timeout: 0.1 })) + await delay(400) + return 'root_slow_handler_timeout' } - bus_a.on(RootEvent, root_fast_handler); - bus_a.on(RootEvent, root_slow_handler); + bus_a.on(RootEvent, root_fast_handler) + bus_a.on(RootEvent, root_slow_handler) async function child_slow_handler(_event: InstanceType): Promise { - await delay(150); - return "child_slow_handler_done"; + await delay(150) + return 'child_slow_handler_done' } async function child_fast_handler(event: InstanceType): Promise { - await delay(10); - const grandchild = event.bus?.emit( - GrandchildEvent({ status: "ok", event_timeout: 0.05 }) - ); + await delay(10) + const grandchild = event.bus?.emit(GrandchildEvent({ status: 'ok', event_timeout: 0.05 })) if (grandchild) { - await grandchild.done(); + await grandchild.done() } - return "child_handler_ok"; + return 'child_handler_ok' } async function grandchild_fast_handler(): Promise { - await delay(5); - return "grandchild_fast_handler_ok"; + await delay(5) + return 'grandchild_fast_handler_ok' } async function grandchild_slow_handler(): Promise { - await delay(60); - return "grandchild_slow_handler_timeout"; + await delay(60) + return 'grandchild_slow_handler_timeout' } - bus_b.on(ChildEvent, child_slow_handler); - bus_b.on(ChildEvent, child_fast_handler); - bus_b.on(GrandchildEvent, grandchild_fast_handler); - bus_b.on(GrandchildEvent, grandchild_slow_handler); + bus_b.on(ChildEvent, child_slow_handler) + bus_b.on(ChildEvent, child_fast_handler) + bus_b.on(GrandchildEvent, grandchild_fast_handler) + bus_b.on(GrandchildEvent, grandchild_slow_handler) - const root_event = bus_a.dispatch( - RootEvent({ url: "https://example.com", event_timeout: 0.25 }) - ); + const root_event = bus_a.dispatch(RootEvent({ url: 'https://example.com', event_timeout: 0.25 })) - await root_event.done(); + await root_event.done() - console.log("\n=== BusA logTree ==="); - console.log(bus_a.logTree()); + console.log('\n=== BusA logTree ===') + console.log(bus_a.logTree()) - console.log("\n=== BusB logTree ==="); - console.log(bus_b.logTree()); + console.log('\n=== BusB logTree ===') + console.log(bus_b.logTree()) } -await main(); +await main() diff --git a/bubus-ts/pnpm-lock.yaml b/bubus-ts/pnpm-lock.yaml index 698b911..331a564 100644 --- a/bubus-ts/pnpm-lock.yaml +++ b/bubus-ts/pnpm-lock.yaml @@ -5,7 +5,6 @@ settings: excludeLinksFromLockfile: false importers: - .: dependencies: uuid: @@ -38,336 +37,335 @@ importers: version: 5.9.3 packages: - '@esbuild/aix-ppc64@0.27.2': - resolution: {integrity: sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==} - engines: {node: '>=18'} + resolution: { integrity: sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw== } + engines: { node: '>=18' } cpu: [ppc64] os: [aix] '@esbuild/android-arm64@0.27.2': - resolution: {integrity: sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==} - engines: {node: '>=18'} + resolution: { integrity: sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA== } + engines: { node: '>=18' } cpu: [arm64] os: [android] '@esbuild/android-arm@0.27.2': - resolution: {integrity: sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==} - engines: {node: '>=18'} + resolution: { integrity: sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA== } + engines: { node: '>=18' } cpu: [arm] os: [android] '@esbuild/android-x64@0.27.2': - resolution: {integrity: sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==} - engines: {node: '>=18'} + resolution: { integrity: sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A== } + engines: { node: '>=18' } cpu: [x64] os: [android] '@esbuild/darwin-arm64@0.27.2': - resolution: {integrity: sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==} - engines: {node: '>=18'} + resolution: { integrity: sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg== } + engines: { node: '>=18' } cpu: [arm64] os: [darwin] '@esbuild/darwin-x64@0.27.2': - resolution: {integrity: sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==} - engines: {node: '>=18'} + resolution: { integrity: sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA== } + engines: { node: '>=18' } cpu: [x64] os: [darwin] '@esbuild/freebsd-arm64@0.27.2': - resolution: {integrity: sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==} - engines: {node: '>=18'} + resolution: { integrity: sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g== } + engines: { node: '>=18' } cpu: [arm64] os: [freebsd] '@esbuild/freebsd-x64@0.27.2': - resolution: {integrity: sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==} - engines: {node: '>=18'} + resolution: { integrity: sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA== } + engines: { node: '>=18' } cpu: [x64] os: [freebsd] '@esbuild/linux-arm64@0.27.2': - resolution: {integrity: sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==} - engines: {node: '>=18'} + resolution: { integrity: sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw== } + engines: { node: '>=18' } cpu: [arm64] os: [linux] '@esbuild/linux-arm@0.27.2': - resolution: {integrity: sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==} - engines: {node: '>=18'} + resolution: { integrity: sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw== } + engines: { node: '>=18' } cpu: [arm] os: [linux] '@esbuild/linux-ia32@0.27.2': - resolution: {integrity: sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==} - engines: {node: '>=18'} + resolution: { integrity: sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w== } + engines: { node: '>=18' } cpu: [ia32] os: [linux] '@esbuild/linux-loong64@0.27.2': - resolution: {integrity: sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==} - engines: {node: '>=18'} + resolution: { integrity: sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg== } + engines: { node: '>=18' } cpu: [loong64] os: [linux] '@esbuild/linux-mips64el@0.27.2': - resolution: {integrity: sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==} - engines: {node: '>=18'} + resolution: { integrity: sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw== } + engines: { node: '>=18' } cpu: [mips64el] os: [linux] '@esbuild/linux-ppc64@0.27.2': - resolution: {integrity: sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==} - engines: {node: '>=18'} + resolution: { integrity: sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ== } + engines: { node: '>=18' } cpu: [ppc64] os: [linux] '@esbuild/linux-riscv64@0.27.2': - resolution: {integrity: sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==} - engines: {node: '>=18'} + resolution: { integrity: sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA== } + engines: { node: '>=18' } cpu: [riscv64] os: [linux] '@esbuild/linux-s390x@0.27.2': - resolution: {integrity: sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==} - engines: {node: '>=18'} + resolution: { integrity: sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w== } + engines: { node: '>=18' } cpu: [s390x] os: [linux] '@esbuild/linux-x64@0.27.2': - resolution: {integrity: sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==} - engines: {node: '>=18'} + resolution: { integrity: sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA== } + engines: { node: '>=18' } cpu: [x64] os: [linux] '@esbuild/netbsd-arm64@0.27.2': - resolution: {integrity: sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==} - engines: {node: '>=18'} + resolution: { integrity: sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw== } + engines: { node: '>=18' } cpu: [arm64] os: [netbsd] '@esbuild/netbsd-x64@0.27.2': - resolution: {integrity: sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==} - engines: {node: '>=18'} + resolution: { integrity: sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA== } + engines: { node: '>=18' } cpu: [x64] os: [netbsd] '@esbuild/openbsd-arm64@0.27.2': - resolution: {integrity: sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==} - engines: {node: '>=18'} + resolution: { integrity: sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA== } + engines: { node: '>=18' } cpu: [arm64] os: [openbsd] '@esbuild/openbsd-x64@0.27.2': - resolution: {integrity: sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==} - engines: {node: '>=18'} + resolution: { integrity: sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg== } + engines: { node: '>=18' } cpu: [x64] os: [openbsd] '@esbuild/openharmony-arm64@0.27.2': - resolution: {integrity: sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==} - engines: {node: '>=18'} + resolution: { integrity: sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag== } + engines: { node: '>=18' } cpu: [arm64] os: [openharmony] '@esbuild/sunos-x64@0.27.2': - resolution: {integrity: sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==} - engines: {node: '>=18'} + resolution: { integrity: sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg== } + engines: { node: '>=18' } cpu: [x64] os: [sunos] '@esbuild/win32-arm64@0.27.2': - resolution: {integrity: sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==} - engines: {node: '>=18'} + resolution: { integrity: sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg== } + engines: { node: '>=18' } cpu: [arm64] os: [win32] '@esbuild/win32-ia32@0.27.2': - resolution: {integrity: sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==} - engines: {node: '>=18'} + resolution: { integrity: sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ== } + engines: { node: '>=18' } cpu: [ia32] os: [win32] '@esbuild/win32-x64@0.27.2': - resolution: {integrity: sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==} - engines: {node: '>=18'} + resolution: { integrity: sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ== } + engines: { node: '>=18' } cpu: [x64] os: [win32] '@eslint-community/eslint-utils@4.9.1': - resolution: {integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==} - engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + resolution: { integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ== } + engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 } peerDependencies: eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 '@eslint-community/regexpp@4.12.2': - resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==} - engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + resolution: { integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew== } + engines: { node: ^12.0.0 || ^14.0.0 || >=16.0.0 } '@eslint/config-array@0.21.1': - resolution: {integrity: sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } '@eslint/config-helpers@0.4.2': - resolution: {integrity: sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } '@eslint/core@0.17.0': - resolution: {integrity: sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } '@eslint/eslintrc@3.3.3': - resolution: {integrity: sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } '@eslint/js@9.39.2': - resolution: {integrity: sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } '@eslint/object-schema@2.1.7': - resolution: {integrity: sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } '@eslint/plugin-kit@0.4.1': - resolution: {integrity: sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } '@humanfs/core@0.19.1': - resolution: {integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==} - engines: {node: '>=18.18.0'} + resolution: { integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA== } + engines: { node: '>=18.18.0' } '@humanfs/node@0.16.7': - resolution: {integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==} - engines: {node: '>=18.18.0'} + resolution: { integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ== } + engines: { node: '>=18.18.0' } '@humanwhocodes/module-importer@1.0.1': - resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} - engines: {node: '>=12.22'} + resolution: { integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA== } + engines: { node: '>=12.22' } '@humanwhocodes/retry@0.4.3': - resolution: {integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==} - engines: {node: '>=18.18'} + resolution: { integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ== } + engines: { node: '>=18.18' } '@types/estree@1.0.8': - resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + resolution: { integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w== } '@types/json-schema@7.0.15': - resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + resolution: { integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== } '@typescript-eslint/eslint-plugin@8.54.0': - resolution: {integrity: sha512-hAAP5io/7csFStuOmR782YmTthKBJ9ND3WVL60hcOjvtGFb+HJxH4O5huAcmcZ9v9G8P+JETiZ/G1B8MALnWZQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-hAAP5io/7csFStuOmR782YmTthKBJ9ND3WVL60hcOjvtGFb+HJxH4O5huAcmcZ9v9G8P+JETiZ/G1B8MALnWZQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } peerDependencies: '@typescript-eslint/parser': ^8.54.0 eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' '@typescript-eslint/parser@8.54.0': - resolution: {integrity: sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } peerDependencies: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' '@typescript-eslint/project-service@8.54.0': - resolution: {integrity: sha512-YPf+rvJ1s7MyiWM4uTRhE4DvBXrEV+d8oC3P9Y2eT7S+HBS0clybdMIPnhiATi9vZOYDc7OQ1L/i6ga6NFYK/g==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-YPf+rvJ1s7MyiWM4uTRhE4DvBXrEV+d8oC3P9Y2eT7S+HBS0clybdMIPnhiATi9vZOYDc7OQ1L/i6ga6NFYK/g== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } peerDependencies: typescript: '>=4.8.4 <6.0.0' '@typescript-eslint/scope-manager@8.54.0': - resolution: {integrity: sha512-27rYVQku26j/PbHYcVfRPonmOlVI6gihHtXFbTdB5sb6qA0wdAQAbyXFVarQ5t4HRojIz64IV90YtsjQSSGlQg==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-27rYVQku26j/PbHYcVfRPonmOlVI6gihHtXFbTdB5sb6qA0wdAQAbyXFVarQ5t4HRojIz64IV90YtsjQSSGlQg== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } '@typescript-eslint/tsconfig-utils@8.54.0': - resolution: {integrity: sha512-dRgOyT2hPk/JwxNMZDsIXDgyl9axdJI3ogZ2XWhBPsnZUv+hPesa5iuhdYt2gzwA9t8RE5ytOJ6xB0moV0Ujvw==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-dRgOyT2hPk/JwxNMZDsIXDgyl9axdJI3ogZ2XWhBPsnZUv+hPesa5iuhdYt2gzwA9t8RE5ytOJ6xB0moV0Ujvw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } peerDependencies: typescript: '>=4.8.4 <6.0.0' '@typescript-eslint/type-utils@8.54.0': - resolution: {integrity: sha512-hiLguxJWHjjwL6xMBwD903ciAwd7DmK30Y9Axs/etOkftC3ZNN9K44IuRD/EB08amu+Zw6W37x9RecLkOo3pMA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-hiLguxJWHjjwL6xMBwD903ciAwd7DmK30Y9Axs/etOkftC3ZNN9K44IuRD/EB08amu+Zw6W37x9RecLkOo3pMA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } peerDependencies: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' '@typescript-eslint/types@8.54.0': - resolution: {integrity: sha512-PDUI9R1BVjqu7AUDsRBbKMtwmjWcn4J3le+5LpcFgWULN3LvHC5rkc9gCVxbrsrGmO1jfPybN5s6h4Jy+OnkAA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-PDUI9R1BVjqu7AUDsRBbKMtwmjWcn4J3le+5LpcFgWULN3LvHC5rkc9gCVxbrsrGmO1jfPybN5s6h4Jy+OnkAA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } '@typescript-eslint/typescript-estree@8.54.0': - resolution: {integrity: sha512-BUwcskRaPvTk6fzVWgDPdUndLjB87KYDrN5EYGetnktoeAvPtO4ONHlAZDnj5VFnUANg0Sjm7j4usBlnoVMHwA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-BUwcskRaPvTk6fzVWgDPdUndLjB87KYDrN5EYGetnktoeAvPtO4ONHlAZDnj5VFnUANg0Sjm7j4usBlnoVMHwA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } peerDependencies: typescript: '>=4.8.4 <6.0.0' '@typescript-eslint/utils@8.54.0': - resolution: {integrity: sha512-9Cnda8GS57AQakvRyG0PTejJNlA2xhvyNtEVIMlDWOOeEyBkYWhGPnfrIAnqxLMTSTo6q8g12XVjjev5l1NvMA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-9Cnda8GS57AQakvRyG0PTejJNlA2xhvyNtEVIMlDWOOeEyBkYWhGPnfrIAnqxLMTSTo6q8g12XVjjev5l1NvMA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } peerDependencies: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' '@typescript-eslint/visitor-keys@8.54.0': - resolution: {integrity: sha512-VFlhGSl4opC0bprJiItPQ1RfUhGDIBokcPwaFH4yiBCaNPeld/9VeXbiPO1cLyorQi1G1vL+ecBk1x8o1axORA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-VFlhGSl4opC0bprJiItPQ1RfUhGDIBokcPwaFH4yiBCaNPeld/9VeXbiPO1cLyorQi1G1vL+ecBk1x8o1axORA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } acorn-jsx@5.3.2: - resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + resolution: { integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== } peerDependencies: acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 acorn@8.15.0: - resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==} - engines: {node: '>=0.4.0'} + resolution: { integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg== } + engines: { node: '>=0.4.0' } hasBin: true ajv@6.12.6: - resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + resolution: { integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== } ansi-styles@4.3.0: - resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} - engines: {node: '>=8'} + resolution: { integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== } + engines: { node: '>=8' } argparse@2.0.1: - resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + resolution: { integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== } balanced-match@1.0.2: - resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + resolution: { integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== } brace-expansion@1.1.12: - resolution: {integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==} + resolution: { integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg== } brace-expansion@2.0.2: - resolution: {integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==} + resolution: { integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ== } callsites@3.1.0: - resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} - engines: {node: '>=6'} + resolution: { integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== } + engines: { node: '>=6' } chalk@4.1.2: - resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} - engines: {node: '>=10'} + resolution: { integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== } + engines: { node: '>=10' } color-convert@2.0.1: - resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} - engines: {node: '>=7.0.0'} + resolution: { integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== } + engines: { node: '>=7.0.0' } color-name@1.1.4: - resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + resolution: { integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== } concat-map@0.0.1: - resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + resolution: { integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== } cross-spawn@7.0.6: - resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} - engines: {node: '>= 8'} + resolution: { integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== } + engines: { node: '>= 8' } debug@4.4.3: - resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} - engines: {node: '>=6.0'} + resolution: { integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA== } + engines: { node: '>=6.0' } peerDependencies: supports-color: '*' peerDependenciesMeta: @@ -375,32 +373,32 @@ packages: optional: true deep-is@0.1.4: - resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + resolution: { integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ== } esbuild@0.27.2: - resolution: {integrity: sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==} - engines: {node: '>=18'} + resolution: { integrity: sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw== } + engines: { node: '>=18' } hasBin: true escape-string-regexp@4.0.0: - resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} - engines: {node: '>=10'} + resolution: { integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== } + engines: { node: '>=10' } eslint-scope@8.4.0: - resolution: {integrity: sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } eslint-visitor-keys@3.4.3: - resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} - engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + resolution: { integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== } + engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 } eslint-visitor-keys@4.2.1: - resolution: {integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } eslint@9.39.2: - resolution: {integrity: sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } hasBin: true peerDependencies: jiti: '*' @@ -409,37 +407,37 @@ packages: optional: true espree@10.4.0: - resolution: {integrity: sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + resolution: { integrity: sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } esquery@1.7.0: - resolution: {integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==} - engines: {node: '>=0.10'} + resolution: { integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g== } + engines: { node: '>=0.10' } esrecurse@4.3.0: - resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} - engines: {node: '>=4.0'} + resolution: { integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== } + engines: { node: '>=4.0' } estraverse@5.3.0: - resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} - engines: {node: '>=4.0'} + resolution: { integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA== } + engines: { node: '>=4.0' } esutils@2.0.3: - resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} - engines: {node: '>=0.10.0'} + resolution: { integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== } + engines: { node: '>=0.10.0' } fast-deep-equal@3.1.3: - resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + resolution: { integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== } fast-json-stable-stringify@2.1.0: - resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + resolution: { integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== } fast-levenshtein@2.0.6: - resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + resolution: { integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== } fdir@6.5.0: - resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} - engines: {node: '>=12.0.0'} + resolution: { integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg== } + engines: { node: '>=12.0.0' } peerDependencies: picomatch: ^3 || ^4 peerDependenciesMeta: @@ -447,225 +445,224 @@ packages: optional: true file-entry-cache@8.0.0: - resolution: {integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==} - engines: {node: '>=16.0.0'} + resolution: { integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ== } + engines: { node: '>=16.0.0' } find-up@5.0.0: - resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} - engines: {node: '>=10'} + resolution: { integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== } + engines: { node: '>=10' } flat-cache@4.0.1: - resolution: {integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==} - engines: {node: '>=16'} + resolution: { integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw== } + engines: { node: '>=16' } flatted@3.3.3: - resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} + resolution: { integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg== } fsevents@2.3.3: - resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} - engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + resolution: { integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== } + engines: { node: ^8.16.0 || ^10.6.0 || >=11.0.0 } os: [darwin] get-tsconfig@4.13.1: - resolution: {integrity: sha512-EoY1N2xCn44xU6750Sx7OjOIT59FkmstNc3X6y5xpz7D5cBtZRe/3pSlTkDJgqsOk3WwZPkWfonhhUJfttQo3w==} + resolution: { integrity: sha512-EoY1N2xCn44xU6750Sx7OjOIT59FkmstNc3X6y5xpz7D5cBtZRe/3pSlTkDJgqsOk3WwZPkWfonhhUJfttQo3w== } glob-parent@6.0.2: - resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} - engines: {node: '>=10.13.0'} + resolution: { integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A== } + engines: { node: '>=10.13.0' } globals@14.0.0: - resolution: {integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==} - engines: {node: '>=18'} + resolution: { integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ== } + engines: { node: '>=18' } has-flag@4.0.0: - resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} - engines: {node: '>=8'} + resolution: { integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== } + engines: { node: '>=8' } ignore@5.3.2: - resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} - engines: {node: '>= 4'} + resolution: { integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== } + engines: { node: '>= 4' } ignore@7.0.5: - resolution: {integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==} - engines: {node: '>= 4'} + resolution: { integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg== } + engines: { node: '>= 4' } import-fresh@3.3.1: - resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} - engines: {node: '>=6'} + resolution: { integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ== } + engines: { node: '>=6' } imurmurhash@0.1.4: - resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} - engines: {node: '>=0.8.19'} + resolution: { integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== } + engines: { node: '>=0.8.19' } is-extglob@2.1.1: - resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} - engines: {node: '>=0.10.0'} + resolution: { integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== } + engines: { node: '>=0.10.0' } is-glob@4.0.3: - resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} - engines: {node: '>=0.10.0'} + resolution: { integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== } + engines: { node: '>=0.10.0' } isexe@2.0.0: - resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + resolution: { integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== } js-yaml@4.1.1: - resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} + resolution: { integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== } hasBin: true json-buffer@3.0.1: - resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + resolution: { integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== } json-schema-traverse@0.4.1: - resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + resolution: { integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== } json-stable-stringify-without-jsonify@1.0.1: - resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + resolution: { integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== } keyv@4.5.4: - resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + resolution: { integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw== } levn@0.4.1: - resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} - engines: {node: '>= 0.8.0'} + resolution: { integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ== } + engines: { node: '>= 0.8.0' } locate-path@6.0.0: - resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} - engines: {node: '>=10'} + resolution: { integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== } + engines: { node: '>=10' } lodash.merge@4.6.2: - resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + resolution: { integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== } minimatch@3.1.2: - resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + resolution: { integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== } minimatch@9.0.5: - resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} - engines: {node: '>=16 || 14 >=14.17'} + resolution: { integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== } + engines: { node: '>=16 || 14 >=14.17' } ms@2.1.3: - resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + resolution: { integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== } natural-compare@1.4.0: - resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + resolution: { integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== } optionator@0.9.4: - resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} - engines: {node: '>= 0.8.0'} + resolution: { integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g== } + engines: { node: '>= 0.8.0' } p-limit@3.1.0: - resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} - engines: {node: '>=10'} + resolution: { integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== } + engines: { node: '>=10' } p-locate@5.0.0: - resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} - engines: {node: '>=10'} + resolution: { integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== } + engines: { node: '>=10' } parent-module@1.0.1: - resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} - engines: {node: '>=6'} + resolution: { integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== } + engines: { node: '>=6' } path-exists@4.0.0: - resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} - engines: {node: '>=8'} + resolution: { integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== } + engines: { node: '>=8' } path-key@3.1.1: - resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} - engines: {node: '>=8'} + resolution: { integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== } + engines: { node: '>=8' } picomatch@4.0.3: - resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} - engines: {node: '>=12'} + resolution: { integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q== } + engines: { node: '>=12' } prelude-ls@1.2.1: - resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} - engines: {node: '>= 0.8.0'} + resolution: { integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== } + engines: { node: '>= 0.8.0' } prettier@3.8.1: - resolution: {integrity: sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==} - engines: {node: '>=14'} + resolution: { integrity: sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg== } + engines: { node: '>=14' } hasBin: true punycode@2.3.1: - resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} - engines: {node: '>=6'} + resolution: { integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== } + engines: { node: '>=6' } resolve-from@4.0.0: - resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} - engines: {node: '>=4'} + resolution: { integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== } + engines: { node: '>=4' } resolve-pkg-maps@1.0.0: - resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} + resolution: { integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw== } semver@7.7.3: - resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} - engines: {node: '>=10'} + resolution: { integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q== } + engines: { node: '>=10' } hasBin: true shebang-command@2.0.0: - resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} - engines: {node: '>=8'} + resolution: { integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== } + engines: { node: '>=8' } shebang-regex@3.0.0: - resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} - engines: {node: '>=8'} + resolution: { integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== } + engines: { node: '>=8' } strip-json-comments@3.1.1: - resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} - engines: {node: '>=8'} + resolution: { integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== } + engines: { node: '>=8' } supports-color@7.2.0: - resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} - engines: {node: '>=8'} + resolution: { integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== } + engines: { node: '>=8' } tinyglobby@0.2.15: - resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} - engines: {node: '>=12.0.0'} + resolution: { integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ== } + engines: { node: '>=12.0.0' } ts-api-utils@2.4.0: - resolution: {integrity: sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==} - engines: {node: '>=18.12'} + resolution: { integrity: sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA== } + engines: { node: '>=18.12' } peerDependencies: typescript: '>=4.8.4' tsx@4.21.0: - resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==} - engines: {node: '>=18.0.0'} + resolution: { integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw== } + engines: { node: '>=18.0.0' } hasBin: true type-check@0.4.0: - resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} - engines: {node: '>= 0.8.0'} + resolution: { integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== } + engines: { node: '>= 0.8.0' } typescript@5.9.3: - resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} - engines: {node: '>=14.17'} + resolution: { integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw== } + engines: { node: '>=14.17' } hasBin: true uri-js@4.4.1: - resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + resolution: { integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== } uuid@11.1.0: - resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + resolution: { integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A== } hasBin: true which@2.0.2: - resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} - engines: {node: '>= 8'} + resolution: { integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== } + engines: { node: '>= 8' } hasBin: true word-wrap@1.2.5: - resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} - engines: {node: '>=0.10.0'} + resolution: { integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA== } + engines: { node: '>=0.10.0' } yocto-queue@0.1.0: - resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} - engines: {node: '>=10'} + resolution: { integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== } + engines: { node: '>=10' } zod@4.3.6: - resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==} + resolution: { integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg== } snapshots: - '@esbuild/aix-ppc64@0.27.2': optional: true diff --git a/bubus-ts/prettier.config.js b/bubus-ts/prettier.config.js index f68c694..98b89f5 100644 --- a/bubus-ts/prettier.config.js +++ b/bubus-ts/prettier.config.js @@ -1,8 +1,8 @@ const config = { semi: false, singleQuote: true, - trailingComma: "es5", - printWidth: 140 -}; + trailingComma: 'es5', + printWidth: 140, +} -export default config; +export default config diff --git a/bubus-ts/src/async_context.ts b/bubus-ts/src/async_context.ts index 0ef59d0..117ab2d 100644 --- a/bubus-ts/src/async_context.ts +++ b/bubus-ts/src/async_context.ts @@ -1,43 +1,41 @@ +declare const process: { versions?: { node?: string } } | undefined + type AsyncLocalStorageLike = { - getStore(): unknown; - run(store: unknown, callback: () => T): T; - enterWith?(store: unknown): void; -}; + getStore(): unknown + run(store: unknown, callback: () => T): T + enterWith?(store: unknown): void +} -export let async_local_storage: AsyncLocalStorageLike | null = null; +export let async_local_storage: AsyncLocalStorageLike | null = null -const is_node = - typeof process !== "undefined" && - typeof process.versions !== "undefined" && - typeof process.versions.node === "string"; +const is_node = typeof process !== 'undefined' && typeof process.versions !== 'undefined' && typeof process.versions.node === 'string' if (is_node) { try { - const importer = new Function( - "specifier", - "return import(specifier)" - ) as (specifier: string) => Promise<{ AsyncLocalStorage?: new () => AsyncLocalStorageLike }>; - const mod = await importer("node:async_hooks"); + const importer = new Function('specifier', 'return import(specifier)') as ( + specifier: string + ) => Promise<{ AsyncLocalStorage?: new () => AsyncLocalStorageLike }> + const mod = await importer('node:async_hooks') if (mod?.AsyncLocalStorage) { - async_local_storage = new mod.AsyncLocalStorage(); + async_local_storage = new mod.AsyncLocalStorage() } } catch { - async_local_storage = null; + async_local_storage = null } } export const captureAsyncContext = (): unknown | null => { if (!async_local_storage) { - return null; + return null } - return async_local_storage.getStore() ?? null; -}; + return async_local_storage.getStore() ?? null +} export const runWithAsyncContext = (context: unknown | null, fn: () => T): T => { if (!async_local_storage) { - return fn(); + return fn() } - return async_local_storage.run(context ?? undefined, fn); -}; + return async_local_storage.run(context ?? undefined, fn) +} -export const hasAsyncLocalStorage = (): boolean => async_local_storage !== null; +export const hasAsyncLocalStorage = (): boolean => async_local_storage !== null diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index 4c8c1d8..cb2d2a0 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -1,11 +1,10 @@ -import { z } from "zod"; -import { v7 as uuidv7 } from "uuid"; - -import type { EventBus } from "./event_bus.js"; -import { EventResult } from "./event_result.js"; -import type { ConcurrencyMode, Deferred } from "./semaphores.js"; -import { CONCURRENCY_MODES, withResolvers } from "./semaphores.js"; +import { z } from 'zod' +import { v7 as uuidv7 } from 'uuid' +import type { EventBus } from './event_bus.js' +import { EventResult } from './event_result.js' +import type { ConcurrencyMode, Deferred } from './semaphores.js' +import { CONCURRENCY_MODES, withResolvers } from './semaphores.js' export const BaseEventSchema = z .object({ @@ -18,101 +17,89 @@ export const BaseEventSchema = z event_result_type: z.string().optional(), event_result_schema: z.unknown().optional(), event_concurrency: z.enum(CONCURRENCY_MODES).optional(), - handler_concurrency: z.enum(CONCURRENCY_MODES).optional() + handler_concurrency: z.enum(CONCURRENCY_MODES).optional(), }) - .passthrough(); + .passthrough() -export type BaseEventData = z.infer; +export type BaseEventData = z.infer type BaseEventFields = Pick< BaseEventData, - | "event_id" - | "event_created_at" - | "event_type" - | "event_timeout" - | "event_parent_id" - | "event_result_type" - | "event_result_schema" - | "event_concurrency" - | "handler_concurrency" ->; - -export type BaseEventInit> = TFields & - Partial; - -type BaseEventSchemaShape = typeof BaseEventSchema.shape; - -export type EventSchema = z.ZodObject< - BaseEventSchemaShape & TShape ->; - -type EventInput = z.input>; -export type EventInit = Omit, keyof BaseEventFields> & - Partial; + | 'event_id' + | 'event_created_at' + | 'event_type' + | 'event_timeout' + | 'event_parent_id' + | 'event_result_type' + | 'event_result_schema' + | 'event_concurrency' + | 'handler_concurrency' +> + +export type BaseEventInit> = TFields & Partial + +type BaseEventSchemaShape = typeof BaseEventSchema.shape + +export type EventSchema = z.ZodObject + +type EventInput = z.input> +export type EventInit = Omit, keyof BaseEventFields> & Partial export type EventFactory = { - (data: EventInit): BaseEvent & z.infer>; - new (data: EventInit): BaseEvent & z.infer>; - schema: EventSchema; - event_type?: string; - event_result_schema?: z.ZodTypeAny; - event_result_type?: string; - fromJSON?: (data: unknown) => BaseEvent & z.infer>; -}; + (data: EventInit): BaseEvent & z.infer> + new (data: EventInit): BaseEvent & z.infer> + schema: EventSchema + event_type?: string + event_result_schema?: z.ZodTypeAny + event_result_type?: string + fromJSON?: (data: unknown) => BaseEvent & z.infer> +} type ZodShapeFrom> = { - [K in keyof TShape as K extends - | "event_result_schema" - | "event_result_type" - | "event_result_schema_json" + [K in keyof TShape as K extends 'event_result_schema' | 'event_result_type' | 'event_result_schema_json' ? never : TShape[K] extends z.ZodTypeAny - ? K - : never]: Extract; -}; + ? K + : never]: Extract +} export class BaseEvent { - static _last_timestamp_ms = 0; - event_id: string; - event_created_at: string; - event_type: string; - event_timeout: number | null; - event_parent_id?: string; - event_path: string[]; - event_factory?: Function; - event_result_schema?: z.ZodTypeAny; - event_result_type?: string; - event_results: Map; - event_emitted_by_handler_id?: string; - event_pending_buses: number; - event_status: "pending" | "started" | "completed"; - event_created_at_ms: number; - event_started_at?: string; - event_completed_at?: string; - event_errors: unknown[]; - bus?: EventBus; - event_concurrency?: ConcurrencyMode; - handler_concurrency?: ConcurrencyMode; - _original_event?: BaseEvent; - _dispatch_context?: unknown | null; - - static schema = BaseEventSchema; - static event_type?: string; - - _done: Deferred | null; + static _last_timestamp_ms = 0 + event_id!: string + event_created_at!: string + event_type!: string + event_timeout!: number | null + event_parent_id?: string + event_path!: string[] + event_result_schema?: z.ZodTypeAny + event_result_type?: string + event_results!: Map + event_emitted_by_handler_id?: string + event_pending_buses!: number + event_status!: 'pending' | 'started' | 'completed' + event_started_at?: string + event_completed_at?: string + bus?: EventBus + event_concurrency?: ConcurrencyMode + handler_concurrency?: ConcurrencyMode + _original_event?: BaseEvent + _dispatch_context?: unknown | null + + static schema = BaseEventSchema + static event_type?: string + + _done: Deferred | null constructor(data: BaseEventInit> = {}) { const ctor = this.constructor as typeof BaseEvent & { - factory?: Function; - event_result_schema?: z.ZodTypeAny; - event_result_type?: string; - }; - const event_type = data.event_type ?? ctor.event_type ?? ctor.name; - const event_result_schema = data.event_result_schema ?? ctor.event_result_schema; - const event_result_type = data.event_result_type ?? ctor.event_result_type; - const event_id = data.event_id ?? uuidv7(); - const event_created_at = - data.event_created_at ?? new Date().toISOString(); - const event_timeout = data.event_timeout ?? null; + event_result_schema?: z.ZodTypeAny + event_result_type?: string + } + const event_type = data.event_type ?? ctor.event_type ?? ctor.name + const event_result_schema = (data.event_result_schema ?? ctor.event_result_schema) as z.ZodTypeAny | undefined + const event_result_type = data.event_result_type ?? ctor.event_result_type + const event_id = data.event_id ?? uuidv7() + const event_created_at = data.event_created_at ?? new Date().toISOString() + const event_timeout = data.event_timeout ?? null const base_data = { ...data, @@ -121,115 +108,92 @@ export class BaseEvent { event_type, event_timeout, event_result_schema, - event_result_type - }; - - const schema = ctor.schema ?? BaseEventSchema; - const parsed = schema.parse(base_data) as BaseEventData & Record; - - Object.assign(this, parsed); - - this.event_path = Array.isArray((parsed as { event_path?: string[] }).event_path) - ? ([...(parsed as { event_path?: string[] }).event_path] as string[]) - : []; - this.event_pending_buses = 0; - this.event_status = "pending"; - this.event_created_at_ms = Date.parse(this.event_created_at); - this.event_errors = []; - this.event_factory = ctor.factory; - this.event_result_schema = event_result_schema; - this.event_result_type = event_result_type; - this.event_results = new Map(); - - this._done = null; - this._dispatch_context = undefined; + event_result_type, + } + + const schema = ctor.schema ?? BaseEventSchema + const parsed = schema.parse(base_data) as BaseEventData & Record + + Object.assign(this, parsed) + + const parsed_path = (parsed as { event_path?: string[] }).event_path + this.event_path = Array.isArray(parsed_path) ? [...parsed_path] : [] + this.event_pending_buses = 0 + this.event_status = 'pending' + this.event_result_schema = event_result_schema + this.event_result_type = event_result_type + this.event_results = new Map() + + this._done = null + this._dispatch_context = undefined } static nextIsoTimestamp(): string { - const now_ms = Date.now(); - const next_ms = Math.max(now_ms, BaseEvent._last_timestamp_ms + 1); - BaseEvent._last_timestamp_ms = next_ms; - return new Date(next_ms).toISOString(); + const now_ms = Date.now() + const next_ms = Math.max(now_ms, BaseEvent._last_timestamp_ms + 1) + BaseEvent._last_timestamp_ms = next_ms + return new Date(next_ms).toISOString() } - static extend( - shape: TShape - ): EventFactory; - static extend>( - shape: TShape - ): EventFactory>; - static extend>( - event_type: string, - shape: TShape - ): EventFactory>; - static extend>( - arg1: string | TShape, - arg2?: TShape - ): EventFactory> { - const event_type = typeof arg1 === "string" ? arg1 : undefined; - const raw_shape = (typeof arg1 === "string" ? arg2 ?? {} : arg1) as Record< - string, - unknown - >; - - const event_result_schema = is_zod_schema(raw_shape.event_result_schema) - ? (raw_shape.event_result_schema as z.ZodTypeAny) - : undefined; - const event_result_type = - typeof raw_shape.event_result_type === "string" ? raw_shape.event_result_type : undefined; - - const shape = extract_zod_shape(raw_shape); - const full_schema = BaseEventSchema.extend(shape); + static extend(shape: TShape): EventFactory + static extend>(shape: TShape): EventFactory> + static extend>(event_type: string, shape: TShape): EventFactory> + static extend>(arg1: string | TShape, arg2?: TShape): EventFactory> { + const event_type = typeof arg1 === 'string' ? arg1 : undefined + const raw_shape = (typeof arg1 === 'string' ? (arg2 ?? {}) : arg1) as Record + + const event_result_schema = is_zod_schema(raw_shape.event_result_schema) ? (raw_shape.event_result_schema as z.ZodTypeAny) : undefined + const event_result_type = typeof raw_shape.event_result_type === 'string' ? raw_shape.event_result_type : undefined + + const shape = extract_zod_shape(raw_shape) + const full_schema = BaseEventSchema.extend(shape) class ExtendedEvent extends BaseEvent { - static schema = full_schema; - static event_type = event_type; - static factory?: Function; - static event_result_schema = event_result_schema; - static event_result_type = event_result_type; + static schema = full_schema as unknown as typeof BaseEvent.schema + static event_type = event_type + static event_result_schema = event_result_schema + static event_result_type = event_result_type constructor(data: EventInit>) { - super(data as BaseEventInit>); + super(data as BaseEventInit>) } } - function EventFactory( - data: EventInit> - ): BaseEvent & z.infer>> { - return new ExtendedEvent(data); + type FactoryResult = BaseEvent & z.infer>> + + function EventFactory(data: EventInit>): FactoryResult { + return new ExtendedEvent(data) as FactoryResult } - EventFactory.schema = full_schema as EventSchema>; - EventFactory.event_type = event_type; - EventFactory.event_result_schema = event_result_schema; - EventFactory.event_result_type = event_result_type; - EventFactory.fromJSON = (data: unknown) => - ExtendedEvent.fromJSON(data) as BaseEvent & z.infer>>; - EventFactory.prototype = ExtendedEvent.prototype; - (EventFactory as unknown as { class: typeof ExtendedEvent }).class = ExtendedEvent; - (ExtendedEvent as unknown as { factory?: Function }).factory = EventFactory; - - return EventFactory as EventFactory>; + EventFactory.schema = full_schema as EventSchema> + EventFactory.event_type = event_type + EventFactory.event_result_schema = event_result_schema + EventFactory.event_result_type = event_result_type + EventFactory.fromJSON = (data: unknown) => (ExtendedEvent.fromJSON as (data: unknown) => FactoryResult)(data) + EventFactory.prototype = ExtendedEvent.prototype + ;(EventFactory as unknown as { class: typeof ExtendedEvent }).class = ExtendedEvent + + return EventFactory as unknown as EventFactory> } static parse(this: T, data: unknown): InstanceType { - const schema = this.schema ?? BaseEventSchema; - const parsed = schema.parse(data); - return new this(parsed) as InstanceType; + const schema = this.schema ?? BaseEventSchema + const parsed = schema.parse(data) + return new this(parsed) as InstanceType } static fromJSON(this: T, data: unknown): InstanceType { - if (!data || typeof data !== "object") { - return this.parse(data); + if (!data || typeof data !== 'object') { + return this.parse(data) } - const record = { ...(data as Record) }; + const record = { ...(data as Record) } if (record.event_result_schema && !is_zod_schema(record.event_result_schema)) { - const zod_any = z as unknown as { fromJSONSchema?: (schema: unknown) => z.ZodTypeAny }; - if (typeof zod_any.fromJSONSchema === "function") { - record.event_result_schema = zod_any.fromJSONSchema(record.event_result_schema); + const zod_any = z as unknown as { fromJSONSchema?: (schema: unknown) => z.ZodTypeAny } + if (typeof zod_any.fromJSONSchema === 'function') { + record.event_result_schema = zod_any.fromJSONSchema(record.event_result_schema) } } - return new this(record as BaseEventInit>) as InstanceType; + return new this(record as BaseEventInit>) as InstanceType } toJSON(): BaseEventData { @@ -243,144 +207,149 @@ export class BaseEvent { event_result_type: this.event_result_type, event_concurrency: this.event_concurrency, handler_concurrency: this.handler_concurrency, - event_result_schema: this.event_result_schema - ? to_json_schema(this.event_result_schema) - : this.event_result_schema - }; - } - - get type(): string { - return this.event_type; + event_result_schema: this.event_result_schema ? to_json_schema(this.event_result_schema) : this.event_result_schema, + } } get event_children(): BaseEvent[] { - const children: BaseEvent[] = []; - const seen = new Set(); + const children: BaseEvent[] = [] + const seen = new Set() for (const result of this.event_results.values()) { for (const child of result.event_children) { if (!seen.has(child.event_id)) { - seen.add(child.event_id); - children.push(child); + seen.add(child.event_id) + children.push(child) } } } - return children; + return children } done(): Promise { if (!this.bus) { - return Promise.reject(new Error("event has no bus attached")); + return Promise.reject(new Error('event has no bus attached')) } - if (this.event_status === "completed") { - return Promise.resolve(this); + if (this.event_status === 'completed') { + return Promise.resolve(this) } // Always delegate to _runImmediately β€” it walks up the parent event tree // to determine whether we're inside a handler (works cross-bus). If no // ancestor handler is in-flight, it falls back to waitForCompletion(). const runner_bus = this.bus as { - _runImmediately: (event: BaseEvent) => Promise; - }; - return runner_bus._runImmediately(this) as Promise; + _runImmediately: (event: BaseEvent) => Promise + } + return runner_bus._runImmediately(this) as Promise } waitForCompletion(): Promise { - this.ensureDonePromise(); - return this._done!.promise; + if (this.event_status === 'completed') { + return Promise.resolve(this) + } + this.ensureDonePromise() + return this._done!.promise } markStarted(): void { - if (this.event_status !== "pending") { - return; + if (this.event_status !== 'pending') { + return } - this.event_status = "started"; - this.event_started_at = BaseEvent.nextIsoTimestamp(); + this.event_status = 'started' + this.event_started_at = BaseEvent.nextIsoTimestamp() } markCompleted(): void { - if (this.event_status === "completed") { - return; + if (this.event_status === 'completed') { + return } - this.event_status = "completed"; - this.event_completed_at = BaseEvent.nextIsoTimestamp(); - this.ensureDonePromise(); - this._done!.resolve(this); - } - - markFailed(error: unknown): void { - this.event_errors.push(error); + this.event_status = 'completed' + this.event_completed_at = BaseEvent.nextIsoTimestamp() + this._dispatch_context = null + this.ensureDonePromise() + this._done!.resolve(this) + this._done = null } - cancelPendingChildProcessing(reason: unknown): void { - for (const child of this.event_children) { - for (const result of child.event_results.values()) { - if (result.status === "pending") { - result.markError(reason); - } + get event_errors(): unknown[] { + const errors: unknown[] = [] + for (const result of this.event_results.values()) { + if (result.error !== undefined) { + errors.push(result.error) } - child.cancelPendingChildProcessing(reason); } + return errors } eventAreAllChildrenComplete(visited: Set = new Set()): boolean { if (visited.has(this.event_id)) { - return true; + return true } - visited.add(this.event_id); + visited.add(this.event_id) for (const child of this.event_children) { - if (child.event_status !== "completed") { - return false; + if (child.event_status !== 'completed') { + return false } if (!child.eventAreAllChildrenComplete(visited)) { - return false; + return false } } - return true; + return true } tryFinalizeCompletion(): void { if (this.event_pending_buses > 0) { - return; + return } if (!this.eventAreAllChildrenComplete()) { - return; + return } - this.markCompleted(); + this.markCompleted() } ensureDonePromise(): void { if (this._done) { - return; + return + } + this._done = withResolvers() + } + + // Break internal reference chains so a completed event can be GC'd when + // evicted from event_history. Called by EventBus.trimHistory(). + _gc(): void { + this._done = null + this._dispatch_context = null + this.bus = undefined + for (const result of this.event_results.values()) { + result.event_children = [] } - this._done = withResolvers(); + this.event_results.clear() } } -const is_zod_schema = (value: unknown): value is z.ZodTypeAny => - !!value && typeof (value as z.ZodTypeAny).safeParse === "function"; +const is_zod_schema = (value: unknown): value is z.ZodTypeAny => !!value && typeof (value as z.ZodTypeAny).safeParse === 'function' const extract_zod_shape = (raw: Record): z.ZodRawShape => { - const shape: z.ZodRawShape = {}; + const shape: Record = {} for (const [key, value] of Object.entries(raw)) { - if (key === "event_result_schema" || key === "event_result_type") { - continue; + if (key === 'event_result_schema' || key === 'event_result_type') { + continue } if (is_zod_schema(value)) { - shape[key] = value; + shape[key] = value } } - return shape; -}; + return shape as z.ZodRawShape +} const to_json_schema = (schema: unknown): unknown => { if (!schema) { - return schema; + return schema } if (!is_zod_schema(schema)) { - return schema; + return schema } - const zod_any = z as unknown as { toJSONSchema?: (schema: z.ZodTypeAny) => unknown }; - if (typeof zod_any.toJSONSchema === "function") { - return zod_any.toJSONSchema(schema); + const zod_any = z as unknown as { toJSONSchema?: (schema: z.ZodTypeAny) => unknown } + if (typeof zod_any.toJSONSchema === 'function') { + return zod_any.toJSONSchema(schema) } - return undefined; -}; + return undefined +} diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index 10f379a..c38ff57 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -1,218 +1,201 @@ -import { BaseEvent } from "./base_event.js"; -import { EventResult } from "./event_result.js"; -import { captureAsyncContext, runWithAsyncContext } from "./async_context.js"; -import { v5 as uuidv5, v7 as uuidv7 } from "uuid"; -import { - AsyncLimiter, - type ConcurrencyMode, - limiterForMode, - resolveConcurrencyMode, - runWithLimiter, - withResolvers -} from "./semaphores.js"; - +import { BaseEvent } from './base_event.js' +import { EventResult } from './event_result.js' +import { captureAsyncContext, runWithAsyncContext } from './async_context.js' +import { v5 as uuidv5 } from 'uuid' +import { AsyncLimiter, type ConcurrencyMode, limiterForMode, resolveConcurrencyMode, runWithLimiter, withResolvers } from './semaphores.js' export class EventHandlerTimeoutError extends Error { - event_type: string; - handler_name: string; - timeout_seconds: number; - - constructor( - message: string, - params: { event_type: string; handler_name: string; timeout_seconds: number } - ) { - super(message); - this.name = "EventHandlerTimeoutError"; - this.event_type = params.event_type; - this.handler_name = params.handler_name; - this.timeout_seconds = params.timeout_seconds; + event_type: string + handler_name: string + timeout_seconds: number + + constructor(message: string, params: { event_type: string; handler_name: string; timeout_seconds: number }) { + super(message) + this.name = 'EventHandlerTimeoutError' + this.event_type = params.event_type + this.handler_name = params.handler_name + this.timeout_seconds = params.timeout_seconds } } export class EventHandlerCancelledError extends Error { - event_type: string; - handler_name: string; - parent_error: Error; - - constructor( - message: string, - params: { event_type: string; handler_name: string; parent_error: Error } - ) { - super(message); - this.name = "EventHandlerCancelledError"; - this.event_type = params.event_type; - this.handler_name = params.handler_name; - this.parent_error = params.parent_error; + event_type: string + handler_name: string + parent_error: Error + + constructor(message: string, params: { event_type: string; handler_name: string; parent_error: Error }) { + super(message) + this.name = 'EventHandlerCancelledError' + this.event_type = params.event_type + this.handler_name = params.handler_name + this.parent_error = params.parent_error } } -import type { EventHandler, EventKey, FindOptions, HandlerOptions } from "./types.js"; +import type { EventHandler, EventKey, FindOptions, HandlerOptions } from './types.js' type FindWaiter = { - event_key: EventKey; - matches: (event: BaseEvent) => boolean; - resolve: (event: BaseEvent) => void; - timeout_id?: ReturnType; -}; + event_key: EventKey + matches: (event: BaseEvent) => boolean + resolve: (event: BaseEvent) => void + timeout_id?: ReturnType +} type HandlerEntry = { - id: string; - handler: EventHandler; - handler_name: string; - handler_file_path?: string; - handler_registered_at: string; - options?: HandlerOptions; - event_key: string | "*"; -}; + id: string + handler: EventHandler + handler_name: string + handler_file_path?: string + handler_registered_at: string + options?: HandlerOptions + event_key: string | '*' +} -const HANDLER_ID_NAMESPACE = uuidv5("bubus-handler", uuidv5.DNS); +const HANDLER_ID_NAMESPACE = uuidv5('bubus-handler', uuidv5.DNS) type EventBusOptions = { - max_history_size?: number | null; - event_concurrency?: ConcurrencyMode; - handler_concurrency?: ConcurrencyMode; - event_timeout?: number | null; -}; + max_history_size?: number | null + event_concurrency?: ConcurrencyMode + handler_concurrency?: ConcurrencyMode + event_timeout?: number | null +} class EventBusInstanceRegistry { - private _refs = new Set>(); - private _lookup = new WeakMap>(); - private _gc = typeof FinalizationRegistry !== "undefined" - ? new FinalizationRegistry>((ref) => { this._refs.delete(ref); }) - : null; + private _refs = new Set>() + private _lookup = new WeakMap>() + private _gc = + typeof FinalizationRegistry !== 'undefined' + ? new FinalizationRegistry>((ref) => { + this._refs.delete(ref) + }) + : null add(bus: EventBus): void { - const ref = new WeakRef(bus); - this._refs.add(ref); - this._lookup.set(bus, ref); - this._gc?.register(bus, ref, bus); + const ref = new WeakRef(bus) + this._refs.add(ref) + this._lookup.set(bus, ref) + this._gc?.register(bus, ref, bus) } delete(bus: EventBus): void { - const ref = this._lookup.get(bus); - if (!ref) return; - this._refs.delete(ref); - this._lookup.delete(bus); - this._gc?.unregister(bus); + const ref = this._lookup.get(bus) + if (!ref) return + this._refs.delete(ref) + this._lookup.delete(bus) + this._gc?.unregister(bus) } has(bus: EventBus): boolean { - return this._lookup.get(bus)?.deref() !== undefined; + return this._lookup.get(bus)?.deref() !== undefined } get size(): number { - let n = 0; - for (const ref of this._refs) ref.deref() ? n++ : this._refs.delete(ref); - return n; + let n = 0 + for (const ref of this._refs) ref.deref() ? n++ : this._refs.delete(ref) + return n } *[Symbol.iterator](): Iterator { for (const ref of this._refs) { - const bus = ref.deref(); - if (bus) yield bus; else this._refs.delete(ref); + const bus = ref.deref() + if (bus) yield bus + else this._refs.delete(ref) } } } export class EventBus { - static instances = new EventBusInstanceRegistry(); - static global_event_limiter = new AsyncLimiter(1); - static global_handler_limiter = new AsyncLimiter(1); + static instances = new EventBusInstanceRegistry() + static global_event_limiter = new AsyncLimiter(1) + static global_handler_limiter = new AsyncLimiter(1) static findEventById(event_id: string): BaseEvent | null { for (const bus of EventBus.instances) { - const event = bus.event_history.get(event_id); + const event = bus.event_history.get(event_id) if (event) { - return event; + return event } } - return null; + return null } - name: string; - max_history_size: number | null; - event_concurrency_default: ConcurrencyMode; - handler_concurrency_default: ConcurrencyMode; - event_timeout_default: number | null; - bus_event_limiter: AsyncLimiter; - bus_handler_limiter: AsyncLimiter; - handlers: Map; - event_history: Map; - pending_event_queue: BaseEvent[]; - in_flight_event_ids: Set; - runloop_running: boolean; + name: string + max_history_size: number | null + event_concurrency_default: ConcurrencyMode + handler_concurrency_default: ConcurrencyMode + event_timeout_default: number | null + bus_event_limiter: AsyncLimiter + bus_handler_limiter: AsyncLimiter + handlers: Map + event_history: Map + pending_event_queue: BaseEvent[] + in_flight_event_ids: Set + runloop_running: boolean // Resolves for callers of waitUntilIdle(); only drained when idle is confirmed twice. - idle_waiters: Array<() => void>; + idle_waiters: Array<() => void> // True while an idle check timeout is scheduled. - idle_check_pending: boolean; + idle_check_pending: boolean // Number of consecutive idle snapshots seen; must reach 2 to resolve waiters. - idle_check_streak: number; + idle_check_streak: number // Pending find() callers waiting for a matching future event. - find_waiters: Set; + find_waiters: Set // Depth counter for "immediate processing" (queue-jump) inside handlers. // While > 0, the runloop pauses to avoid processing unrelated events. - immediate_processing_stack_depth: number; + immediate_processing_stack_depth: number // Runloop waiters that resume once immediate_processing_stack_depth returns to 0. - immediate_processing_waiters: Array<() => void>; + immediate_processing_waiters: Array<() => void> // Stack of EventResults for handlers currently executing on this bus. // Enables per-bus isInsideHandler() and gives _runImmediately access to the // calling handler's result even when called on raw (non-proxied) events. - _event_result_stack: EventResult[]; - - constructor(name: string = "EventBus", options: EventBusOptions = {}) { - this.name = name; - this.max_history_size = - options.max_history_size === undefined ? 100 : options.max_history_size; - this.event_concurrency_default = options.event_concurrency ?? "bus-serial"; - this.handler_concurrency_default = options.handler_concurrency ?? "bus-serial"; - this.event_timeout_default = - options.event_timeout === undefined ? 60 : options.event_timeout; - this.bus_event_limiter = new AsyncLimiter(1); - this.bus_handler_limiter = new AsyncLimiter(1); - this.handlers = new Map(); - this.event_history = new Map(); - this.pending_event_queue = []; - this.in_flight_event_ids = new Set(); - this.runloop_running = false; - this.idle_waiters = []; - this.idle_check_pending = false; - this.idle_check_streak = 0; - this.find_waiters = new Set(); - this.immediate_processing_stack_depth = 0; - this.immediate_processing_waiters = []; - this._event_result_stack = []; - - EventBus.instances.add(this); - - this.dispatch = this.dispatch.bind(this); - this.emit = this.emit.bind(this); + _event_result_stack: EventResult[] + + constructor(name: string = 'EventBus', options: EventBusOptions = {}) { + this.name = name + this.max_history_size = options.max_history_size === undefined ? 100 : options.max_history_size + this.event_concurrency_default = options.event_concurrency ?? 'bus-serial' + this.handler_concurrency_default = options.handler_concurrency ?? 'bus-serial' + this.event_timeout_default = options.event_timeout === undefined ? 60 : options.event_timeout + this.bus_event_limiter = new AsyncLimiter(1) + this.bus_handler_limiter = new AsyncLimiter(1) + this.handlers = new Map() + this.event_history = new Map() + this.pending_event_queue = [] + this.in_flight_event_ids = new Set() + this.runloop_running = false + this.idle_waiters = [] + this.idle_check_pending = false + this.idle_check_streak = 0 + this.find_waiters = new Set() + this.immediate_processing_stack_depth = 0 + this.immediate_processing_waiters = [] + this._event_result_stack = [] + + EventBus.instances.add(this) + + this.dispatch = this.dispatch.bind(this) + this.emit = this.emit.bind(this) } destroy(): void { - EventBus.instances.delete(this); - this.handlers.clear(); - this.event_history.clear(); - this.pending_event_queue.length = 0; - this.in_flight_event_ids.clear(); - this.find_waiters.clear(); - this.idle_waiters.length = 0; - this.immediate_processing_waiters.length = 0; + EventBus.instances.delete(this) + this.handlers.clear() + for (const event of this.event_history.values()) { + event._gc() + } + this.event_history.clear() + this.pending_event_queue.length = 0 + this.in_flight_event_ids.clear() + this.find_waiters.clear() + this.idle_waiters.length = 0 + this.immediate_processing_waiters.length = 0 + this._event_result_stack.length = 0 } - on( - event_key: EventKey | "*", - handler: EventHandler, - options: HandlerOptions = {} - ): void { - const normalized_key = this.normalizeEventKey(event_key); - const handler_name = handler.name || "anonymous"; - const handler_file_path = this.inferHandlerFilePath() ?? undefined; - const handler_registered_at = BaseEvent.nextIsoTimestamp(); - const handler_id = this.computeHandlerId( - normalized_key, - handler_name, - handler_file_path, - handler_registered_at - ); + on(event_key: EventKey | '*', handler: EventHandler, options: HandlerOptions = {}): void { + const normalized_key = this.normalizeEventKey(event_key) + const handler_name = handler.name || 'anonymous' + const handler_file_path = this.inferHandlerFilePath() ?? undefined + const handler_registered_at = BaseEvent.nextIsoTimestamp() + const handler_id = this.computeHandlerId(normalized_key, handler_name, handler_file_path, handler_registered_at) this.handlers.set(handler_id, { id: handler_id, @@ -221,173 +204,163 @@ export class EventBus { handler_file_path, handler_registered_at, options: Object.keys(options).length > 0 ? options : undefined, - event_key: normalized_key - }); + event_key: normalized_key, + }) } - off(event_key: EventKey | "*", handler?: EventHandler | string): void { - const normalized_key = this.normalizeEventKey(event_key); - const match_by_id = typeof handler === "string"; + off(event_key: EventKey | '*', handler?: EventHandler | string): void { + const normalized_key = this.normalizeEventKey(event_key) + const match_by_id = typeof handler === 'string' for (const [handler_id, entry] of this.handlers) { if (entry.event_key !== normalized_key) { - continue; + continue } if (handler === undefined || (match_by_id ? handler_id === handler : entry.handler === (handler as EventHandler))) { - this.handlers.delete(handler_id); + this.handlers.delete(handler_id) } } } private computeHandlerId( - event_key: string | "*", + event_key: string | '*', handler_name: string, handler_file_path: string | undefined, handler_registered_at: string ): string { - const file_path = handler_file_path ?? "unknown"; - const seed = `${this.name}|${event_key}|${handler_name}|${file_path}|${handler_registered_at}`; - return uuidv5(seed, HANDLER_ID_NAMESPACE); + const file_path = handler_file_path ?? 'unknown' + const seed = `${this.name}|${event_key}|${handler_name}|${file_path}|${handler_registered_at}` + return uuidv5(seed, HANDLER_ID_NAMESPACE) } - dispatch(event: T, event_key?: EventKey): T { - const original_event = event._original_event ?? event; + dispatch(event: T, _event_key?: EventKey): T { + const original_event = event._original_event ?? event if (!original_event.bus) { - original_event.bus = this; + original_event.bus = this } if (!Array.isArray(original_event.event_path)) { - original_event.event_path = []; + original_event.event_path = [] } if (original_event._dispatch_context === undefined) { - original_event._dispatch_context = captureAsyncContext(); + original_event._dispatch_context = captureAsyncContext() } if (original_event.event_timeout === null) { - original_event.event_timeout = this.event_timeout_default; + original_event.event_timeout = this.event_timeout_default } - if (original_event.event_path.includes(this.name) || this.eventHasVisited(original_event)) { - return this._getBusScopedEvent(original_event) as T; + return this._getBusScopedEvent(original_event) as T } if (!original_event.event_path.includes(this.name)) { - original_event.event_path.push(this.name); + original_event.event_path.push(this.name) } if (original_event.event_parent_id) { - const parent_event = this.event_history.get(original_event.event_parent_id); + const parent_event = this.event_history.get(original_event.event_parent_id) if (parent_event) { - this.recordChildEvent( - parent_event.event_id, - original_event, - original_event.event_emitted_by_handler_id - ); + this.recordChildEvent(parent_event.event_id, original_event, original_event.event_emitted_by_handler_id) } } - this.event_history.set(original_event.event_id, original_event); - this.trimHistory(); + this.event_history.set(original_event.event_id, original_event) + this.trimHistory() - original_event.event_pending_buses += 1; - this.pending_event_queue.push(original_event); - this.startRunloop(); + original_event.event_pending_buses += 1 + this.pending_event_queue.push(original_event) + this.startRunloop() - return this._getBusScopedEvent(original_event) as T; + return this._getBusScopedEvent(original_event) as T } emit(event: T, event_key?: EventKey): T { - return this.dispatch(event, event_key); + return this.dispatch(event, event_key) } - find(event_key: EventKey, options?: FindOptions): Promise; - find( - event_key: EventKey, - where: (event: T) => boolean, - options?: FindOptions - ): Promise; + find(event_key: EventKey, options?: FindOptions): Promise + find(event_key: EventKey, where: (event: T) => boolean, options?: FindOptions): Promise async find( event_key: EventKey, - where_or_options: ((event: T) => boolean) | FindOptions = {}, - maybe_options: FindOptions = {} + where_or_options: ((event: T) => boolean) | FindOptions = {}, + maybe_options: FindOptions = {} ): Promise { - const where = typeof where_or_options === "function" ? where_or_options : (() => true); - const options = typeof where_or_options === "function" ? maybe_options : where_or_options; + const where = typeof where_or_options === 'function' ? where_or_options : () => true + const options = typeof where_or_options === 'function' ? maybe_options : where_or_options - return this.findInternal(event_key, where, options); + return this.findInternal(event_key, where, options) } private async findInternal( event_key: EventKey, where: (event: T) => boolean, - options: FindOptions + options: FindOptions ): Promise { - const past = options.past ?? true; - const future = options.future ?? true; - const child_of = options.child_of ?? null; + const past = options.past ?? true + const future = options.future ?? true + const child_of = options.child_of ?? null if (past === false && future === false) { - return null; + return null } const matches = (event: BaseEvent): boolean => { if (!this.eventMatchesKey(event, event_key)) { - return false; + return false } if (!where(event as T)) { - return false; + return false } if (child_of && !this.eventIsChildOf(event, child_of)) { - return false; + return false } - return true; - }; + return true + } if (past !== false || future !== false) { - const now_ms = Date.now(); - const cutoff_ms = - past === true ? null : now_ms - Math.max(0, Number(past)) * 1000; + const now_ms = Date.now() + const cutoff_ms = past === true ? null : now_ms - Math.max(0, Number(past)) * 1000 - const history_values = Array.from(this.event_history.values()); + const history_values = Array.from(this.event_history.values()) for (let i = history_values.length - 1; i >= 0; i -= 1) { - const event = history_values[i]; + const event = history_values[i] if (!matches(event)) { - continue; + continue } - if (event.event_status === "completed") { + if (event.event_status === 'completed') { if (past === false) { - continue; + continue } - if (cutoff_ms !== null && event.event_created_at_ms < cutoff_ms) { - continue; + if (cutoff_ms !== null && Date.parse(event.event_created_at) < cutoff_ms) { + continue } - return this._getBusScopedEvent(event) as T; + return this._getBusScopedEvent(event) as T } if (future !== false) { - return this._getBusScopedEvent(event) as T; + return this._getBusScopedEvent(event) as T } } } if (future === false) { - return null; + return null } return new Promise((resolve, _reject) => { const waiter: FindWaiter = { event_key, matches, - resolve: (event) => resolve(this._getBusScopedEvent(event) as T) - }; + resolve: (event) => resolve(this._getBusScopedEvent(event) as T), + } if (future !== true) { - const timeout_ms = Math.max(0, Number(future)) * 1000; + const timeout_ms = Math.max(0, Number(future)) * 1000 waiter.timeout_id = setTimeout(() => { - this.find_waiters.delete(waiter); - resolve(null); - }, timeout_ms); + this.find_waiters.delete(waiter) + resolve(null) + }, timeout_ms) } - this.find_waiters.add(waiter); - }); + this.find_waiters.add(waiter) + }) } // Called when a handler does `await child.done()` β€” processes the child event @@ -397,115 +370,113 @@ export class EventBus { // we temporarily release it so child handlers on the same bus can acquire it // (preventing deadlock for bus-serial/global-serial modes). We re-acquire after // the child completes so the parent handler can continue with the limiter held. - async _runImmediately( - event: T, - handler_result?: EventResult - ): Promise { - const original_event = event._original_event ?? event; + async _runImmediately(event: T, handler_result?: EventResult): Promise { + const original_event = event._original_event ?? event // Find the parent handler's result: prefer the proxy-provided one (only if // the handler is still running), then this bus's stack, then walk up the // parent event tree (cross-bus case). If none found, we're not inside a // handler and should fall back to waitForCompletion. - const proxy_result = handler_result?.status === "started" ? handler_result : undefined; - const effective_result = proxy_result - ?? this._event_result_stack[this._event_result_stack.length - 1] - ?? this._findInFlightAncestorResult(original_event) - ?? undefined; + const proxy_result = handler_result?.status === 'started' ? handler_result : undefined + const effective_result = + proxy_result ?? + this._event_result_stack[this._event_result_stack.length - 1] ?? + this._findInFlightAncestorResult(original_event) ?? + undefined if (!effective_result) { // Not inside any handler β€” fall back to normal completion waiting - await original_event.waitForCompletion(); - return event; + await original_event.waitForCompletion() + return event } if (!effective_result.queue_jump_hold) { - effective_result.queue_jump_hold = true; - this.immediate_processing_stack_depth += 1; + effective_result.queue_jump_hold = true + this.immediate_processing_stack_depth += 1 } - if (original_event.event_status === "completed") { - return event; + if (original_event.event_status === 'completed') { + return event } // Yield the parent handler's limiter so child handlers can use it. // Null out _held_handler_limiter so concurrent calls from the same handler // (e.g. Promise.all([child1.done(), child2.done()])) don't double-release. - const limiter_to_yield = effective_result?._held_handler_limiter ?? null; + const limiter_to_yield = effective_result?._held_handler_limiter ?? null if (limiter_to_yield) { - effective_result!._held_handler_limiter = null; - limiter_to_yield.release(); + effective_result!._held_handler_limiter = null + limiter_to_yield.release() } try { - if (original_event.event_status === "started") { - await this.runImmediatelyAcrossBuses(original_event); - return event; + if (original_event.event_status === 'started') { + await this.runImmediatelyAcrossBuses(original_event) + return event } - const index = this.pending_event_queue.indexOf(original_event); + const index = this.pending_event_queue.indexOf(original_event) if (index >= 0) { - this.pending_event_queue.splice(index, 1); + this.pending_event_queue.splice(index, 1) } - await this.runImmediatelyAcrossBuses(original_event); - return event; + await this.runImmediatelyAcrossBuses(original_event) + return event } finally { // Re-acquire the parent handler's limiter before returning control. // Only the call that actually released it will re-acquire. - if (limiter_to_yield) { - await limiter_to_yield.acquire(); - effective_result!._held_handler_limiter = limiter_to_yield; + // If the handler timed out while we were processing children, + // runHandlerEntry's finally has already run and the limiter is no longer + // needed β€” skip re-acquire to avoid leaking the limiter. + if (limiter_to_yield && effective_result!.status === 'started') { + await limiter_to_yield.acquire() + effective_result!._held_handler_limiter = limiter_to_yield } } } async waitUntilIdle(): Promise { if (this.isIdleSnapshot()) { - return; + return } return new Promise((resolve) => { - this.idle_waiters.push(resolve); - this.scheduleIdleCheck(); - }); + this.idle_waiters.push(resolve) + this.scheduleIdleCheck() + }) } private scheduleIdleCheck(): void { if (this.idle_check_pending) { - return; + return } - this.idle_check_pending = true; + this.idle_check_pending = true setTimeout(() => { - this.idle_check_pending = false; - this.resolveIdleWaitersIfDone(); - }, 0); + this.idle_check_pending = false + this.resolveIdleWaitersIfDone() + }, 0) } private isIdleSnapshot(): boolean { return ( - this.pending_event_queue.length === 0 && - this.in_flight_event_ids.size === 0 && - !this.hasPendingResults() && - !this.runloop_running - ); + this.pending_event_queue.length === 0 && this.in_flight_event_ids.size === 0 && !this.hasPendingResults() && !this.runloop_running + ) } private resolveIdleWaitersIfDone(): void { if (!this.isIdleSnapshot()) { - this.idle_check_streak = 0; + this.idle_check_streak = 0 if (this.idle_waiters.length > 0) { - this.scheduleIdleCheck(); + this.scheduleIdleCheck() } - return; + return } - this.idle_check_streak += 1; + this.idle_check_streak += 1 if (this.idle_check_streak < 2) { if (this.idle_waiters.length > 0) { - this.scheduleIdleCheck(); + this.scheduleIdleCheck() } - return; + return } - this.idle_check_streak = 0; - const idle_waiters = this.idle_waiters; - this.idle_waiters = []; + this.idle_check_streak = 0 + const idle_waiters = this.idle_waiters + this.idle_waiters = [] for (const resolve of idle_waiters) { - resolve(); + resolve() } } @@ -513,140 +484,128 @@ export class EventBus { for (const event of this.event_history.values()) { for (const result of event.event_results.values()) { if (result.eventbus_name !== this.name) { - continue; + continue } - if (result.status === "pending") { - return true; + if (result.status === 'pending') { + return true } } } - return false; + return false } eventIsChildOf(event: BaseEvent, ancestor: BaseEvent): boolean { if (event.event_id === ancestor.event_id) { - return false; + return false } - let current_parent_id = event.event_parent_id; + let current_parent_id = event.event_parent_id while (current_parent_id) { if (current_parent_id === ancestor.event_id) { - return true; + return true } - const parent = this.event_history.get(current_parent_id); + const parent = this.event_history.get(current_parent_id) if (!parent) { - return false; + return false } - current_parent_id = parent.event_parent_id; + current_parent_id = parent.event_parent_id } - return false; + return false } eventIsParentOf(event: BaseEvent, descendant: BaseEvent): boolean { - return this.eventIsChildOf(descendant, event); + return this.eventIsChildOf(descendant, event) } - recordChildEvent( - parent_event_id: string, - child_event: BaseEvent, - handler_id?: string - ): void { - const original_child = child_event._original_event ?? child_event; - const parent_event = this.event_history.get(parent_event_id); + recordChildEvent(parent_event_id: string, child_event: BaseEvent, handler_id?: string): void { + const original_child = child_event._original_event ?? child_event + const parent_event = this.event_history.get(parent_event_id) - const target_handler_id = - handler_id ?? original_child.event_emitted_by_handler_id ?? undefined; + const target_handler_id = handler_id ?? original_child.event_emitted_by_handler_id ?? undefined if (target_handler_id) { - const current_result = parent_event?.event_results.get(target_handler_id); + const current_result = parent_event?.event_results.get(target_handler_id) if (current_result) { if (!current_result.event_children.some((child) => child.event_id === original_child.event_id)) { - current_result.event_children.push(original_child); + current_result.event_children.push(original_child) } } - original_child.event_emitted_by_handler_id = target_handler_id; + original_child.event_emitted_by_handler_id = target_handler_id } } logTree(): string { - const parent_to_children = new Map(); + const parent_to_children = new Map() const add_child = (parent_id: string | null, child: BaseEvent): void => { - const existing = parent_to_children.get(parent_id) ?? []; - existing.push(child); - parent_to_children.set(parent_id, existing); - }; + const existing = parent_to_children.get(parent_id) ?? [] + existing.push(child) + parent_to_children.set(parent_id, existing) + } for (const event of this.event_history.values()) { - add_child(event.event_parent_id ?? null, event); + add_child(event.event_parent_id ?? null, event) } for (const children of parent_to_children.values()) { - children.sort((a, b) => a.event_created_at_ms - b.event_created_at_ms); + children.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) } - const root_events: BaseEvent[] = []; - const seen = new Set(); + const root_events: BaseEvent[] = [] + const seen = new Set() for (const event of this.event_history.values()) { - const parent_id = event.event_parent_id; + const parent_id = event.event_parent_id if (!parent_id || parent_id === event.event_id || !this.event_history.has(parent_id)) { if (!seen.has(event.event_id)) { - root_events.push(event); - seen.add(event.event_id); + root_events.push(event) + seen.add(event.event_id) } } } if (root_events.length === 0) { - return "(No events in history)"; + return '(No events in history)' } - const lines: string[] = []; - lines.push(`πŸ“Š Event History Tree for ${this.name}`); - lines.push("=".repeat(80)); + const lines: string[] = [] + lines.push(`πŸ“Š Event History Tree for ${this.name}`) + lines.push('='.repeat(80)) - root_events.sort((a, b) => a.event_created_at_ms - b.event_created_at_ms); - const visited = new Set(); + root_events.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) + const visited = new Set() root_events.forEach((event, index) => { - lines.push( - this.buildTreeLine( - event, - "", - index === root_events.length - 1, - parent_to_children, - visited - ) - ); - }); - - lines.push("=".repeat(80)); - - return lines.join("\n"); + lines.push(this.buildTreeLine(event, '', index === root_events.length - 1, parent_to_children, visited)) + }) + + lines.push('='.repeat(80)) + + return lines.join('\n') } // Per-bus check: true only if this specific bus has a handler on its stack. - // For cross-bus queue-jumping, done() uses the _is_handler_scoped flag on - // the bus proxy instead (set by _getBusScopedEvent when handler_result exists). + // For cross-bus queue-jumping, _runImmediately uses _findInFlightAncestorResult() + // to walk up the parent event tree, and the bus proxy passes handler_result + // to _runImmediately so it can yield/reacquire the correct limiter. isInsideHandler(): boolean { - return this._event_result_stack.length > 0; + return this._event_result_stack.length > 0 } // Walk up the parent event chain to find an in-flight ancestor handler result. // Returns the result if found, null otherwise. Used by _runImmediately to detect // cross-bus queue-jump scenarios where the calling handler is on a different bus. _findInFlightAncestorResult(event: BaseEvent): EventResult | null { - const original = event._original_event ?? event; - let current_parent_id = original.event_parent_id; - let current_handler_id = original.event_emitted_by_handler_id; + const original = event._original_event ?? event + let current_parent_id = original.event_parent_id + let current_handler_id = original.event_emitted_by_handler_id while (current_handler_id && current_parent_id) { - const parent = EventBus.findEventById(current_parent_id); - if (!parent) break; - const handler_result = parent.event_results.get(current_handler_id); - if (handler_result && handler_result.status === "started") return handler_result; - current_parent_id = parent.event_parent_id; - current_handler_id = parent.event_emitted_by_handler_id; - } - return null; + const parent = EventBus.findEventById(current_parent_id) + if (!parent) break + const handler_result = parent.event_results.get(current_handler_id) + if (handler_result && handler_result.status === 'started') return handler_result + current_parent_id = parent.event_parent_id + current_handler_id = parent.event_emitted_by_handler_id + } + return null } // Processes a queue-jumped event across all buses that have it dispatched. @@ -661,570 +620,514 @@ export class EventBus { // limiter normally. This works because _runImmediately already released the // parent's handler limiter via yield-and-reacquire. private async runImmediatelyAcrossBuses(event: BaseEvent): Promise { - const buses = this.getBusesForImmediateRun(event); + const buses = this.getBusesForImmediateRun(event) if (buses.length === 0) { - await event.waitForCompletion(); - return; + await event.waitForCompletion() + return } for (const bus of buses) { - bus.immediate_processing_stack_depth += 1; + bus.immediate_processing_stack_depth += 1 } // Determine which event limiter the initiating bus resolves to, so we can // detect when other buses share the same instance (global-serial). - const initiating_event_limiter = this.resolveEventLimiter(event); + const initiating_event_limiter = this.resolveEventLimiter(event) try { for (const bus of buses) { - const index = bus.pending_event_queue.indexOf(event); + const index = bus.pending_event_queue.indexOf(event) if (index >= 0) { - bus.pending_event_queue.splice(index, 1); + bus.pending_event_queue.splice(index, 1) } if (bus.eventHasVisited(event)) { - continue; + continue } if (bus.in_flight_event_ids.has(event.event_id)) { - continue; + continue } - bus.in_flight_event_ids.add(event.event_id); + bus.in_flight_event_ids.add(event.event_id) // Bypass event limiter on the initiating bus (we're already inside a handler // that acquired it). For other buses, only bypass if they resolve to the same // limiter instance (global-serial shares one limiter across all buses). - const bus_event_limiter = bus.resolveEventLimiter(event); + const bus_event_limiter = bus.resolveEventLimiter(event) const should_bypass_event_limiter = - bus === this || - (initiating_event_limiter !== null && - bus_event_limiter === initiating_event_limiter); + bus === this || (initiating_event_limiter !== null && bus_event_limiter === initiating_event_limiter) await bus.scheduleEventProcessing(event, { - bypass_event_limiters: should_bypass_event_limiter - }); + bypass_event_limiters: should_bypass_event_limiter, + }) } - if (event.event_status !== "completed") { - await event.waitForCompletion(); + if (event.event_status !== 'completed') { + await event.waitForCompletion() } } finally { for (const bus of buses) { - bus.immediate_processing_stack_depth = Math.max( - 0, - bus.immediate_processing_stack_depth - 1 - ); - bus.releaseImmediateProcessingWaiters(); + bus.immediate_processing_stack_depth = Math.max(0, bus.immediate_processing_stack_depth - 1) + bus.releaseImmediateProcessingWaiters() } } } private getBusesForImmediateRun(event: BaseEvent): EventBus[] { - const ordered: EventBus[] = []; - const seen = new Set(); + const ordered: EventBus[] = [] + const seen = new Set() - const event_path = Array.isArray(event.event_path) ? event.event_path : []; + const event_path = Array.isArray(event.event_path) ? event.event_path : [] for (const name of event_path) { for (const bus of EventBus.instances) { if (bus.name !== name) { - continue; + continue } if (!bus.event_history.has(event.event_id)) { - continue; + continue } if (bus.eventHasVisited(event)) { - continue; + continue } if (!seen.has(bus)) { - ordered.push(bus); - seen.add(bus); + ordered.push(bus) + seen.add(bus) } } } if (!seen.has(this) && this.event_history.has(event.event_id)) { - ordered.push(this); + ordered.push(this) } - return ordered; + return ordered } private releaseImmediateProcessingWaiters(): void { - if ( - this.immediate_processing_stack_depth !== 0 || - this.immediate_processing_waiters.length === 0 - ) { - return; - } - const waiters = this.immediate_processing_waiters; - this.immediate_processing_waiters = []; + if (this.immediate_processing_stack_depth !== 0 || this.immediate_processing_waiters.length === 0) { + return + } + const waiters = this.immediate_processing_waiters + this.immediate_processing_waiters = [] for (const resolve of waiters) { try { // Each waiter is a Promise resolver created by runloop() while it was paused. // Resolving it resumes that runloop tick so it can continue draining the queue. - resolve(); + resolve() } catch (error) { // Should never happen: these are internal Promise resolve callbacks. - console.error("[bubus] immediate processing waiter threw", error); + console.error('[bubus] immediate processing waiter threw', error) } } } - private startRunloop(): void { if (this.runloop_running) { - return; + return } - this.runloop_running = true; + this.runloop_running = true queueMicrotask(() => { - void this.runloop(); - }); + void this.runloop() + }) } private async scheduleEventProcessing( event: BaseEvent, options: { - bypass_event_limiters?: boolean; - pre_acquired_limiter?: AsyncLimiter | null; + bypass_event_limiters?: boolean + pre_acquired_limiter?: AsyncLimiter | null } = {} ): Promise { try { - const limiter = options.bypass_event_limiters ? null : this.resolveEventLimiter(event); - const pre_acquired_limiter = options.pre_acquired_limiter ?? null; + const limiter = options.bypass_event_limiters ? null : this.resolveEventLimiter(event) + const pre_acquired_limiter = options.pre_acquired_limiter ?? null if (pre_acquired_limiter) { - await this.processEvent(event); + await this.processEvent(event) } else { await runWithLimiter(limiter, async () => { - await this.processEvent(event); - }); + await this.processEvent(event) + }) } } finally { if (options.pre_acquired_limiter) { - options.pre_acquired_limiter.release(); + options.pre_acquired_limiter.release() } - this.in_flight_event_ids.delete(event.event_id); - this.resolveIdleWaitersIfDone(); + this.in_flight_event_ids.delete(event.event_id) + this.resolveIdleWaitersIfDone() } } private async runloop(): Promise { for (;;) { while (this.pending_event_queue.length > 0) { - await Promise.resolve(); + await Promise.resolve() if (this.immediate_processing_stack_depth > 0) { await new Promise((resolve) => { - this.immediate_processing_waiters.push(resolve); - }); - continue; + this.immediate_processing_waiters.push(resolve) + }) + continue } - const next_event = this.pending_event_queue[0]; + const next_event = this.pending_event_queue[0] if (!next_event) { - continue; + continue } - const original_event = next_event._original_event ?? next_event; + const original_event = next_event._original_event ?? next_event if (this.eventHasVisited(original_event)) { - this.pending_event_queue.shift(); - continue; + this.pending_event_queue.shift() + continue } - let pre_acquired_limiter: AsyncLimiter | null = null; - const event_limiter = this.resolveEventLimiter(original_event); + let pre_acquired_limiter: AsyncLimiter | null = null + const event_limiter = this.resolveEventLimiter(original_event) if (event_limiter) { - await event_limiter.acquire(); - pre_acquired_limiter = event_limiter; + await event_limiter.acquire() + pre_acquired_limiter = event_limiter } - this.pending_event_queue.shift(); + this.pending_event_queue.shift() if (this.in_flight_event_ids.has(original_event.event_id)) { if (pre_acquired_limiter) { - pre_acquired_limiter.release(); + pre_acquired_limiter.release() } - continue; + continue } - this.in_flight_event_ids.add(original_event.event_id); + this.in_flight_event_ids.add(original_event.event_id) void this.scheduleEventProcessing(original_event, { bypass_event_limiters: true, - pre_acquired_limiter - }); - await Promise.resolve(); + pre_acquired_limiter, + }) + await Promise.resolve() } - this.runloop_running = false; + this.runloop_running = false if (this.pending_event_queue.length > 0) { - this.startRunloop(); - return; + this.startRunloop() + return } - this.resolveIdleWaitersIfDone(); - return; + this.resolveIdleWaitersIfDone() + return } } private async processEvent(event: BaseEvent): Promise { if (this.eventHasVisited(event)) { - return; + return } - event.markStarted(); - this.notifyFinders(event); + event.markStarted() + this.notifyFinders(event) const deadlock_timer = event.event_timeout === null ? null : setTimeout(() => { - if (event.event_status === "completed") { - return; + if (event.event_status === 'completed') { + return } - const started_at = event.event_started_at ?? event.event_created_at; - const elapsed_ms = Date.now() - Date.parse(started_at); - const elapsed_seconds = (elapsed_ms / 1000).toFixed(1); + const started_at = event.event_started_at ?? event.event_created_at + const elapsed_ms = Date.now() - Date.parse(started_at) + const elapsed_seconds = (elapsed_ms / 1000).toFixed(1) console.warn( `[bubus] Possible deadlock: ${event.event_type}#${event.event_id} still ${event.event_status} on ${this.name} after ${elapsed_seconds}s (timeout ${event.event_timeout}s)` - ); - }, event.event_timeout * 1000); + ) + }, event.event_timeout * 1000) try { - const handler_entries = this.createPendingHandlerResults(event); + const handler_entries = this.createPendingHandlerResults(event) - const handler_promises = handler_entries.map((entry) => - this.runHandlerEntry(event, entry.handler, entry.result, entry.options) - ); - await Promise.all(handler_promises); + const handler_promises = handler_entries.map((entry) => this.runHandlerEntry(event, entry.handler, entry.result, entry.options)) + await Promise.all(handler_promises) - event.event_pending_buses = Math.max(0, event.event_pending_buses - 1); - event.tryFinalizeCompletion(); - if (event.event_status === "completed") { - this.notifyParentsFor(event); + event.event_pending_buses = Math.max(0, event.event_pending_buses - 1) + event.tryFinalizeCompletion() + if (event.event_status === 'completed') { + this.notifyParentsFor(event) } } finally { if (deadlock_timer) { - clearTimeout(deadlock_timer); + clearTimeout(deadlock_timer) } } } private resolveEventLimiter(event: BaseEvent): AsyncLimiter | null { - const resolved = resolveConcurrencyMode( - event.event_concurrency, - this.event_concurrency_default - ); - return limiterForMode(resolved, EventBus.global_event_limiter, this.bus_event_limiter); + const resolved = resolveConcurrencyMode(event.event_concurrency, this.event_concurrency_default) + return limiterForMode(resolved, EventBus.global_event_limiter, this.bus_event_limiter) } - private resolveHandlerLimiter( - event: BaseEvent, - options?: HandlerOptions - ): AsyncLimiter | null { - const event_override = - event.handler_concurrency && event.handler_concurrency !== "auto" - ? event.handler_concurrency - : undefined; + private resolveHandlerLimiter(event: BaseEvent, options?: HandlerOptions): AsyncLimiter | null { + const event_override = event.handler_concurrency && event.handler_concurrency !== 'auto' ? event.handler_concurrency : undefined const handler_override = - options?.handler_concurrency && options.handler_concurrency !== "auto" - ? options.handler_concurrency - : undefined; - const fallback = this.handler_concurrency_default; - const resolved = resolveConcurrencyMode( - event_override ?? handler_override ?? fallback, - fallback - ); - return limiterForMode(resolved, EventBus.global_handler_limiter, this.bus_handler_limiter); + options?.handler_concurrency && options.handler_concurrency !== 'auto' ? options.handler_concurrency : undefined + const fallback = this.handler_concurrency_default + const resolved = resolveConcurrencyMode(event_override ?? handler_override ?? fallback, fallback) + return limiterForMode(resolved, EventBus.global_handler_limiter, this.bus_handler_limiter) } - private async runHandlerEntry( - event: BaseEvent, - handler: EventHandler, - result: EventResult, - options?: HandlerOptions - ): Promise { - if (result.status === "error" && result.error instanceof EventHandlerCancelledError) { - return; + // Manually manages the handler concurrency limiter instead of using runWithLimiter, + // because _runImmediately may temporarily yield it during queue-jumping. If the handler + // times out while the limiter is yielded, runWithLimiter's unconditional release() would + // double-release (and _runImmediately's later re-acquire would leak). By tracking + // _held_handler_limiter, we only release if we still own the limiter. + private async runHandlerEntry(event: BaseEvent, handler: EventHandler, result: EventResult, options?: HandlerOptions): Promise { + if (result.status === 'error' && result.error instanceof EventHandlerCancelledError) { + return } - const handler_event = this._getBusScopedEvent(event, result); - const limiter = this.resolveHandlerLimiter(event, options); + const handler_event = this._getBusScopedEvent(event, result) + const limiter = this.resolveHandlerLimiter(event, options) - await runWithLimiter(limiter, async () => { - if (result.status === "error" && result.error instanceof EventHandlerCancelledError) { - return; - } + if (limiter) { + await limiter.acquire() + } - // Track which limiter this handler holds so _runImmediately can yield it - // (release before child processing, re-acquire after) to prevent deadlock. - result._held_handler_limiter = limiter; - this._event_result_stack.push(result); - try { - result.markStarted(); - const handler_result = await this.runHandlerWithTimeout(event, handler, handler_event); - if (event.event_result_schema) { - const parsed = event.event_result_schema.safeParse(handler_result); - if (parsed.success) { - result.markCompleted(parsed.data); - } else { - const error = new Error( - `handler result did not match event_result_schema: ${parsed.error.message}` - ); - result.markError(error); - event.markFailed(error); - } - } else { - result.markCompleted(handler_result); - } - } catch (error) { - if (error instanceof EventHandlerTimeoutError) { - result.markError(error); - event.markFailed(error); - const cancelled_error = new EventHandlerCancelledError( - `Cancelled pending handler due to parent timeout: ${error.message}`, - { - event_type: event.event_type, - handler_name: result.handler_name, - parent_error: error - } - ); - this.cancelPendingChildProcessing(event, cancelled_error); + if (result.status === 'error' && result.error instanceof EventHandlerCancelledError) { + if (limiter) limiter.release() + return + } + + // Track which limiter this handler holds so _runImmediately can yield it + // (release before child processing, re-acquire after) to prevent deadlock. + result._held_handler_limiter = limiter + this._event_result_stack.push(result) + try { + result.markStarted() + const handler_result = await this.runHandlerWithTimeout(event, handler, handler_event) + if (event.event_result_schema) { + const parsed = event.event_result_schema.safeParse(handler_result) + if (parsed.success) { + result.markCompleted(parsed.data) } else { - result.markError(error); - event.markFailed(error); - } - } finally { - result._held_handler_limiter = null; - const stack_idx = this._event_result_stack.indexOf(result); - if (stack_idx >= 0) { - this._event_result_stack.splice(stack_idx, 1); - } - if (result.queue_jump_hold) { - result.queue_jump_hold = false; - this.immediate_processing_stack_depth = Math.max( - 0, - this.immediate_processing_stack_depth - 1 - ); - this.releaseImmediateProcessingWaiters(); + const error = new Error(`handler result did not match event_result_schema: ${parsed.error.message}`) + result.markError(error) } + } else { + result.markCompleted(handler_result) } - }); + } catch (error) { + if (error instanceof EventHandlerTimeoutError) { + result.markError(error) + const cancelled_error = new EventHandlerCancelledError(`Cancelled pending handler due to parent timeout: ${error.message}`, { + event_type: event.event_type, + handler_name: result.handler_name, + parent_error: error, + }) + this.cancelPendingChildProcessing(event, cancelled_error) + } else { + result.markError(error) + } + } finally { + // If _runImmediately yielded our limiter (_held_handler_limiter is null), it was + // already released. Only release if we still own it (normal completion or no yield). + const handler_still_owns_limiter = result._held_handler_limiter !== null + result._held_handler_limiter = null + const stack_idx = this._event_result_stack.indexOf(result) + if (stack_idx >= 0) { + this._event_result_stack.splice(stack_idx, 1) + } + if (result.queue_jump_hold) { + result.queue_jump_hold = false + this.immediate_processing_stack_depth = Math.max(0, this.immediate_processing_stack_depth - 1) + this.releaseImmediateProcessingWaiters() + } + if (limiter && handler_still_owns_limiter) { + limiter.release() + } + } } - - - private async runHandlerWithTimeout( - event: BaseEvent, - handler: EventHandler, - handler_event: BaseEvent = event - ): Promise { - const handler_name = handler.name || "anonymous"; - const warn_ms = 15000; - const started_at_ms = Date.now(); - const should_warn = - event.event_timeout === null || event.event_timeout * 1000 > warn_ms; + private async runHandlerWithTimeout(event: BaseEvent, handler: EventHandler, handler_event: BaseEvent = event): Promise { + const handler_name = handler.name || 'anonymous' + const warn_ms = 15000 + const started_at_ms = Date.now() + const should_warn = event.event_timeout === null || event.event_timeout * 1000 > warn_ms const warn_timer = should_warn ? setTimeout(() => { - const elapsed_ms = Date.now() - started_at_ms; - const elapsed_seconds = (elapsed_ms / 1000).toFixed(1); - console.warn( - `[bubus] Slow handler: ${event.event_type}.${handler_name} running ${elapsed_seconds}s on ${this.name}` - ); + const elapsed_ms = Date.now() - started_at_ms + const elapsed_seconds = (elapsed_ms / 1000).toFixed(1) + console.warn(`[bubus] Slow handler: ${event.event_type}.${handler_name} running ${elapsed_seconds}s on ${this.name}`) }, warn_ms) - : null; + : null const clear_warn = () => { if (warn_timer) { - clearTimeout(warn_timer); + clearTimeout(warn_timer) } - }; + } const run_handler = () => - Promise.resolve().then(() => - runWithAsyncContext(event._dispatch_context ?? null, () => handler(handler_event)) - ); + Promise.resolve().then(() => runWithAsyncContext(event._dispatch_context ?? null, () => handler(handler_event))) if (event.event_timeout === null) { - return run_handler().finally(clear_warn); + return run_handler().finally(clear_warn) } - const timeout_seconds = event.event_timeout; - const timeout_ms = timeout_seconds * 1000; + const timeout_seconds = event.event_timeout + const timeout_ms = timeout_seconds * 1000 - const { promise, resolve, reject } = withResolvers(); - let settled = false; + const { promise, resolve, reject } = withResolvers() + let settled = false const finalize = (fn: (value?: unknown) => void) => { return (value?: unknown) => { if (settled) { - return; + return } - settled = true; - clearTimeout(timer); - clear_warn(); - fn(value); - }; - }; + settled = true + clearTimeout(timer) + clear_warn() + fn(value) + } + } const timer = setTimeout(() => { finalize(reject)( - new EventHandlerTimeoutError( - `handler ${handler_name} timed out after ${timeout_seconds}s`, - { - event_type: event.event_type, - handler_name, - timeout_seconds - } - ) - ); - }, timeout_ms); + new EventHandlerTimeoutError(`handler ${handler_name} timed out after ${timeout_seconds}s`, { + event_type: event.event_type, + handler_name, + timeout_seconds, + }) + ) + }, timeout_ms) - run_handler().then(finalize(resolve)).catch(finalize(reject)); + run_handler().then(finalize(resolve)).catch(finalize(reject)) - return promise; + return promise } private eventHasVisited(event: BaseEvent): boolean { - const results = Array.from(event.event_results.values()).filter( - (result) => result.eventbus_name === this.name - ); + const results = Array.from(event.event_results.values()).filter((result) => result.eventbus_name === this.name) if (results.length === 0) { - return false; + return false } - return results.every( - (result) => result.status === "completed" || result.status === "error" - ); + return results.every((result) => result.status === 'completed' || result.status === 'error') } private notifyParentsFor(event: BaseEvent): void { - const visited = new Set(); - let parent_id = event.event_parent_id; + const visited = new Set() + let parent_id = event.event_parent_id while (parent_id && !visited.has(parent_id)) { - visited.add(parent_id); - const parent = EventBus.findEventById(parent_id); + visited.add(parent_id) + const parent = EventBus.findEventById(parent_id) if (!parent) { - break; + break } - parent.tryFinalizeCompletion(); - if (parent.event_status !== "completed") { - break; + parent.tryFinalizeCompletion() + if (parent.event_status !== 'completed') { + break } - parent_id = parent.event_parent_id; + parent_id = parent.event_parent_id } } _getBusScopedEvent(event: T, handler_result?: EventResult): T { - const original_event = event._original_event ?? event; - const bus = this; - const parent_event_id = original_event.event_id; - const handler_id = handler_result?.handler_id; + const original_event = event._original_event ?? event + const bus = this + const parent_event_id = original_event.event_id + const handler_id = handler_result?.handler_id const bus_proxy = new Proxy(bus, { get(target, prop, receiver) { - if (prop === "_runImmediately") { + if (prop === '_runImmediately') { return (child_event: BaseEvent) => { - const runner = Reflect.get(target, prop, receiver) as ( - event: BaseEvent, - handler_result?: EventResult - ) => Promise; - return runner.call(target, child_event, handler_result); - }; + const runner = Reflect.get(target, prop, receiver) as (event: BaseEvent, handler_result?: EventResult) => Promise + return runner.call(target, child_event, handler_result) + } } - if (prop === "dispatch" || prop === "emit") { + if (prop === 'dispatch' || prop === 'emit') { return (child_event: BaseEvent, event_key?: EventKey) => { - const original_child = child_event._original_event ?? child_event; + const original_child = child_event._original_event ?? child_event if (!original_child.event_parent_id) { - original_child.event_parent_id = parent_event_id; + original_child.event_parent_id = parent_event_id } if (handler_id && !original_child.event_emitted_by_handler_id) { - original_child.event_emitted_by_handler_id = handler_id; + original_child.event_emitted_by_handler_id = handler_id } - const dispatcher = Reflect.get(target, prop, receiver) as ( - event: BaseEvent, - event_key?: EventKey - ) => BaseEvent; - const dispatched = dispatcher.call(target, original_child, event_key); - return target._getBusScopedEvent(dispatched, handler_result); - }; + const dispatcher = Reflect.get(target, prop, receiver) as (event: BaseEvent, event_key?: EventKey) => BaseEvent + const dispatched = dispatcher.call(target, original_child, event_key) + return target._getBusScopedEvent(dispatched, handler_result) + } } - return Reflect.get(target, prop, receiver); - } - }); + return Reflect.get(target, prop, receiver) + }, + }) const scoped = new Proxy(original_event, { get(target, prop, receiver) { - if (prop === "bus") { - return bus_proxy; + if (prop === 'bus') { + return bus_proxy } - if (prop === "_original_event") { - return target; + if (prop === '_original_event') { + return target } - return Reflect.get(target, prop, receiver); + return Reflect.get(target, prop, receiver) }, set(target, prop, value) { - if (prop === "bus") { - return true; + if (prop === 'bus') { + return true } - return Reflect.set(target, prop, value, target); + return Reflect.set(target, prop, value, target) }, has(target, prop) { - if (prop === "bus") { - return true; + if (prop === 'bus') { + return true } - if (prop === "_original_event") { - return true; + if (prop === '_original_event') { + return true } - return Reflect.has(target, prop); - } - }); + return Reflect.has(target, prop) + }, + }) - return scoped as T; + return scoped as T } - private cancelPendingChildProcessing( - event: BaseEvent, - error: EventHandlerCancelledError - ): void { - const visited = new Set(); + private cancelPendingChildProcessing(event: BaseEvent, error: EventHandlerCancelledError): void { + const visited = new Set() const cancel_child = (child: BaseEvent): void => { - const original_child = child._original_event ?? child; + const original_child = child._original_event ?? child if (visited.has(original_child.event_id)) { - return; + return } - visited.add(original_child.event_id); + visited.add(original_child.event_id) - const path = Array.isArray(original_child.event_path) - ? original_child.event_path - : []; - const buses_to_cancel = new Set(path); + const path = Array.isArray(original_child.event_path) ? original_child.event_path : [] + const buses_to_cancel = new Set(path) for (const bus of EventBus.instances) { if (!buses_to_cancel.has(bus.name)) { - continue; + continue } - bus.cancelEventOnBus(original_child, error); + bus.cancelEventOnBus(original_child, error) } for (const grandchild of original_child.event_children) { - cancel_child(grandchild); + cancel_child(grandchild) } - }; + } for (const child of event.event_children) { - cancel_child(child); + cancel_child(child) } } private cancelEventOnBus(event: BaseEvent, error: EventHandlerCancelledError): void { - const original_event = event._original_event ?? event; - const handler_entries = this.createPendingHandlerResults(original_event); - let updated = false; + const original_event = event._original_event ?? event + const handler_entries = this.createPendingHandlerResults(original_event) + let updated = false for (const entry of handler_entries) { - if (entry.result.status === "pending") { - entry.result.markError(error); - updated = true; + if (entry.result.status === 'pending') { + entry.result.markError(error) + updated = true } } - let removed = 0; + let removed = 0 if (this.pending_event_queue.length > 0) { - const before_len = this.pending_event_queue.length; + const before_len = this.pending_event_queue.length this.pending_event_queue = this.pending_event_queue.filter( (queued) => (queued._original_event ?? queued).event_id !== original_event.event_id - ); - removed = before_len - this.pending_event_queue.length; + ) + removed = before_len - this.pending_event_queue.length } if (removed > 0 && !this.in_flight_event_ids.has(original_event.event_id)) { - original_event.event_pending_buses = Math.max(0, original_event.event_pending_buses - 1); + original_event.event_pending_buses = Math.max(0, original_event.event_pending_buses - 1) } if (updated || removed > 0) { - original_event.tryFinalizeCompletion(); - if (original_event.event_status === "completed") { - this.notifyParentsFor(original_event); + original_event.tryFinalizeCompletion() + if (original_event.event_status === 'completed') { + this.notifyParentsFor(original_event) } } } @@ -1236,91 +1139,69 @@ export class EventBus { parent_to_children: Map, visited: Set ): string { - const connector = is_last ? "└── " : "β”œβ”€β”€ "; - const status_icon = - event.event_status === "completed" - ? "βœ…" - : event.event_status === "started" - ? "πŸƒ" - : "⏳"; - - const created_at = this.formatTimestamp(event.event_created_at); - let timing = `[${created_at}`; + const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' + const status_icon = event.event_status === 'completed' ? 'βœ…' : event.event_status === 'started' ? 'πŸƒ' : '⏳' + + const created_at = this.formatTimestamp(event.event_created_at) + let timing = `[${created_at}` if (event.event_completed_at) { - const created_ms = Date.parse(event.event_created_at); - const completed_ms = Date.parse(event.event_completed_at); + const created_ms = Date.parse(event.event_created_at) + const completed_ms = Date.parse(event.event_completed_at) if (!Number.isNaN(created_ms) && !Number.isNaN(completed_ms)) { - const duration = (completed_ms - created_ms) / 1000; - timing += ` (${duration.toFixed(3)}s)`; + const duration = (completed_ms - created_ms) / 1000 + timing += ` (${duration.toFixed(3)}s)` } } - timing += "]"; + timing += ']' - const line = `${indent}${connector}${status_icon} ${event.event_type}#${event.event_id.slice(-4)} ${timing}`; + const line = `${indent}${connector}${status_icon} ${event.event_type}#${event.event_id.slice(-4)} ${timing}` if (visited.has(event.event_id)) { - return line; + return line } - visited.add(event.event_id); + visited.add(event.event_id) - const extension = is_last ? " " : "β”‚ "; - const new_indent = indent + extension; + const extension = is_last ? ' ' : 'β”‚ ' + const new_indent = indent + extension - const result_items: Array<{ type: "result"; result: EventResult } | { type: "child"; child: BaseEvent }> = - []; - const printed_child_ids = new Set(); + const result_items: Array<{ type: 'result'; result: EventResult } | { type: 'child'; child: BaseEvent }> = [] + const printed_child_ids = new Set() const results = Array.from(event.event_results.values()).sort((a, b) => { - const a_time = a.started_at ? Date.parse(a.started_at) : 0; - const b_time = b.started_at ? Date.parse(b.started_at) : 0; - return a_time - b_time; - }); + const a_time = a.started_at ? Date.parse(a.started_at) : 0 + const b_time = b.started_at ? Date.parse(b.started_at) : 0 + return a_time - b_time + }) results.forEach((result) => { - result_items.push({ type: "result", result }); + result_items.push({ type: 'result', result }) result.event_children.forEach((child) => { - printed_child_ids.add(child.event_id); - }); - }); + printed_child_ids.add(child.event_id) + }) + }) - const children = parent_to_children.get(event.event_id) ?? []; + const children = parent_to_children.get(event.event_id) ?? [] children.forEach((child) => { if (!printed_child_ids.has(child.event_id) && !child.event_emitted_by_handler_id) { - result_items.push({ type: "child", child }); + result_items.push({ type: 'child', child }) } - }); + }) if (result_items.length === 0) { - return line; + return line } - const child_lines: string[] = []; + const child_lines: string[] = [] result_items.forEach((item, index) => { - const is_last_item = index === result_items.length - 1; - if (item.type === "result") { - child_lines.push( - this.buildResultLine( - item.result, - new_indent, - is_last_item, - parent_to_children, - visited - ) - ); + const is_last_item = index === result_items.length - 1 + if (item.type === 'result') { + child_lines.push(this.buildResultLine(item.result, new_indent, is_last_item, parent_to_children, visited)) } else { - child_lines.push( - this.buildTreeLine( - item.child, - new_indent, - is_last_item, - parent_to_children, - visited - ) - ); + child_lines.push(this.buildTreeLine(item.child, new_indent, is_last_item, parent_to_children, visited)) } - }); + }) - return [line, ...child_lines].join("\n"); + return [line, ...child_lines].join('\n') } private buildResultLine( @@ -1330,177 +1211,152 @@ export class EventBus { parent_to_children: Map, visited: Set ): string { - const connector = is_last ? "└── " : "β”œβ”€β”€ "; - const status_icon = - result.status === "completed" - ? "βœ…" - : result.status === "error" - ? "❌" - : result.status === "started" - ? "πŸƒ" - : "⏳"; + const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' + const status_icon = result.status === 'completed' ? 'βœ…' : result.status === 'error' ? '❌' : result.status === 'started' ? 'πŸƒ' : '⏳' const handler_label = - result.handler_name && result.handler_name !== "anonymous" + result.handler_name && result.handler_name !== 'anonymous' ? result.handler_name : result.handler_file_path ? result.handler_file_path - : "anonymous"; - const handler_display = `${result.eventbus_name}.${handler_label}#${result.handler_id.slice(-4)}`; - let line = `${indent}${connector}${status_icon} ${handler_display}`; + : 'anonymous' + const handler_display = `${result.eventbus_name}.${handler_label}#${result.handler_id.slice(-4)}` + let line = `${indent}${connector}${status_icon} ${handler_display}` if (result.started_at) { - line += ` [${this.formatTimestamp(result.started_at)}`; + line += ` [${this.formatTimestamp(result.started_at)}` if (result.completed_at) { - const started_ms = Date.parse(result.started_at); - const completed_ms = Date.parse(result.completed_at); + const started_ms = Date.parse(result.started_at) + const completed_ms = Date.parse(result.completed_at) if (!Number.isNaN(started_ms) && !Number.isNaN(completed_ms)) { - const duration = (completed_ms - started_ms) / 1000; - line += ` (${duration.toFixed(3)}s)`; + const duration = (completed_ms - started_ms) / 1000 + line += ` (${duration.toFixed(3)}s)` } } - line += "]"; + line += ']' } - if (result.status === "error" && result.error) { + if (result.status === 'error' && result.error) { if (result.error instanceof EventHandlerTimeoutError) { - line += ` ⏱️ Timeout: ${result.error.message}`; + line += ` ⏱️ Timeout: ${result.error.message}` } else if (result.error instanceof EventHandlerCancelledError) { - line += ` 🚫 Cancelled: ${result.error.message}`; + line += ` 🚫 Cancelled: ${result.error.message}` } else { - const error_name = result.error instanceof Error ? result.error.name : "Error"; - const error_message = result.error instanceof Error ? result.error.message : String(result.error); - line += ` ☠️ ${error_name}: ${error_message}`; + const error_name = result.error instanceof Error ? result.error.name : 'Error' + const error_message = result.error instanceof Error ? result.error.message : String(result.error) + line += ` ☠️ ${error_name}: ${error_message}` } - } else if (result.status === "completed") { - line += ` β†’ ${this.formatResultValue(result.result)}`; + } else if (result.status === 'completed') { + line += ` β†’ ${this.formatResultValue(result.result)}` } - const extension = is_last ? " " : "β”‚ "; - const new_indent = indent + extension; + const extension = is_last ? ' ' : 'β”‚ ' + const new_indent = indent + extension if (result.event_children.length === 0) { - return line; + return line } - const child_lines: string[] = []; - const direct_children = result.event_children; - const parent_children = parent_to_children.get(result.event_id) ?? []; - const emitted_children = parent_children.filter( - (child) => child.event_emitted_by_handler_id === result.handler_id - ); - const children_by_id = new Map(); + const child_lines: string[] = [] + const direct_children = result.event_children + const parent_children = parent_to_children.get(result.event_id) ?? [] + const emitted_children = parent_children.filter((child) => child.event_emitted_by_handler_id === result.handler_id) + const children_by_id = new Map() direct_children.forEach((child) => { - children_by_id.set(child.event_id, child); - }); + children_by_id.set(child.event_id, child) + }) emitted_children.forEach((child) => { if (!children_by_id.has(child.event_id)) { - children_by_id.set(child.event_id, child); + children_by_id.set(child.event_id, child) } - }); - const children_to_print = Array.from(children_by_id.values()).filter( - (child) => !visited.has(child.event_id) - ); + }) + const children_to_print = Array.from(children_by_id.values()).filter((child) => !visited.has(child.event_id)) children_to_print.forEach((child, index) => { - child_lines.push( - this.buildTreeLine( - child, - new_indent, - index === children_to_print.length - 1, - parent_to_children, - visited - ) - ); - }); - - return [line, ...child_lines].join("\n"); + child_lines.push(this.buildTreeLine(child, new_indent, index === children_to_print.length - 1, parent_to_children, visited)) + }) + + return [line, ...child_lines].join('\n') } private formatTimestamp(value?: string): string { if (!value) { - return "N/A"; + return 'N/A' } - const date = new Date(value); + const date = new Date(value) if (Number.isNaN(date.getTime())) { - return "N/A"; + return 'N/A' } - return date.toISOString().slice(11, 23); + return date.toISOString().slice(11, 23) } private inferHandlerFilePath(): string | null { - const stack = new Error().stack; + const stack = new Error().stack if (!stack) { - return null; + return null } - const lines = stack.split("\n").map((line) => line.trim()); + const lines = stack.split('\n').map((line) => line.trim()) for (const line of lines) { - if (!line || line.startsWith("Error")) { - continue; + if (!line || line.startsWith('Error')) { + continue } - if ( - line.includes("event_bus.ts") || - line.includes("node:internal") || - line.includes("/node_modules/") - ) { - continue; + if (line.includes('event_bus.ts') || line.includes('node:internal') || line.includes('/node_modules/')) { + continue } - const match = line.match(/\(?(.+?:\d+:\d+)\)?$/); + const match = line.match(/\(?(.+?:\d+:\d+)\)?$/) if (match && match[1]) { - return match[1]; + return match[1] } } - return null; + return null } private formatResultValue(value: unknown): string { if (value === null || value === undefined) { - return "None"; + return 'None' } if (value instanceof BaseEvent) { - return `Event(${value.event_type}#${value.event_id.slice(-4)})`; + return `Event(${value.event_type}#${value.event_id.slice(-4)})` } - if (typeof value === "string") { - return JSON.stringify(value); + if (typeof value === 'string') { + return JSON.stringify(value) } - if (typeof value === "number" || typeof value === "boolean") { - return String(value); + if (typeof value === 'number' || typeof value === 'boolean') { + return String(value) } if (Array.isArray(value)) { - return `list(${value.length} items)`; + return `list(${value.length} items)` } - if (typeof value === "object") { - return `dict(${Object.keys(value as Record).length} items)`; + if (typeof value === 'object') { + return `dict(${Object.keys(value as Record).length} items)` } - return `${typeof value}(...)`; + return `${typeof value}(...)` } private notifyFinders(event: BaseEvent): void { for (const waiter of Array.from(this.find_waiters)) { if (!this.eventMatchesKey(event, waiter.event_key)) { - continue; + continue } if (!waiter.matches(event)) { - continue; + continue } if (waiter.timeout_id) { - clearTimeout(waiter.timeout_id); + clearTimeout(waiter.timeout_id) } - this.find_waiters.delete(waiter); - waiter.resolve(event); + this.find_waiters.delete(waiter) + waiter.resolve(event) } } - private createPendingHandlerResults( - event: BaseEvent - ): Array<{ - handler: EventHandler; - result: EventResult; - options?: HandlerOptions; + private createPendingHandlerResults(event: BaseEvent): Array<{ + handler: EventHandler + result: EventResult + options?: HandlerOptions }> { - const handlers = this.collectHandlers(event); + const handlers = this.collectHandlers(event) return handlers.map(({ handler_id, handler, handler_name, handler_file_path, options }) => { - const existing = event.event_results.get(handler_id); + const existing = event.event_results.get(handler_id) const result = existing ?? new EventResult({ @@ -1508,105 +1364,114 @@ export class EventBus { handler_id, handler_name, handler_file_path, - eventbus_name: this.name - }); + eventbus_name: this.name, + }) if (!existing) { - event.event_results.set(handler_id, result); + event.event_results.set(handler_id, result) } - return { handler, result, options }; - }); + return { handler, result, options } + }) } - private collectHandlers( - event: BaseEvent - ): Array<{ - handler_id: string; - handler: EventHandler; - handler_name: string; - handler_file_path?: string; - options?: HandlerOptions; + private collectHandlers(event: BaseEvent): Array<{ + handler_id: string + handler: EventHandler + handler_name: string + handler_file_path?: string + options?: HandlerOptions }> { const handlers: Array<{ - handler_id: string; - handler: EventHandler; - handler_name: string; - handler_file_path?: string; - options?: HandlerOptions; - }> = []; - + handler_id: string + handler: EventHandler + handler_name: string + handler_file_path?: string + options?: HandlerOptions + }> = [] + + // Exact-match handlers first, then wildcard β€” preserves original ordering + for (const [handler_id, entry] of this.handlers) { + if (entry.event_key === event.event_type) { + handlers.push({ + handler_id, + handler: entry.handler, + handler_name: entry.handler_name, + handler_file_path: entry.handler_file_path, + options: entry.options, + }) + } + } for (const [handler_id, entry] of this.handlers) { - if (entry.event_key !== event.event_type && entry.event_key !== "*") { - continue; + if (entry.event_key === '*') { + handlers.push({ + handler_id, + handler: entry.handler, + handler_name: entry.handler_name, + handler_file_path: entry.handler_file_path, + options: entry.options, + }) } - handlers.push({ - handler_id, - handler: entry.handler, - handler_name: entry.handler_name, - handler_file_path: entry.handler_file_path, - options: entry.options - }); } - return handlers; + return handlers } private eventMatchesKey(event: BaseEvent, event_key: EventKey): boolean { - if (event_key === "*") { - return true; + if (event_key === '*') { + return true } - const normalized = this.normalizeEventKey(event_key); - if (normalized === "*") { - return true; + const normalized = this.normalizeEventKey(event_key) + if (normalized === '*') { + return true } - return event.event_type === normalized; + return event.event_type === normalized } - private normalizeEventKey(event_key: EventKey | "*"): string | "*" { - if (event_key === "*") { - return "*"; + private normalizeEventKey(event_key: EventKey | '*'): string | '*' { + if (event_key === '*') { + return '*' } - if (typeof event_key === "string") { - return event_key; + if (typeof event_key === 'string') { + return event_key } - const event_type = (event_key as { event_type?: unknown }).event_type; - if (typeof event_type === "string" && event_type.length > 0 && event_type !== "BaseEvent") { - return event_type; + const event_type = (event_key as { event_type?: unknown }).event_type + if (typeof event_type === 'string' && event_type.length > 0 && event_type !== 'BaseEvent') { + return event_type } - throw new Error( - "event_key must be a string or an event class with a static event_type (not BaseEvent)" - ); + throw new Error('event_key must be a string or an event class with a static event_type (not BaseEvent)') } private trimHistory(): void { if (this.max_history_size === null) { - return; + return } if (this.event_history.size <= this.max_history_size) { - return; + return } - let remaining_overage = this.event_history.size - this.max_history_size; + let remaining_overage = this.event_history.size - this.max_history_size // First pass: remove completed events (oldest first, Map iterates in insertion order) for (const [event_id, event] of this.event_history) { if (remaining_overage <= 0) { - break; + break } - if (event.event_status !== "completed") { - continue; + if (event.event_status !== 'completed') { + continue } - this.event_history.delete(event_id); - remaining_overage -= 1; + this.event_history.delete(event_id) + event._gc() + remaining_overage -= 1 } // Second pass: force-remove oldest events regardless of status if (remaining_overage > 0) { - for (const event_id of this.event_history.keys()) { + for (const [event_id, event] of this.event_history) { if (remaining_overage <= 0) { - break; + break } - this.event_history.delete(event_id); - remaining_overage -= 1; + this.event_history.delete(event_id) + event._gc() + remaining_overage -= 1 } } } diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts index d62e213..d669a22 100644 --- a/bubus-ts/src/event_result.ts +++ b/bubus-ts/src/event_result.ts @@ -1,23 +1,23 @@ -import { v7 as uuidv7 } from "uuid"; +import { v7 as uuidv7 } from 'uuid' -import type { BaseEvent } from "./base_event.js"; -import type { AsyncLimiter } from "./semaphores.js"; +import type { BaseEvent } from './base_event.js' +import type { AsyncLimiter } from './semaphores.js' -export type EventResultStatus = "pending" | "started" | "completed" | "error"; +export type EventResultStatus = 'pending' | 'started' | 'completed' | 'error' export class EventResult { - id: string; - status: EventResultStatus; - event_id: string; - handler_id: string; - handler_name: string; - handler_file_path?: string; - eventbus_name: string; - started_at?: string; - completed_at?: string; - result?: unknown; - error?: unknown; - event_children: BaseEvent[]; + id: string + status: EventResultStatus + event_id: string + handler_id: string + handler_name: string + handler_file_path?: string + eventbus_name: string + started_at?: string + completed_at?: string + result?: unknown + error?: unknown + event_children: BaseEvent[] // Tracks whether this handler's execution has triggered a queue-jump via done(). // // Lifecycle: @@ -34,46 +34,40 @@ export class EventResult { // finishes β€” without this hold, the runloop would resume prematurely // while the handler is still executing after `await child.done()`. // 4. Reset to `false` in the same finally block after decrementing. - queue_jump_hold: boolean; + queue_jump_hold: boolean // The handler concurrency limiter currently held by this handler execution. // Set by runHandlerEntry so that _runImmediately can temporarily release it // (yield-and-reacquire) to let child event handlers use the same limiter // without deadlocking. - _held_handler_limiter: AsyncLimiter | null; + _held_handler_limiter: AsyncLimiter | null - constructor(params: { - event_id: string; - handler_id: string; - handler_name: string; - handler_file_path?: string; - eventbus_name: string; - }) { - this.id = uuidv7(); - this.status = "pending"; - this.event_id = params.event_id; - this.handler_id = params.handler_id; - this.handler_name = params.handler_name; - this.handler_file_path = params.handler_file_path; - this.eventbus_name = params.eventbus_name; - this.event_children = []; - this.queue_jump_hold = false; - this._held_handler_limiter = null; + constructor(params: { event_id: string; handler_id: string; handler_name: string; handler_file_path?: string; eventbus_name: string }) { + this.id = uuidv7() + this.status = 'pending' + this.event_id = params.event_id + this.handler_id = params.handler_id + this.handler_name = params.handler_name + this.handler_file_path = params.handler_file_path + this.eventbus_name = params.eventbus_name + this.event_children = [] + this.queue_jump_hold = false + this._held_handler_limiter = null } markStarted(): void { - this.status = "started"; - this.started_at = new Date().toISOString(); + this.status = 'started' + this.started_at = new Date().toISOString() } markCompleted(result: unknown): void { - this.status = "completed"; - this.result = result; - this.completed_at = new Date().toISOString(); + this.status = 'completed' + this.result = result + this.completed_at = new Date().toISOString() } markError(error: unknown): void { - this.status = "error"; - this.error = error; - this.completed_at = new Date().toISOString(); + this.status = 'error' + this.error = error + this.completed_at = new Date().toISOString() } } diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts index b494ed0..ea0071d 100644 --- a/bubus-ts/src/index.ts +++ b/bubus-ts/src/index.ts @@ -1,13 +1,5 @@ -export { BaseEvent, BaseEventSchema } from "./base_event.js"; -export { EventResult } from "./event_result.js"; -export { EventBus, EventHandlerTimeoutError, EventHandlerCancelledError } from "./event_bus.js"; -export type { ConcurrencyMode } from "./semaphores.js"; -export type { - EventClass, - EventHandler, - EventKey, - HandlerOptions, - EventStatus, - FindOptions, - FindWindow -} from "./types.js"; +export { BaseEvent, BaseEventSchema } from './base_event.js' +export { EventResult } from './event_result.js' +export { EventBus, EventHandlerTimeoutError, EventHandlerCancelledError } from './event_bus.js' +export type { ConcurrencyMode } from './semaphores.js' +export type { EventClass, EventHandler, EventKey, HandlerOptions, EventStatus, FindOptions, FindWindow } from './types.js' diff --git a/bubus-ts/src/semaphores.ts b/bubus-ts/src/semaphores.ts index eb90805..3693389 100644 --- a/bubus-ts/src/semaphores.ts +++ b/bubus-ts/src/semaphores.ts @@ -1,101 +1,91 @@ export type Deferred = { - promise: Promise; - resolve: (value: T | PromiseLike) => void; - reject: (reason?: unknown) => void; -}; + promise: Promise + resolve: (value: T | PromiseLike) => void + reject: (reason?: unknown) => void +} export const withResolvers = (): Deferred => { - if (typeof Promise.withResolvers === "function") { - return Promise.withResolvers(); + if (typeof Promise.withResolvers === 'function') { + return Promise.withResolvers() } - let resolve!: (value: T | PromiseLike) => void; - let reject!: (reason?: unknown) => void; + let resolve!: (value: T | PromiseLike) => void + let reject!: (reason?: unknown) => void const promise = new Promise((resolve_fn, reject_fn) => { - resolve = resolve_fn; - reject = reject_fn; - }); - return { promise, resolve, reject }; -}; + resolve = resolve_fn + reject = reject_fn + }) + return { promise, resolve, reject } +} -export const CONCURRENCY_MODES = ["global-serial", "bus-serial", "parallel", "auto"] as const; -export type ConcurrencyMode = (typeof CONCURRENCY_MODES)[number]; +export const CONCURRENCY_MODES = ['global-serial', 'bus-serial', 'parallel', 'auto'] as const +export type ConcurrencyMode = (typeof CONCURRENCY_MODES)[number] export class AsyncLimiter { - size: number; - in_use: number; - waiters: Array<() => void>; + size: number + in_use: number + waiters: Array<() => void> constructor(size: number) { - this.size = size; - this.in_use = 0; - this.waiters = []; + this.size = size + this.in_use = 0 + this.waiters = [] } async acquire(): Promise { if (this.size === Infinity) { - return; + return } if (this.in_use < this.size) { - this.in_use += 1; - return; + this.in_use += 1 + return } await new Promise((resolve) => { - this.waiters.push(resolve); - }); - this.in_use += 1; + this.waiters.push(resolve) + }) + this.in_use += 1 } release(): void { if (this.size === Infinity) { - return; + return } - this.in_use = Math.max(0, this.in_use - 1); - const next = this.waiters.shift(); + this.in_use = Math.max(0, this.in_use - 1) + const next = this.waiters.shift() if (next) { - next(); + next() } } } -export const resolveConcurrencyMode = ( - mode: ConcurrencyMode | undefined, - fallback: ConcurrencyMode -): ConcurrencyMode => { - const normalized_fallback = fallback === "auto" ? "bus-serial" : fallback; - if (!mode || mode === "auto") { - return normalized_fallback; +export const resolveConcurrencyMode = (mode: ConcurrencyMode | undefined, fallback: ConcurrencyMode): ConcurrencyMode => { + const normalized_fallback = fallback === 'auto' ? 'bus-serial' : fallback + if (!mode || mode === 'auto') { + return normalized_fallback } - return mode; -}; + return mode +} -export const limiterForMode = ( - mode: ConcurrencyMode, - global_limiter: AsyncLimiter, - bus_limiter: AsyncLimiter -): AsyncLimiter | null => { - if (mode === "parallel") { - return null; +export const limiterForMode = (mode: ConcurrencyMode, global_limiter: AsyncLimiter, bus_limiter: AsyncLimiter): AsyncLimiter | null => { + if (mode === 'parallel') { + return null } - if (mode === "global-serial") { - return global_limiter; + if (mode === 'global-serial') { + return global_limiter } - if (mode === "bus-serial") { - return bus_limiter; + if (mode === 'bus-serial') { + return bus_limiter } - return bus_limiter; -}; + return bus_limiter +} -export const runWithLimiter = async ( - limiter: AsyncLimiter | null, - fn: () => Promise -): Promise => { +export const runWithLimiter = async (limiter: AsyncLimiter | null, fn: () => Promise): Promise => { if (!limiter) { - return await fn(); + return await fn() } - await limiter.acquire(); + await limiter.acquire() try { - return await fn(); + return await fn() } finally { - limiter.release(); + limiter.release() } -}; +} diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts index ab675a3..5f44cdf 100644 --- a/bubus-ts/src/types.ts +++ b/bubus-ts/src/types.ts @@ -1,24 +1,22 @@ -import type { BaseEvent } from "./base_event.js"; -import type { ConcurrencyMode } from "./semaphores.js"; +import type { BaseEvent } from './base_event.js' +import type { ConcurrencyMode } from './semaphores.js' -export type EventStatus = "pending" | "started" | "completed"; +export type EventStatus = 'pending' | 'started' | 'completed' -export type EventClass = { event_type?: string } & (new ( - ...args: any[] -) => T); +export type EventClass = { event_type?: string } & (new (...args: any[]) => T) -export type EventKey = string | EventClass; +export type EventKey = string | EventClass -export type EventHandler = (event: T) => void | Promise; +export type EventHandler = (event: T) => void | Promise export type HandlerOptions = { - handler_concurrency?: ConcurrencyMode; -}; + handler_concurrency?: ConcurrencyMode +} -export type FindWindow = boolean | number; +export type FindWindow = boolean | number export type FindOptions = { - past?: FindWindow; - future?: FindWindow; - child_of?: BaseEvent | null; -}; + past?: FindWindow + future?: FindWindow + child_of?: BaseEvent | null +} diff --git a/bubus-ts/tests/_perf_profile.ts b/bubus-ts/tests/_perf_profile.ts new file mode 100644 index 0000000..6307e41 --- /dev/null +++ b/bubus-ts/tests/_perf_profile.ts @@ -0,0 +1,52 @@ +import { BaseEvent, EventBus } from '../src/index.js' + +const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) + +const total_events = 200_000 +const bus = new EventBus('PerfBus', { max_history_size: 1000 }) + +let processed_count = 0 +bus.on(SimpleEvent, () => { + processed_count += 1 +}) + +// Baseline memory +global.gc?.() +const mem_before = process.memoryUsage() +console.log(`Memory before: RSS=${(mem_before.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_before.heapUsed / 1024 / 1024).toFixed(1)}MB`) + +// Phase 1: Dispatch all events (measure dispatch throughput) +const t0 = performance.now() +const pending: Array> = [] +for (let i = 0; i < total_events; i++) { + pending.push(bus.dispatch(SimpleEvent({}))) +} +const t1 = performance.now() +console.log(`Dispatch ${total_events} events: ${(t1 - t0).toFixed(0)}ms (${(total_events / ((t1 - t0) / 1000)).toFixed(0)} events/s)`) + +const mem_after_dispatch = process.memoryUsage() +console.log( + `Memory after dispatch: RSS=${(mem_after_dispatch.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_after_dispatch.heapUsed / 1024 / 1024).toFixed(1)}MB` +) + +// Phase 2: Wait for all to complete +const t2 = performance.now() +await Promise.all(pending.map((e) => e.done())) +await bus.waitUntilIdle() +const t3 = performance.now() +console.log(`Await completion: ${(t3 - t2).toFixed(0)}ms`) +console.log(`Total: ${(t3 - t0).toFixed(0)}ms (${(total_events / ((t3 - t0) / 1000)).toFixed(0)} events/s)`) + +const mem_after = process.memoryUsage() +console.log( + `Memory after complete: RSS=${(mem_after.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_after.heapUsed / 1024 / 1024).toFixed(1)}MB` +) + +global.gc?.() +const mem_gc = process.memoryUsage() +console.log(`Memory after GC: RSS=${(mem_gc.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_gc.heapUsed / 1024 / 1024).toFixed(1)}MB`) + +console.log(`\nProcessed: ${processed_count}/${total_events}`) +console.log(`History size: ${bus.event_history.size} (max: ${bus.max_history_size})`) +console.log(`Heap delta (before GC): +${((mem_after.heapUsed - mem_before.heapUsed) / 1024 / 1024).toFixed(1)}MB`) +console.log(`Heap delta (after GC): +${((mem_gc.heapUsed - mem_before.heapUsed) / 1024 / 1024).toFixed(1)}MB`) diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index 3f36e74..5c5f2d2 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -1,772 +1,730 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const ParentEvent = BaseEvent.extend("ParentEvent", {}); -const ImmediateChildEvent = BaseEvent.extend("ImmediateChildEvent", {}); -const QueuedChildEvent = BaseEvent.extend("QueuedChildEvent", {}); +const ParentEvent = BaseEvent.extend('ParentEvent', {}) +const ImmediateChildEvent = BaseEvent.extend('ImmediateChildEvent', {}) +const QueuedChildEvent = BaseEvent.extend('QueuedChildEvent', {}) const delay = (ms: number): Promise => new Promise((resolve) => { - setTimeout(resolve, ms); - }); + setTimeout(resolve, ms) + }) -test("comprehensive patterns: forwarding, async/sync dispatch, parent tracking", async () => { - const bus_1 = new EventBus("bus1"); - const bus_2 = new EventBus("bus2"); +test('comprehensive patterns: forwarding, async/sync dispatch, parent tracking', async () => { + const bus_1 = new EventBus('bus1') + const bus_2 = new EventBus('bus2') - const results: Array<[number, string]> = []; - const execution_counter = { count: 0 }; + const results: Array<[number, string]> = [] + const execution_counter = { count: 0 } const child_bus2_event_handler = (event: BaseEvent): string => { - execution_counter.count += 1; - const seq = execution_counter.count; - const event_type_short = event.event_type.replace(/Event$/, ""); - results.push([seq, `bus2_handler_${event_type_short}`]); - return "forwarded bus result"; - }; + execution_counter.count += 1 + const seq = execution_counter.count + const event_type_short = event.event_type.replace(/Event$/, '') + results.push([seq, `bus2_handler_${event_type_short}`]) + return 'forwarded bus result' + } - bus_2.on("*", child_bus2_event_handler); - bus_1.on("*", bus_2.dispatch); + bus_2.on('*', child_bus2_event_handler) + bus_1.on('*', bus_2.dispatch) const parent_bus1_handler = async (event: BaseEvent): Promise => { - execution_counter.count += 1; - const seq = execution_counter.count; - results.push([seq, "parent_start"]); + execution_counter.count += 1 + const seq = execution_counter.count + results.push([seq, 'parent_start']) - const child_event_async = event.bus?.emit(QueuedChildEvent({}))!; - assert.notEqual(child_event_async.event_status, "completed"); + const child_event_async = event.bus?.emit(QueuedChildEvent({}))! + assert.notEqual(child_event_async.event_status, 'completed') - const child_event_sync = await event.bus?.emit(ImmediateChildEvent({})).done()!; - assert.equal(child_event_sync.event_status, "completed"); + const child_event_sync = await event.bus?.emit(ImmediateChildEvent({})).done()! + assert.equal(child_event_sync.event_status, 'completed') - assert.ok(child_event_sync.event_path.includes("bus2")); - assert.ok( - Array.from(child_event_sync.event_results.values()).some((result) => - result.handler_name.includes("dispatch") - ) - ); + assert.ok(child_event_sync.event_path.includes('bus2')) + assert.ok(Array.from(child_event_sync.event_results.values()).some((result) => result.handler_name.includes('dispatch'))) - assert.equal(child_event_async.event_parent_id, event.event_id); - assert.equal(child_event_sync.event_parent_id, event.event_id); + assert.equal(child_event_async.event_parent_id, event.event_id) + assert.equal(child_event_sync.event_parent_id, event.event_id) - execution_counter.count += 1; - const end_seq = execution_counter.count; - results.push([end_seq, "parent_end"]); - return "parent_done"; - }; + execution_counter.count += 1 + const end_seq = execution_counter.count + results.push([end_seq, 'parent_end']) + return 'parent_done' + } - bus_1.on(ParentEvent, parent_bus1_handler); + bus_1.on(ParentEvent, parent_bus1_handler) - const parent_event = bus_1.dispatch(ParentEvent({})); - await parent_event.done(); - await bus_1.waitUntilIdle(); - await bus_2.waitUntilIdle(); + const parent_event = bus_1.dispatch(ParentEvent({})) + await parent_event.done() + await bus_1.waitUntilIdle() + await bus_2.waitUntilIdle() const event_children = Array.from(bus_1.event_history.values()).filter( - (event) => - event.event_type === "ImmediateChildEvent" || event.event_type === "QueuedChildEvent" - ); - assert.ok(event_children.length > 0); - assert.ok( - event_children.every((event) => event.event_parent_id === parent_event.event_id) - ); + (event) => event.event_type === 'ImmediateChildEvent' || event.event_type === 'QueuedChildEvent' + ) + assert.ok(event_children.length > 0) + assert.ok(event_children.every((event) => event.event_parent_id === parent_event.event_id)) - const sorted_results = results.slice().sort((a, b) => a[0] - b[0]); - const execution_order = sorted_results.map((item) => item[1]); + const sorted_results = results.slice().sort((a, b) => a[0] - b[0]) + const execution_order = sorted_results.map((item) => item[1]) - assert.equal(execution_order[0], "parent_start"); - assert.ok(execution_order.includes("bus2_handler_ImmediateChild")); + assert.equal(execution_order[0], 'parent_start') + assert.ok(execution_order.includes('bus2_handler_ImmediateChild')) - if (execution_order.includes("parent_end")) { - const parent_end_idx = execution_order.indexOf("parent_end"); - assert.ok(parent_end_idx > 1); + if (execution_order.includes('parent_end')) { + const parent_end_idx = execution_order.indexOf('parent_end') + assert.ok(parent_end_idx > 1) } - assert.equal( - execution_order.filter((value) => value === "bus2_handler_ImmediateChild").length, - 1 - ); - assert.equal( - execution_order.filter((value) => value === "bus2_handler_QueuedChild").length, - 1 - ); - assert.equal( - execution_order.filter((value) => value === "bus2_handler_Parent").length, - 1 - ); -}); + assert.equal(execution_order.filter((value) => value === 'bus2_handler_ImmediateChild').length, 1) + assert.equal(execution_order.filter((value) => value === 'bus2_handler_QueuedChild').length, 1) + assert.equal(execution_order.filter((value) => value === 'bus2_handler_Parent').length, 1) +}) -test("race condition stress", async () => { - const bus_1 = new EventBus("bus1"); - const bus_2 = new EventBus("bus2"); - const RootEvent = BaseEvent.extend("RootEvent", {}); +test('race condition stress', async () => { + const bus_1 = new EventBus('bus1') + const bus_2 = new EventBus('bus2') + const RootEvent = BaseEvent.extend('RootEvent', {}) - const results: string[] = []; + const results: string[] = [] const child_handler = async (event: BaseEvent): Promise => { - const bus_name = event.event_path[event.event_path.length - 1] ?? "unknown"; - results.push(`child_${bus_name}`); - await delay(1); - return `child_done_${bus_name}`; - }; + const bus_name = event.event_path[event.event_path.length - 1] ?? 'unknown' + results.push(`child_${bus_name}`) + await delay(1) + return `child_done_${bus_name}` + } const parent_handler = async (event: BaseEvent): Promise => { - const children: BaseEvent[] = []; + const children: BaseEvent[] = [] for (let i = 0; i < 3; i += 1) { - children.push(event.bus?.emit(QueuedChildEvent({}))!); + children.push(event.bus?.emit(QueuedChildEvent({}))!) } for (let i = 0; i < 3; i += 1) { - const child = await event.bus?.emit(ImmediateChildEvent({})).done()!; - assert.equal(child.event_status, "completed"); - children.push(child); + const child = await event.bus?.emit(ImmediateChildEvent({})).done()! + assert.equal(child.event_status, 'completed') + children.push(child) } - assert.ok(children.every((child) => child.event_parent_id === event.event_id)); - return "parent_done"; - }; + assert.ok(children.every((child) => child.event_parent_id === event.event_id)) + return 'parent_done' + } - const bad_handler = (_bad: BaseEvent): void => {}; + const bad_handler = (_bad: BaseEvent): void => {} - bus_1.on("*", bus_2.dispatch); - bus_1.on(QueuedChildEvent, child_handler); - bus_1.on(ImmediateChildEvent, child_handler); - bus_2.on(QueuedChildEvent, child_handler); - bus_2.on(ImmediateChildEvent, child_handler); - bus_1.on(RootEvent, parent_handler); - bus_1.on(RootEvent, bad_handler); + bus_1.on('*', bus_2.dispatch) + bus_1.on(QueuedChildEvent, child_handler) + bus_1.on(ImmediateChildEvent, child_handler) + bus_2.on(QueuedChildEvent, child_handler) + bus_2.on(ImmediateChildEvent, child_handler) + bus_1.on(RootEvent, parent_handler) + bus_1.on(RootEvent, bad_handler) for (let run = 0; run < 5; run += 1) { - results.length = 0; + results.length = 0 - const event = bus_1.dispatch(RootEvent({})); - await event.done(); - await bus_1.waitUntilIdle(); - await bus_2.waitUntilIdle(); + const event = bus_1.dispatch(RootEvent({})) + await event.done() + await bus_1.waitUntilIdle() + await bus_2.waitUntilIdle() assert.equal( - results.filter((value) => value === "child_bus1").length, + results.filter((value) => value === 'child_bus1').length, 6, - `Run ${run}: Expected 6 child_bus1, got ${results.filter((value) => value === "child_bus1").length}` - ); + `Run ${run}: Expected 6 child_bus1, got ${results.filter((value) => value === 'child_bus1').length}` + ) assert.equal( - results.filter((value) => value === "child_bus2").length, + results.filter((value) => value === 'child_bus2').length, 6, - `Run ${run}: Expected 6 child_bus2, got ${results.filter((value) => value === "child_bus2").length}` - ); + `Run ${run}: Expected 6 child_bus2, got ${results.filter((value) => value === 'child_bus2').length}` + ) } -}); +}) -test("awaited child jumps queue without overshoot", async () => { - const bus = new EventBus("TestBus", { max_history_size: 100 }); - const execution_order: string[] = []; - const debug_order: Array<{ label: string; at: string }> = []; +test('awaited child jumps queue without overshoot', async () => { + const bus = new EventBus('TestBus', { max_history_size: 100 }) + const execution_order: string[] = [] + const debug_order: Array<{ label: string; at: string }> = [] - const Event1 = BaseEvent.extend("Event1", {}); - const Event2 = BaseEvent.extend("Event2", {}); - const Event3 = BaseEvent.extend("Event3", {}); - const LocalChildEvent = BaseEvent.extend("ChildEvent", {}); + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Event3 = BaseEvent.extend('Event3', {}) + const LocalChildEvent = BaseEvent.extend('ChildEvent', {}) const event1_handler = async (_event: BaseEvent): Promise => { - execution_order.push("Event1_start"); - debug_order.push({ label: "Event1_start", at: new Date().toISOString() }); - const child = _event.bus?.emit(LocalChildEvent({}))!; - execution_order.push("Child_dispatched"); - debug_order.push({ label: "Child_dispatched", at: new Date().toISOString() }); - await child.done(); - execution_order.push("Child_await_returned"); - debug_order.push({ label: "Child_await_returned", at: new Date().toISOString() }); - execution_order.push("Event1_end"); - debug_order.push({ label: "Event1_end", at: new Date().toISOString() }); - return "event1_done"; - }; + execution_order.push('Event1_start') + debug_order.push({ label: 'Event1_start', at: new Date().toISOString() }) + const child = _event.bus?.emit(LocalChildEvent({}))! + execution_order.push('Child_dispatched') + debug_order.push({ label: 'Child_dispatched', at: new Date().toISOString() }) + await child.done() + execution_order.push('Child_await_returned') + debug_order.push({ label: 'Child_await_returned', at: new Date().toISOString() }) + execution_order.push('Event1_end') + debug_order.push({ label: 'Event1_end', at: new Date().toISOString() }) + return 'event1_done' + } const event2_handler = async (): Promise => { - execution_order.push("Event2_start"); - debug_order.push({ label: "Event2_start", at: new Date().toISOString() }); - execution_order.push("Event2_end"); - debug_order.push({ label: "Event2_end", at: new Date().toISOString() }); - return "event2_done"; - }; + execution_order.push('Event2_start') + debug_order.push({ label: 'Event2_start', at: new Date().toISOString() }) + execution_order.push('Event2_end') + debug_order.push({ label: 'Event2_end', at: new Date().toISOString() }) + return 'event2_done' + } const event3_handler = async (): Promise => { - execution_order.push("Event3_start"); - debug_order.push({ label: "Event3_start", at: new Date().toISOString() }); - execution_order.push("Event3_end"); - debug_order.push({ label: "Event3_end", at: new Date().toISOString() }); - return "event3_done"; - }; + execution_order.push('Event3_start') + debug_order.push({ label: 'Event3_start', at: new Date().toISOString() }) + execution_order.push('Event3_end') + debug_order.push({ label: 'Event3_end', at: new Date().toISOString() }) + return 'event3_done' + } const child_handler = async (): Promise => { - execution_order.push("Child_start"); - debug_order.push({ label: "Child_start", at: new Date().toISOString() }); - execution_order.push("Child_end"); - debug_order.push({ label: "Child_end", at: new Date().toISOString() }); - return "child_done"; - }; - - bus.on(Event1, event1_handler); - bus.on(Event2, event2_handler); - bus.on(Event3, event3_handler); - bus.on(LocalChildEvent, child_handler); - - const event_1 = bus.dispatch(Event1({})); - const event_2 = bus.dispatch(Event2({})); - const event_3 = bus.dispatch(Event3({})); + execution_order.push('Child_start') + debug_order.push({ label: 'Child_start', at: new Date().toISOString() }) + execution_order.push('Child_end') + debug_order.push({ label: 'Child_end', at: new Date().toISOString() }) + return 'child_done' + } + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(LocalChildEvent, child_handler) + + const event_1 = bus.dispatch(Event1({})) + const event_2 = bus.dispatch(Event2({})) + const event_3 = bus.dispatch(Event3({})) // Wait for everything to complete - await event_1.done(); - await bus.waitUntilIdle(); + await event_1.done() + await bus.waitUntilIdle() // Core assertion: child jumped the queue and ran DURING Event1's handler - assert.ok(execution_order.includes("Child_start")); - assert.ok(execution_order.includes("Child_end")); - const child_start_idx = execution_order.indexOf("Child_start"); - const child_end_idx = execution_order.indexOf("Child_end"); - const event1_end_idx = execution_order.indexOf("Event1_end"); - assert.ok(child_start_idx < event1_end_idx, "child must start before Event1 handler returns"); - assert.ok(child_end_idx < event1_end_idx, "child must end before Event1 handler returns"); + assert.ok(execution_order.includes('Child_start')) + assert.ok(execution_order.includes('Child_end')) + const child_start_idx = execution_order.indexOf('Child_start') + const child_end_idx = execution_order.indexOf('Child_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child_start_idx < event1_end_idx, 'child must start before Event1 handler returns') + assert.ok(child_end_idx < event1_end_idx, 'child must end before Event1 handler returns') // No overshoot: Event2 and Event3 must only start AFTER Event1's handler fully completes. // In JS, the microtask-based runloop processes them after Event1 completes (so they may // already be done by this point), but the key guarantee is ordering, not timing. - const event2_start_idx = execution_order.indexOf("Event2_start"); - const event3_start_idx = execution_order.indexOf("Event3_start"); - assert.ok(event2_start_idx > event1_end_idx, "Event2 must not start until Event1 handler returns"); - assert.ok(event3_start_idx > event1_end_idx, "Event3 must not start until Event1 handler returns"); + const event2_start_idx = execution_order.indexOf('Event2_start') + const event3_start_idx = execution_order.indexOf('Event3_start') + assert.ok(event2_start_idx > event1_end_idx, 'Event2 must not start until Event1 handler returns') + assert.ok(event3_start_idx > event1_end_idx, 'Event3 must not start until Event1 handler returns') // FIFO preserved among queued events - assert.ok(event2_start_idx < event3_start_idx, "Event2 must start before Event3 (FIFO)"); + assert.ok(event2_start_idx < event3_start_idx, 'Event2 must start before Event3 (FIFO)') // All events completed - assert.equal(event_1.event_status, "completed"); - assert.equal(event_2.event_status, "completed"); - assert.equal(event_3.event_status, "completed"); + assert.equal(event_1.event_status, 'completed') + assert.equal(event_2.event_status, 'completed') + assert.equal(event_3.event_status, 'completed') // Timestamp ordering confirms the same - const history_list = Array.from(bus.event_history.values()); - const child_event = history_list.find((event) => event.event_type === "ChildEvent"); - const event2_from_history = history_list.find((event) => event.event_type === "Event2"); - const event3_from_history = history_list.find((event) => event.event_type === "Event3"); + const history_list = Array.from(bus.event_history.values()) + const child_event = history_list.find((event) => event.event_type === 'ChildEvent') + const event2_from_history = history_list.find((event) => event.event_type === 'Event2') + const event3_from_history = history_list.find((event) => event.event_type === 'Event3') - assert.ok(child_event?.event_started_at); - assert.ok(event2_from_history?.event_started_at); - assert.ok(event3_from_history?.event_started_at); + assert.ok(child_event?.event_started_at) + assert.ok(event2_from_history?.event_started_at) + assert.ok(event3_from_history?.event_started_at) - assert.ok(child_event!.event_started_at! < event2_from_history!.event_started_at!); - assert.ok(child_event!.event_started_at! < event3_from_history!.event_started_at!); -}); + assert.ok(child_event!.event_started_at! < event2_from_history!.event_started_at!) + assert.ok(child_event!.event_started_at! < event3_from_history!.event_started_at!) +}) -test("done() on non-proxied event still holds immediate_processing_stack_depth", async () => { - const bus = new EventBus("RawDoneBus", { max_history_size: 100 }); - const Event1 = BaseEvent.extend("Event1", {}); - const ChildEvent = BaseEvent.extend("RawChild", {}); +test('done() on non-proxied event still holds immediate_processing_stack_depth', async () => { + const bus = new EventBus('RawDoneBus', { max_history_size: 100 }) + const Event1 = BaseEvent.extend('Event1', {}) + const ChildEvent = BaseEvent.extend('RawChild', {}) - let depth_after_done = -1; + let depth_after_done = -1 - bus.on(ChildEvent, () => {}); + bus.on(ChildEvent, () => {}) - bus.on(Event1, async (event) => { + bus.on(Event1, async (_event) => { // Dispatch child via the raw bus (not the proxied event.bus) - const child = bus.dispatch(ChildEvent({})); + const child = bus.dispatch(ChildEvent({})) // Get the raw (non-proxied) event - const raw_child = child._original_event ?? child; + const raw_child = child._original_event ?? child // done() on raw event bypasses handler_result injection from proxy - await raw_child.done(); + await raw_child.done() // After done() returns, depth should still be > 0 because // we're still inside a handler doing queue-jump processing - depth_after_done = bus.immediate_processing_stack_depth; - }); + depth_after_done = bus.immediate_processing_stack_depth + }) - bus.dispatch(Event1({})); - await bus.waitUntilIdle(); + bus.dispatch(Event1({})) + await bus.waitUntilIdle() assert.ok( depth_after_done > 0, - `immediate_processing_stack_depth should be > 0 after raw done() ` + - `but before handler returns, got ${depth_after_done}` - ); -}); + `immediate_processing_stack_depth should be > 0 after raw done() ` + `but before handler returns, got ${depth_after_done}` + ) +}) -test("immediate_processing_stack_depth returns to 0 after queue-jump completes", async () => { - const bus = new EventBus("DepthBalanceBus", { max_history_size: 100 }); - const Event1 = BaseEvent.extend("DepthEvent1", {}); - const ChildA = BaseEvent.extend("DepthChildA", {}); - const ChildB = BaseEvent.extend("DepthChildB", {}); +test('immediate_processing_stack_depth returns to 0 after queue-jump completes', async () => { + const bus = new EventBus('DepthBalanceBus', { max_history_size: 100 }) + const Event1 = BaseEvent.extend('DepthEvent1', {}) + const ChildA = BaseEvent.extend('DepthChildA', {}) + const ChildB = BaseEvent.extend('DepthChildB', {}) - let depth_during_handler = -1; - let depth_between_dones = -1; - let depth_after_second_done = -1; + let depth_during_handler = -1 + let depth_between_dones = -1 + let depth_after_second_done = -1 - bus.on(ChildA, () => {}); - bus.on(ChildB, () => {}); + bus.on(ChildA, () => {}) + bus.on(ChildB, () => {}) bus.on(Event1, async (event) => { // First queue-jump - const child_a = event.bus?.emit(ChildA({}))!; - await child_a.done(); - depth_during_handler = bus.immediate_processing_stack_depth; + const child_a = event.bus?.emit(ChildA({}))! + await child_a.done() + depth_during_handler = bus.immediate_processing_stack_depth // Second queue-jump β€” should NOT double-increment (queue_jump_hold guard) - const child_b = event.bus?.emit(ChildB({}))!; - depth_between_dones = bus.immediate_processing_stack_depth; - await child_b.done(); - depth_after_second_done = bus.immediate_processing_stack_depth; - }); + const child_b = event.bus?.emit(ChildB({}))! + depth_between_dones = bus.immediate_processing_stack_depth + await child_b.done() + depth_after_second_done = bus.immediate_processing_stack_depth + }) - bus.dispatch(Event1({})); - await bus.waitUntilIdle(); + bus.dispatch(Event1({})) + await bus.waitUntilIdle() // During handler, depth should be > 0 (held by queue_jump_hold) - assert.ok( - depth_during_handler > 0, - `depth should be > 0 after first done(), got ${depth_during_handler}` - ); + assert.ok(depth_during_handler > 0, `depth should be > 0 after first done(), got ${depth_during_handler}`) // Between done() calls, depth should still be held - assert.ok( - depth_between_dones > 0, - `depth should be > 0 between done() calls, got ${depth_between_dones}` - ); + assert.ok(depth_between_dones > 0, `depth should be > 0 between done() calls, got ${depth_between_dones}`) // After second done(), still held until handler returns - assert.ok( - depth_after_second_done > 0, - `depth should be > 0 after second done(), got ${depth_after_second_done}` - ); + assert.ok(depth_after_second_done > 0, `depth should be > 0 after second done(), got ${depth_after_second_done}`) // After handler finishes and bus is idle, depth must be exactly 0 assert.equal( bus.immediate_processing_stack_depth, 0, `depth should return to 0 after handler completes, got ${bus.immediate_processing_stack_depth}` - ); -}); + ) +}) -test("isInsideHandler() is per-bus, not global", async () => { - const bus_a = new EventBus("InsideHandlerA", { max_history_size: 100 }); - const bus_b = new EventBus("InsideHandlerB", { max_history_size: 100 }); +test('isInsideHandler() is per-bus, not global', async () => { + const bus_a = new EventBus('InsideHandlerA', { max_history_size: 100 }) + const bus_b = new EventBus('InsideHandlerB', { max_history_size: 100 }) - const EventA = BaseEvent.extend("InsideHandlerEventA", {}); - const EventB = BaseEvent.extend("InsideHandlerEventB", {}); + const EventA = BaseEvent.extend('InsideHandlerEventA', {}) + const EventB = BaseEvent.extend('InsideHandlerEventB', {}) - let bus_a_inside_during_a_handler = false; - let bus_b_inside_during_a_handler = false; - let bus_a_inside_during_b_handler = false; - let bus_b_inside_during_b_handler = false; + let bus_a_inside_during_a_handler = false + let bus_b_inside_during_a_handler = false + let bus_a_inside_during_b_handler = false + let bus_b_inside_during_b_handler = false bus_a.on(EventA, () => { - bus_a_inside_during_a_handler = bus_a.isInsideHandler(); - bus_b_inside_during_a_handler = bus_b.isInsideHandler(); - }); + bus_a_inside_during_a_handler = bus_a.isInsideHandler() + bus_b_inside_during_a_handler = bus_b.isInsideHandler() + }) bus_b.on(EventB, () => { - bus_a_inside_during_b_handler = bus_a.isInsideHandler(); - bus_b_inside_during_b_handler = bus_b.isInsideHandler(); - }); + bus_a_inside_during_b_handler = bus_a.isInsideHandler() + bus_b_inside_during_b_handler = bus_b.isInsideHandler() + }) // Dispatch to bus_a first, wait for completion so bus_b has no active handlers - await bus_a.dispatch(EventA({})).done(); - await bus_a.waitUntilIdle(); + await bus_a.dispatch(EventA({})).done() + await bus_a.waitUntilIdle() // Then dispatch to bus_b so bus_a has no active handlers - await bus_b.dispatch(EventB({})).done(); - await bus_b.waitUntilIdle(); + await bus_b.dispatch(EventB({})).done() + await bus_b.waitUntilIdle() // During bus_a's handler: bus_a should report inside, bus_b should not - assert.equal( - bus_a_inside_during_a_handler, - true, - "bus_a.isInsideHandler() should be true during bus_a handler" - ); - assert.equal( - bus_b_inside_during_a_handler, - false, - "bus_b.isInsideHandler() should be false during bus_a handler" - ); + assert.equal(bus_a_inside_during_a_handler, true, 'bus_a.isInsideHandler() should be true during bus_a handler') + assert.equal(bus_b_inside_during_a_handler, false, 'bus_b.isInsideHandler() should be false during bus_a handler') // During bus_b's handler: bus_b should report inside, bus_a should not - assert.equal( - bus_b_inside_during_b_handler, - true, - "bus_b.isInsideHandler() should be true during bus_b handler" - ); - assert.equal( - bus_a_inside_during_b_handler, - false, - "bus_a.isInsideHandler() should be false during bus_b handler" - ); + assert.equal(bus_b_inside_during_b_handler, true, 'bus_b.isInsideHandler() should be true during bus_b handler') + assert.equal(bus_a_inside_during_b_handler, false, 'bus_a.isInsideHandler() should be false during bus_b handler') // After all handlers complete, neither bus should report inside - assert.equal(bus_a.isInsideHandler(), false, "bus_a.isInsideHandler() should be false after idle"); - assert.equal(bus_b.isInsideHandler(), false, "bus_b.isInsideHandler() should be false after idle"); -}); + assert.equal(bus_a.isInsideHandler(), false, 'bus_a.isInsideHandler() should be false after idle') + assert.equal(bus_b.isInsideHandler(), false, 'bus_b.isInsideHandler() should be false after idle') +}) -test("dispatch multiple, await one skips others until after handler completes", async () => { - const bus = new EventBus("MultiDispatchBus", { max_history_size: 100 }); - const execution_order: string[] = []; +test('dispatch multiple, await one skips others until after handler completes', async () => { + const bus = new EventBus('MultiDispatchBus', { max_history_size: 100 }) + const execution_order: string[] = [] - const Event1 = BaseEvent.extend("Event1", {}); - const Event2 = BaseEvent.extend("Event2", {}); - const Event3 = BaseEvent.extend("Event3", {}); - const ChildA = BaseEvent.extend("ChildA", {}); - const ChildB = BaseEvent.extend("ChildB", {}); - const ChildC = BaseEvent.extend("ChildC", {}); + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Event3 = BaseEvent.extend('Event3', {}) + const ChildA = BaseEvent.extend('ChildA', {}) + const ChildB = BaseEvent.extend('ChildB', {}) + const ChildC = BaseEvent.extend('ChildC', {}) const event1_handler = async (event: BaseEvent): Promise => { - execution_order.push("Event1_start"); + execution_order.push('Event1_start') - event.bus?.emit(ChildA({})); - execution_order.push("ChildA_dispatched"); + event.bus?.emit(ChildA({})) + execution_order.push('ChildA_dispatched') - const child_b = event.bus?.emit(ChildB({}))!; - execution_order.push("ChildB_dispatched"); + const child_b = event.bus?.emit(ChildB({}))! + execution_order.push('ChildB_dispatched') - event.bus?.emit(ChildC({})); - execution_order.push("ChildC_dispatched"); + event.bus?.emit(ChildC({})) + execution_order.push('ChildC_dispatched') - await child_b.done(); - execution_order.push("ChildB_await_returned"); + await child_b.done() + execution_order.push('ChildB_await_returned') - execution_order.push("Event1_end"); - return "event1_done"; - }; + execution_order.push('Event1_end') + return 'event1_done' + } const event2_handler = async (): Promise => { - execution_order.push("Event2_start"); - execution_order.push("Event2_end"); - return "event2_done"; - }; + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } const event3_handler = async (): Promise => { - execution_order.push("Event3_start"); - execution_order.push("Event3_end"); - return "event3_done"; - }; + execution_order.push('Event3_start') + execution_order.push('Event3_end') + return 'event3_done' + } const child_a_handler = async (): Promise => { - execution_order.push("ChildA_start"); - execution_order.push("ChildA_end"); - return "child_a_done"; - }; + execution_order.push('ChildA_start') + execution_order.push('ChildA_end') + return 'child_a_done' + } const child_b_handler = async (): Promise => { - execution_order.push("ChildB_start"); - execution_order.push("ChildB_end"); - return "child_b_done"; - }; + execution_order.push('ChildB_start') + execution_order.push('ChildB_end') + return 'child_b_done' + } const child_c_handler = async (): Promise => { - execution_order.push("ChildC_start"); - execution_order.push("ChildC_end"); - return "child_c_done"; - }; - - bus.on(Event1, event1_handler); - bus.on(Event2, event2_handler); - bus.on(Event3, event3_handler); - bus.on(ChildA, child_a_handler); - bus.on(ChildB, child_b_handler); - bus.on(ChildC, child_c_handler); - - const event_1 = bus.dispatch(Event1({})); - bus.dispatch(Event2({})); - bus.dispatch(Event3({})); - - await event_1.done(); - - assert.ok(execution_order.includes("ChildB_start")); - assert.ok(execution_order.includes("ChildB_end")); - - const child_b_end_idx = execution_order.indexOf("ChildB_end"); - const event1_end_idx = execution_order.indexOf("Event1_end"); - assert.ok(child_b_end_idx < event1_end_idx); - - if (execution_order.includes("ChildA_start")) { - const child_a_start_idx = execution_order.indexOf("ChildA_start"); - assert.ok(child_a_start_idx > event1_end_idx); + execution_order.push('ChildC_start') + execution_order.push('ChildC_end') + return 'child_c_done' } - if (execution_order.includes("ChildC_start")) { - const child_c_start_idx = execution_order.indexOf("ChildC_start"); - assert.ok(child_c_start_idx > event1_end_idx); + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(ChildA, child_a_handler) + bus.on(ChildB, child_b_handler) + bus.on(ChildC, child_c_handler) + + const event_1 = bus.dispatch(Event1({})) + bus.dispatch(Event2({})) + bus.dispatch(Event3({})) + + await event_1.done() + + assert.ok(execution_order.includes('ChildB_start')) + assert.ok(execution_order.includes('ChildB_end')) + + const child_b_end_idx = execution_order.indexOf('ChildB_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child_b_end_idx < event1_end_idx) + + if (execution_order.includes('ChildA_start')) { + const child_a_start_idx = execution_order.indexOf('ChildA_start') + assert.ok(child_a_start_idx > event1_end_idx) + } + if (execution_order.includes('ChildC_start')) { + const child_c_start_idx = execution_order.indexOf('ChildC_start') + assert.ok(child_c_start_idx > event1_end_idx) } - if (execution_order.includes("Event2_start")) { - const event2_start_idx = execution_order.indexOf("Event2_start"); - assert.ok(event2_start_idx > event1_end_idx); + if (execution_order.includes('Event2_start')) { + const event2_start_idx = execution_order.indexOf('Event2_start') + assert.ok(event2_start_idx > event1_end_idx) } - if (execution_order.includes("Event3_start")) { - const event3_start_idx = execution_order.indexOf("Event3_start"); - assert.ok(event3_start_idx > event1_end_idx); + if (execution_order.includes('Event3_start')) { + const event3_start_idx = execution_order.indexOf('Event3_start') + assert.ok(event3_start_idx > event1_end_idx) } - await bus.waitUntilIdle(); + await bus.waitUntilIdle() - const event2_start_idx = execution_order.indexOf("Event2_start"); - const event3_start_idx = execution_order.indexOf("Event3_start"); - const child_a_start_idx = execution_order.indexOf("ChildA_start"); - const child_c_start_idx = execution_order.indexOf("ChildC_start"); + const event2_start_idx = execution_order.indexOf('Event2_start') + const event3_start_idx = execution_order.indexOf('Event3_start') + const child_a_start_idx = execution_order.indexOf('ChildA_start') + const child_c_start_idx = execution_order.indexOf('ChildC_start') - assert.ok(event2_start_idx < event3_start_idx); - assert.ok(event3_start_idx < child_a_start_idx); - assert.ok(child_a_start_idx < child_c_start_idx); -}); + assert.ok(event2_start_idx < event3_start_idx) + assert.ok(event3_start_idx < child_a_start_idx) + assert.ok(child_a_start_idx < child_c_start_idx) +}) -test("multi-bus queues are independent when awaiting child", async () => { - const bus_1 = new EventBus("Bus1", { max_history_size: 100 }); - const bus_2 = new EventBus("Bus2", { max_history_size: 100 }); - const execution_order: string[] = []; +test('multi-bus queues are independent when awaiting child', async () => { + const bus_1 = new EventBus('Bus1', { max_history_size: 100 }) + const bus_2 = new EventBus('Bus2', { max_history_size: 100 }) + const execution_order: string[] = [] - const Event1 = BaseEvent.extend("Event1", {}); - const Event2 = BaseEvent.extend("Event2", {}); - const Event3 = BaseEvent.extend("Event3", {}); - const Event4 = BaseEvent.extend("Event4", {}); - const LocalChildEvent = BaseEvent.extend("ChildEvent", {}); + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Event3 = BaseEvent.extend('Event3', {}) + const Event4 = BaseEvent.extend('Event4', {}) + const LocalChildEvent = BaseEvent.extend('ChildEvent', {}) const event1_handler = async (event: BaseEvent): Promise => { - execution_order.push("Bus1_Event1_start"); - const child = event.bus?.emit(LocalChildEvent({}))!; - execution_order.push("Child_dispatched_to_Bus1"); - await child.done(); - execution_order.push("Child_await_returned"); - execution_order.push("Bus1_Event1_end"); - return "event1_done"; - }; + execution_order.push('Bus1_Event1_start') + const child = event.bus?.emit(LocalChildEvent({}))! + execution_order.push('Child_dispatched_to_Bus1') + await child.done() + execution_order.push('Child_await_returned') + execution_order.push('Bus1_Event1_end') + return 'event1_done' + } const event2_handler = async (): Promise => { - execution_order.push("Bus1_Event2_start"); - execution_order.push("Bus1_Event2_end"); - return "event2_done"; - }; + execution_order.push('Bus1_Event2_start') + execution_order.push('Bus1_Event2_end') + return 'event2_done' + } const event3_handler = async (): Promise => { - execution_order.push("Bus2_Event3_start"); - execution_order.push("Bus2_Event3_end"); - return "event3_done"; - }; + execution_order.push('Bus2_Event3_start') + execution_order.push('Bus2_Event3_end') + return 'event3_done' + } const event4_handler = async (): Promise => { - execution_order.push("Bus2_Event4_start"); - execution_order.push("Bus2_Event4_end"); - return "event4_done"; - }; + execution_order.push('Bus2_Event4_start') + execution_order.push('Bus2_Event4_end') + return 'event4_done' + } const child_handler = async (): Promise => { - execution_order.push("Child_start"); - execution_order.push("Child_end"); - return "child_done"; - }; + execution_order.push('Child_start') + execution_order.push('Child_end') + return 'child_done' + } - bus_1.on(Event1, event1_handler); - bus_1.on(Event2, event2_handler); - bus_1.on(LocalChildEvent, child_handler); + bus_1.on(Event1, event1_handler) + bus_1.on(Event2, event2_handler) + bus_1.on(LocalChildEvent, child_handler) - bus_2.on(Event3, event3_handler); - bus_2.on(Event4, event4_handler); + bus_2.on(Event3, event3_handler) + bus_2.on(Event4, event4_handler) - const event_1 = bus_1.dispatch(Event1({})); - bus_1.dispatch(Event2({})); - bus_2.dispatch(Event3({})); - bus_2.dispatch(Event4({})); + const event_1 = bus_1.dispatch(Event1({})) + bus_1.dispatch(Event2({})) + bus_2.dispatch(Event3({})) + bus_2.dispatch(Event4({})) - await delay(0); + await delay(0) - await event_1.done(); + await event_1.done() - assert.ok(execution_order.includes("Child_start")); - assert.ok(execution_order.includes("Child_end")); + assert.ok(execution_order.includes('Child_start')) + assert.ok(execution_order.includes('Child_end')) - const child_end_idx = execution_order.indexOf("Child_end"); - const event1_end_idx = execution_order.indexOf("Bus1_Event1_end"); - assert.ok(child_end_idx < event1_end_idx); + const child_end_idx = execution_order.indexOf('Child_end') + const event1_end_idx = execution_order.indexOf('Bus1_Event1_end') + assert.ok(child_end_idx < event1_end_idx) - const bus1_event2_start_idx = execution_order.indexOf("Bus1_Event2_start"); + const bus1_event2_start_idx = execution_order.indexOf('Bus1_Event2_start') if (bus1_event2_start_idx !== -1) { - assert.ok(bus1_event2_start_idx > event1_end_idx); + assert.ok(bus1_event2_start_idx > event1_end_idx) } - const bus2_event3_start_idx = execution_order.indexOf("Bus2_Event3_start"); - const bus2_event4_start_idx = execution_order.indexOf("Bus2_Event4_start"); - assert.ok(bus2_event3_start_idx !== -1 || bus2_event4_start_idx !== -1); + const bus2_event3_start_idx = execution_order.indexOf('Bus2_Event3_start') + const bus2_event4_start_idx = execution_order.indexOf('Bus2_Event4_start') + assert.ok(bus2_event3_start_idx !== -1 || bus2_event4_start_idx !== -1) const bus2_start_idx = bus2_event3_start_idx === -1 ? bus2_event4_start_idx : bus2_event4_start_idx === -1 ? bus2_event3_start_idx - : Math.min(bus2_event3_start_idx, bus2_event4_start_idx); - assert.ok(bus2_start_idx < event1_end_idx); + : Math.min(bus2_event3_start_idx, bus2_event4_start_idx) + assert.ok(bus2_start_idx < event1_end_idx) - await bus_1.waitUntilIdle(); - await bus_2.waitUntilIdle(); + await bus_1.waitUntilIdle() + await bus_2.waitUntilIdle() - assert.ok(execution_order.includes("Bus1_Event2_start")); - assert.ok(execution_order.includes("Bus2_Event3_start")); - assert.ok(execution_order.includes("Bus2_Event4_start")); -}); + assert.ok(execution_order.includes('Bus1_Event2_start')) + assert.ok(execution_order.includes('Bus2_Event3_start')) + assert.ok(execution_order.includes('Bus2_Event4_start')) +}) -test("awaiting an already completed event is a no-op", async () => { - const bus = new EventBus("AlreadyCompletedBus", { max_history_size: 100 }); - const execution_order: string[] = []; +test('awaiting an already completed event is a no-op', async () => { + const bus = new EventBus('AlreadyCompletedBus', { max_history_size: 100 }) + const execution_order: string[] = [] - const Event1 = BaseEvent.extend("Event1", {}); - const Event2 = BaseEvent.extend("Event2", {}); + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) const event1_handler = async (): Promise => { - execution_order.push("Event1_start"); - execution_order.push("Event1_end"); - return "event1_done"; - }; + execution_order.push('Event1_start') + execution_order.push('Event1_end') + return 'event1_done' + } const event2_handler = async (): Promise => { - execution_order.push("Event2_start"); - execution_order.push("Event2_end"); - return "event2_done"; - }; + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } - bus.on(Event1, event1_handler); - bus.on(Event2, event2_handler); + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) - const event_1 = await bus.dispatch(Event1({})).done(); - assert.equal(event_1.event_status, "completed"); + const event_1 = await bus.dispatch(Event1({})).done() + assert.equal(event_1.event_status, 'completed') - const event_2 = bus.dispatch(Event2({})); + const event_2 = bus.dispatch(Event2({})) - await event_1.done(); + await event_1.done() - assert.equal(event_2.event_status, "pending"); + assert.equal(event_2.event_status, 'pending') - await bus.waitUntilIdle(); -}); + await bus.waitUntilIdle() +}) -test("multiple awaits on same event", async () => { - const bus = new EventBus("MultiAwaitBus", { max_history_size: 100 }); - const execution_order: string[] = []; - const await_results: string[] = []; +test('multiple awaits on same event', async () => { + const bus = new EventBus('MultiAwaitBus', { max_history_size: 100 }) + const execution_order: string[] = [] + const await_results: string[] = [] - const Event1 = BaseEvent.extend("Event1", {}); - const Event2 = BaseEvent.extend("Event2", {}); - const LocalChildEvent = BaseEvent.extend("ChildEvent", {}); + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const LocalChildEvent = BaseEvent.extend('ChildEvent', {}) const event1_handler = async (event: BaseEvent): Promise => { - execution_order.push("Event1_start"); + execution_order.push('Event1_start') - const child = event.bus?.emit(LocalChildEvent({}))!; + const child = event.bus?.emit(LocalChildEvent({}))! const await_child = async (name: string): Promise => { - await child.done(); - await_results.push(`${name}_completed`); - }; + await child.done() + await_results.push(`${name}_completed`) + } - await Promise.all([await_child("await1"), await_child("await2")]); - execution_order.push("Both_awaits_completed"); - execution_order.push("Event1_end"); - return "event1_done"; - }; + await Promise.all([await_child('await1'), await_child('await2')]) + execution_order.push('Both_awaits_completed') + execution_order.push('Event1_end') + return 'event1_done' + } const event2_handler = async (): Promise => { - execution_order.push("Event2_start"); - execution_order.push("Event2_end"); - return "event2_done"; - }; + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } const child_handler = async (): Promise => { - execution_order.push("Child_start"); - await delay(10); - execution_order.push("Child_end"); - return "child_done"; - }; + execution_order.push('Child_start') + await delay(10) + execution_order.push('Child_end') + return 'child_done' + } - bus.on(Event1, event1_handler); - bus.on(Event2, event2_handler); - bus.on(LocalChildEvent, child_handler); + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(LocalChildEvent, child_handler) - const event_1 = bus.dispatch(Event1({})); - bus.dispatch(Event2({})); + const event_1 = bus.dispatch(Event1({})) + bus.dispatch(Event2({})) - await event_1.done(); + await event_1.done() - assert.equal(await_results.length, 2); - assert.ok(await_results.includes("await1_completed")); - assert.ok(await_results.includes("await2_completed")); + assert.equal(await_results.length, 2) + assert.ok(await_results.includes('await1_completed')) + assert.ok(await_results.includes('await2_completed')) - assert.ok(execution_order.includes("Child_start")); - assert.ok(execution_order.includes("Child_end")); - const child_end_idx = execution_order.indexOf("Child_end"); - const event1_end_idx = execution_order.indexOf("Event1_end"); - assert.ok(child_end_idx < event1_end_idx); + assert.ok(execution_order.includes('Child_start')) + assert.ok(execution_order.includes('Child_end')) + const child_end_idx = execution_order.indexOf('Child_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child_end_idx < event1_end_idx) - assert.ok(!execution_order.includes("Event2_start")); + assert.ok(!execution_order.includes('Event2_start')) - await bus.waitUntilIdle(); -}); + await bus.waitUntilIdle() +}) -test("deeply nested awaited children", async () => { - const bus = new EventBus("DeepNestedBus", { max_history_size: 100 }); - const execution_order: string[] = []; +test('deeply nested awaited children', async () => { + const bus = new EventBus('DeepNestedBus', { max_history_size: 100 }) + const execution_order: string[] = [] - const Event1 = BaseEvent.extend("Event1", {}); - const Event2 = BaseEvent.extend("Event2", {}); - const Child1 = BaseEvent.extend("Child1", {}); - const Child2 = BaseEvent.extend("Child2", {}); + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Child1 = BaseEvent.extend('Child1', {}) + const Child2 = BaseEvent.extend('Child2', {}) const event1_handler = async (event: BaseEvent): Promise => { - execution_order.push("Event1_start"); - const child1 = event.bus?.emit(Child1({}))!; - await child1.done(); - execution_order.push("Event1_end"); - return "event1_done"; - }; + execution_order.push('Event1_start') + const child1 = event.bus?.emit(Child1({}))! + await child1.done() + execution_order.push('Event1_end') + return 'event1_done' + } const child1_handler = async (event: BaseEvent): Promise => { - execution_order.push("Child1_start"); - const child2 = event.bus?.emit(Child2({}))!; - await child2.done(); - execution_order.push("Child1_end"); - return "child1_done"; - }; + execution_order.push('Child1_start') + const child2 = event.bus?.emit(Child2({}))! + await child2.done() + execution_order.push('Child1_end') + return 'child1_done' + } const child2_handler = async (): Promise => { - execution_order.push("Child2_start"); - execution_order.push("Child2_end"); - return "child2_done"; - }; + execution_order.push('Child2_start') + execution_order.push('Child2_end') + return 'child2_done' + } const event2_handler = async (): Promise => { - execution_order.push("Event2_start"); - execution_order.push("Event2_end"); - return "event2_done"; - }; + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } - bus.on(Event1, event1_handler); - bus.on(Child1, child1_handler); - bus.on(Child2, child2_handler); - bus.on(Event2, event2_handler); + bus.on(Event1, event1_handler) + bus.on(Child1, child1_handler) + bus.on(Child2, child2_handler) + bus.on(Event2, event2_handler) - const event_1 = bus.dispatch(Event1({})); - bus.dispatch(Event2({})); + const event_1 = bus.dispatch(Event1({})) + bus.dispatch(Event2({})) - await event_1.done(); + await event_1.done() - assert.ok(execution_order.includes("Child1_start")); - assert.ok(execution_order.includes("Child1_end")); - assert.ok(execution_order.includes("Child2_start")); - assert.ok(execution_order.includes("Child2_end")); + assert.ok(execution_order.includes('Child1_start')) + assert.ok(execution_order.includes('Child1_end')) + assert.ok(execution_order.includes('Child2_start')) + assert.ok(execution_order.includes('Child2_end')) - const child2_end_idx = execution_order.indexOf("Child2_end"); - const child1_end_idx = execution_order.indexOf("Child1_end"); - const event1_end_idx = execution_order.indexOf("Event1_end"); - assert.ok(child2_end_idx < child1_end_idx); - assert.ok(child1_end_idx < event1_end_idx); + const child2_end_idx = execution_order.indexOf('Child2_end') + const child1_end_idx = execution_order.indexOf('Child1_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child2_end_idx < child1_end_idx) + assert.ok(child1_end_idx < event1_end_idx) - assert.ok(!execution_order.includes("Event2_start")); + assert.ok(!execution_order.includes('Event2_start')) - await bus.waitUntilIdle(); + await bus.waitUntilIdle() - const event2_start_idx = execution_order.indexOf("Event2_start"); - assert.ok(event2_start_idx > event1_end_idx); -}); + const event2_start_idx = execution_order.indexOf('Event2_start') + assert.ok(event2_start_idx > event1_end_idx) +}) // ============================================================================= // Queue-Jump Concurrency Tests (Two-Bus) @@ -796,98 +754,124 @@ test("deeply nested awaited children", async () => { // then awaits child.done(), which queue-jumps the child on both buses. // ============================================================================= -test("BUG: queue-jump two-bus bus-serial handlers should serialize on each bus", async () => { - const TriggerEvent = BaseEvent.extend("QJ2BS_Trigger", {}); - const ChildEvent = BaseEvent.extend("QJ2BS_Child", {}); +test('BUG: queue-jump two-bus bus-serial handlers should serialize on each bus', async () => { + const TriggerEvent = BaseEvent.extend('QJ2BS_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2BS_Child', {}) - const bus_a = new EventBus("QJ2BS_A", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); - const bus_b = new EventBus("QJ2BS_B", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); + const bus_a = new EventBus('QJ2BS_A', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJ2BS_B', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) - const log: string[] = []; + const log: string[] = [] // Two handlers per bus. handler_1 is slow (15ms), handler_2 is fast (5ms). // With bus-serial, handler_1 must finish before handler_2 starts ON EACH BUS. // With buggy parallel, both start simultaneously and handler_2 finishes first. - const a_handler_1 = async () => { log.push("a1_start"); await delay(15); log.push("a1_end"); }; - const a_handler_2 = async () => { log.push("a2_start"); await delay(5); log.push("a2_end"); }; - const b_handler_1 = async () => { log.push("b1_start"); await delay(15); log.push("b1_end"); }; - const b_handler_2 = async () => { log.push("b2_start"); await delay(5); log.push("b2_end"); }; + const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } bus_a.on(TriggerEvent, async (event: InstanceType) => { - const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; - bus_b.dispatch(child); - await child.done(); - }); - bus_a.on(ChildEvent, a_handler_1); - bus_a.on(ChildEvent, a_handler_2); - bus_b.on(ChildEvent, b_handler_1); - bus_b.on(ChildEvent, b_handler_2); - - const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); - await top.done(); - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() // Bus A: handlers must serialize (a1 finishes before a2 starts) - const a1_end = log.indexOf("a1_end"); - const a2_start = log.indexOf("a2_start"); - assert.ok(a1_end >= 0 && a2_start >= 0, "bus_a handlers should have run"); - assert.ok( - a1_end < a2_start, - `bus_a (bus-serial): a1 should finish before a2 starts. Got: [${log.join(", ")}]` - ); + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a1_end >= 0 && a2_start >= 0, 'bus_a handlers should have run') + assert.ok(a1_end < a2_start, `bus_a (bus-serial): a1 should finish before a2 starts. Got: [${log.join(', ')}]`) // Bus B: handlers must serialize (b1 finishes before b2 starts) - const b1_end = log.indexOf("b1_end"); - const b2_start = log.indexOf("b2_start"); - assert.ok(b1_end >= 0 && b2_start >= 0, "bus_b handlers should have run"); - assert.ok( - b1_end < b2_start, - `bus_b (bus-serial): b1 should finish before b2 starts. Got: [${log.join(", ")}]` - ); -}); + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b1_end >= 0 && b2_start >= 0, 'bus_b handlers should have run') + assert.ok(b1_end < b2_start, `bus_b (bus-serial): b1 should finish before b2 starts. Got: [${log.join(', ')}]`) +}) -test("BUG: queue-jump two-bus global-serial handlers should serialize across both buses", async () => { - const TriggerEvent = BaseEvent.extend("QJ2GS_Trigger", {}); - const ChildEvent = BaseEvent.extend("QJ2GS_Child", {}); +test('BUG: queue-jump two-bus global-serial handlers should serialize across both buses', async () => { + const TriggerEvent = BaseEvent.extend('QJ2GS_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2GS_Child', {}) // Global-serial means ONE handler at a time GLOBALLY, across all buses. - const bus_a = new EventBus("QJ2GS_A", { - event_concurrency: "bus-serial", - handler_concurrency: "global-serial" - }); - const bus_b = new EventBus("QJ2GS_B", { - event_concurrency: "bus-serial", - handler_concurrency: "global-serial" - }); - - const log: string[] = []; - - const a_handler_1 = async () => { log.push("a1_start"); await delay(15); log.push("a1_end"); }; - const a_handler_2 = async () => { log.push("a2_start"); await delay(5); log.push("a2_end"); }; - const b_handler_1 = async () => { log.push("b1_start"); await delay(15); log.push("b1_end"); }; - const b_handler_2 = async () => { log.push("b2_start"); await delay(5); log.push("b2_end"); }; + const bus_a = new EventBus('QJ2GS_A', { + event_concurrency: 'bus-serial', + handler_concurrency: 'global-serial', + }) + const bus_b = new EventBus('QJ2GS_B', { + event_concurrency: 'bus-serial', + handler_concurrency: 'global-serial', + }) + + const log: string[] = [] + + const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } bus_a.on(TriggerEvent, async (event: InstanceType) => { - const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; - bus_b.dispatch(child); - await child.done(); - }); - bus_a.on(ChildEvent, a_handler_1); - bus_a.on(ChildEvent, a_handler_2); - bus_b.on(ChildEvent, b_handler_1); - bus_b.on(ChildEvent, b_handler_2); - - const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); - await top.done(); - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() // With global-serial, no two handlers should overlap anywhere. // runImmediatelyAcrossBuses processes buses sequentially (bus_a first, @@ -898,136 +882,147 @@ test("BUG: queue-jump two-bus global-serial handlers should serialize across bot // a1_start, a2_start, a2_end, a1_end, b1_start, b2_start, b2_end, b1_end // Check: within bus_a, handlers are serial - const a1_end = log.indexOf("a1_end"); - const a2_start = log.indexOf("a2_start"); - assert.ok( - a1_end < a2_start, - `global-serial: a1 should finish before a2 starts. Got: [${log.join(", ")}]` - ); + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a1_end < a2_start, `global-serial: a1 should finish before a2 starts. Got: [${log.join(', ')}]`) // Check: within bus_b, handlers are serial - const b1_end = log.indexOf("b1_end"); - const b2_start = log.indexOf("b2_start"); - assert.ok( - b1_end < b2_start, - `global-serial: b1 should finish before b2 starts. Got: [${log.join(", ")}]` - ); + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b1_end < b2_start, `global-serial: b1 should finish before b2 starts. Got: [${log.join(', ')}]`) // Check: bus_a handlers all finish before bus_b handlers start // (because runImmediatelyAcrossBuses processes sequentially and // all share the global handler limiter) - const a2_end = log.indexOf("a2_end"); - const b1_start = log.indexOf("b1_start"); - assert.ok( - a2_end < b1_start, - `global-serial: bus_a should finish before bus_b starts. Got: [${log.join(", ")}]` - ); -}); - -test("BUG: queue-jump two-bus mixed: bus_a bus-serial, bus_b parallel", async () => { - const TriggerEvent = BaseEvent.extend("QJ2Mix1_Trigger", {}); - const ChildEvent = BaseEvent.extend("QJ2Mix1_Child", {}); - - const bus_a = new EventBus("QJ2Mix1_A", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); - const bus_b = new EventBus("QJ2Mix1_B", { - event_concurrency: "bus-serial", - handler_concurrency: "parallel" // bus_b handlers should run in parallel - }); - - const log: string[] = []; - - const a_handler_1 = async () => { log.push("a1_start"); await delay(15); log.push("a1_end"); }; - const a_handler_2 = async () => { log.push("a2_start"); await delay(5); log.push("a2_end"); }; - const b_handler_1 = async () => { log.push("b1_start"); await delay(15); log.push("b1_end"); }; - const b_handler_2 = async () => { log.push("b2_start"); await delay(5); log.push("b2_end"); }; + const a2_end = log.indexOf('a2_end') + const b1_start = log.indexOf('b1_start') + assert.ok(a2_end < b1_start, `global-serial: bus_a should finish before bus_b starts. Got: [${log.join(', ')}]`) +}) + +test('BUG: queue-jump two-bus mixed: bus_a bus-serial, bus_b parallel', async () => { + const TriggerEvent = BaseEvent.extend('QJ2Mix1_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2Mix1_Child', {}) + + const bus_a = new EventBus('QJ2Mix1_A', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJ2Mix1_B', { + event_concurrency: 'bus-serial', + handler_concurrency: 'parallel', // bus_b handlers should run in parallel + }) + + const log: string[] = [] + + const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } bus_a.on(TriggerEvent, async (event: InstanceType) => { - const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; - bus_b.dispatch(child); - await child.done(); - }); - bus_a.on(ChildEvent, a_handler_1); - bus_a.on(ChildEvent, a_handler_2); - bus_b.on(ChildEvent, b_handler_1); - bus_b.on(ChildEvent, b_handler_2); - - const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); - await top.done(); - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() // Bus A (bus-serial): a1 must finish before a2 starts - const a1_end = log.indexOf("a1_end"); - const a2_start = log.indexOf("a2_start"); - assert.ok( - a1_end < a2_start, - `bus_a (bus-serial): a1 should finish before a2 starts. Got: [${log.join(", ")}]` - ); + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a1_end < a2_start, `bus_a (bus-serial): a1 should finish before a2 starts. Got: [${log.join(', ')}]`) // Bus B (parallel): both handlers should start before the slower one finishes. // b2 (5ms) starts and finishes before b1 (15ms) finishes. - const b1_end = log.indexOf("b1_end"); - const b2_start = log.indexOf("b2_start"); - assert.ok( - b2_start < b1_end, - `bus_b (parallel): b2 should start before b1 finishes. Got: [${log.join(", ")}]` - ); -}); - -test("BUG: queue-jump two-bus mixed: bus_a parallel, bus_b bus-serial", async () => { - const TriggerEvent = BaseEvent.extend("QJ2Mix2_Trigger", {}); - const ChildEvent = BaseEvent.extend("QJ2Mix2_Child", {}); - - const bus_a = new EventBus("QJ2Mix2_A", { - event_concurrency: "bus-serial", - handler_concurrency: "parallel" // bus_a handlers should run in parallel - }); - const bus_b = new EventBus("QJ2Mix2_B", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); - - const log: string[] = []; - - const a_handler_1 = async () => { log.push("a1_start"); await delay(15); log.push("a1_end"); }; - const a_handler_2 = async () => { log.push("a2_start"); await delay(5); log.push("a2_end"); }; - const b_handler_1 = async () => { log.push("b1_start"); await delay(15); log.push("b1_end"); }; - const b_handler_2 = async () => { log.push("b2_start"); await delay(5); log.push("b2_end"); }; + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b2_start < b1_end, `bus_b (parallel): b2 should start before b1 finishes. Got: [${log.join(', ')}]`) +}) + +test('BUG: queue-jump two-bus mixed: bus_a parallel, bus_b bus-serial', async () => { + const TriggerEvent = BaseEvent.extend('QJ2Mix2_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2Mix2_Child', {}) + + const bus_a = new EventBus('QJ2Mix2_A', { + event_concurrency: 'bus-serial', + handler_concurrency: 'parallel', // bus_a handlers should run in parallel + }) + const bus_b = new EventBus('QJ2Mix2_B', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) + + const log: string[] = [] + + const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } bus_a.on(TriggerEvent, async (event: InstanceType) => { - const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; - bus_b.dispatch(child); - await child.done(); - }); - bus_a.on(ChildEvent, a_handler_1); - bus_a.on(ChildEvent, a_handler_2); - bus_b.on(ChildEvent, b_handler_1); - bus_b.on(ChildEvent, b_handler_2); - - const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); - await top.done(); - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() // Bus A (parallel): handlers should overlap - const a1_end = log.indexOf("a1_end"); - const a2_start = log.indexOf("a2_start"); - assert.ok( - a2_start < a1_end, - `bus_a (parallel): a2 should start before a1 finishes. Got: [${log.join(", ")}]` - ); + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a2_start < a1_end, `bus_a (parallel): a2 should start before a1 finishes. Got: [${log.join(', ')}]`) // Bus B (bus-serial): b1 must finish before b2 starts - const b1_end = log.indexOf("b1_end"); - const b2_start = log.indexOf("b2_start"); - assert.ok( - b1_end < b2_start, - `bus_b (bus-serial): b1 should finish before b2 starts. Got: [${log.join(", ")}]` - ); -}); + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b1_end < b2_start, `bus_b (bus-serial): b1 should finish before b2 starts. Got: [${log.join(', ')}]`) +}) // ============================================================================= // Event-level concurrency on the forward bus. @@ -1042,185 +1037,179 @@ test("BUG: queue-jump two-bus mixed: bus_a parallel, bus_b bus-serial", async () // to the SAME limiter instance (global-serial shares one global limiter). // ============================================================================= -test("BUG: queue-jump should respect bus-serial event concurrency on forward bus", async () => { - const TriggerEvent = BaseEvent.extend("QJEvt_Trigger", {}); - const ChildEvent = BaseEvent.extend("QJEvt_Child", {}); - const SlowEvent = BaseEvent.extend("QJEvt_Slow", {}); +test('BUG: queue-jump should respect bus-serial event concurrency on forward bus', async () => { + const TriggerEvent = BaseEvent.extend('QJEvt_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJEvt_Child', {}) + const SlowEvent = BaseEvent.extend('QJEvt_Slow', {}) - const bus_a = new EventBus("QJEvt_A", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); - const bus_b = new EventBus("QJEvt_B", { - event_concurrency: "bus-serial", // only one event at a time on bus_b - handler_concurrency: "bus-serial" - }); + const bus_a = new EventBus('QJEvt_A', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJEvt_B', { + event_concurrency: 'bus-serial', // only one event at a time on bus_b + handler_concurrency: 'bus-serial', + }) - const log: string[] = []; + const log: string[] = [] // SlowEvent handler: occupies bus_b's event limiter for 40ms bus_b.on(SlowEvent, async () => { - log.push("slow_start"); - await delay(40); - log.push("slow_end"); - }); + log.push('slow_start') + await delay(40) + log.push('slow_end') + }) // ChildEvent handler on bus_b: should only run after SlowEvent finishes bus_b.on(ChildEvent, async () => { - log.push("child_b_start"); - await delay(5); - log.push("child_b_end"); - }); + log.push('child_b_start') + await delay(5) + log.push('child_b_end') + }) // ChildEvent handler on bus_a (so bus_a also processes the child) bus_a.on(ChildEvent, async () => { - log.push("child_a_start"); - await delay(5); - log.push("child_a_end"); - }); + log.push('child_a_start') + await delay(5) + log.push('child_a_end') + }) // TriggerEvent handler: dispatches child to both buses, awaits completion bus_a.on(TriggerEvent, async (event: InstanceType) => { - const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; - bus_b.dispatch(child); - await child.done(); - }); + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) // Step 1: Start a slow event on bus_b so it's busy - bus_b.dispatch(SlowEvent({ event_timeout: null })); - await delay(5); // let slow_handler start + bus_b.dispatch(SlowEvent({ event_timeout: null })) + await delay(5) // let slow_handler start // Step 2: Trigger the queue-jump on bus_a - const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); - await top.done(); - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() // The child on bus_b should start AFTER the slow event finishes, // because bus_b has bus-serial event concurrency. - const slow_end = log.indexOf("slow_end"); - const child_b_start = log.indexOf("child_b_start"); - assert.ok(slow_end >= 0, "slow event should have completed"); - assert.ok(child_b_start >= 0, "child on bus_b should have run"); + const slow_end = log.indexOf('slow_end') + const child_b_start = log.indexOf('child_b_start') + assert.ok(slow_end >= 0, 'slow event should have completed') + assert.ok(child_b_start >= 0, 'child on bus_b should have run') assert.ok( slow_end < child_b_start, - `bus_b (bus-serial events): child should wait for slow event to finish. ` + - `Got: [${log.join(", ")}]` - ); + `bus_b (bus-serial events): child should wait for slow event to finish. ` + `Got: [${log.join(', ')}]` + ) // The child on bus_a should have processed (queue-jumped, bypasses bus_a's event limiter) - assert.ok(log.includes("child_a_start"), "child on bus_a should have run"); - assert.ok(log.includes("child_a_end"), "child on bus_a should have completed"); -}); + assert.ok(log.includes('child_a_start'), 'child on bus_a should have run') + assert.ok(log.includes('child_a_end'), 'child on bus_a should have completed') +}) -test("queue-jump with fully-parallel forward bus starts immediately", async () => { +test('queue-jump with fully-parallel forward bus starts immediately', async () => { // When bus_b uses parallel event AND handler concurrency, the queue-jumped // child should start immediately even while another event's handler is running. - const TriggerEvent = BaseEvent.extend("QJFullPar_Trigger", {}); - const ChildEvent = BaseEvent.extend("QJFullPar_Child", {}); - const SlowEvent = BaseEvent.extend("QJFullPar_Slow", {}); + const TriggerEvent = BaseEvent.extend('QJFullPar_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJFullPar_Child', {}) + const SlowEvent = BaseEvent.extend('QJFullPar_Slow', {}) - const bus_a = new EventBus("QJFullPar_A", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); - const bus_b = new EventBus("QJFullPar_B", { - event_concurrency: "parallel", - handler_concurrency: "parallel" - }); + const bus_a = new EventBus('QJFullPar_A', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJFullPar_B', { + event_concurrency: 'parallel', + handler_concurrency: 'parallel', + }) - const log: string[] = []; + const log: string[] = [] bus_b.on(SlowEvent, async () => { - log.push("slow_start"); - await delay(40); - log.push("slow_end"); - }); + log.push('slow_start') + await delay(40) + log.push('slow_end') + }) bus_b.on(ChildEvent, async () => { - log.push("child_b_start"); - await delay(5); - log.push("child_b_end"); - }); + log.push('child_b_start') + await delay(5) + log.push('child_b_end') + }) bus_a.on(TriggerEvent, async (event: InstanceType) => { - const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; - bus_b.dispatch(child); - await child.done(); - }); - - bus_b.dispatch(SlowEvent({ event_timeout: null })); - await delay(5); - - const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); - await top.done(); - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); - - const slow_end = log.indexOf("slow_end"); - const child_b_start = log.indexOf("child_b_start"); - assert.ok(child_b_start >= 0, "child on bus_b should have run"); - assert.ok( - child_b_start < slow_end, - `bus_b (fully parallel): child should start before slow finishes. ` + - `Got: [${log.join(", ")}]` - ); -}); - -test("queue-jump with parallel events but bus-serial handlers on forward bus serializes handlers", async () => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) + + bus_b.dispatch(SlowEvent({ event_timeout: null })) + await delay(5) + + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + const slow_end = log.indexOf('slow_end') + const child_b_start = log.indexOf('child_b_start') + assert.ok(child_b_start >= 0, 'child on bus_b should have run') + assert.ok(child_b_start < slow_end, `bus_b (fully parallel): child should start before slow finishes. ` + `Got: [${log.join(', ')}]`) +}) + +test('queue-jump with parallel events but bus-serial handlers on forward bus serializes handlers', async () => { // When bus_b has parallel event concurrency but bus-serial handler concurrency, // the child event can start processing immediately (event limiter is parallel), // but its handler must wait for the slow handler to release the handler limiter. - const TriggerEvent = BaseEvent.extend("QJEvtParHSer_Trigger", {}); - const ChildEvent = BaseEvent.extend("QJEvtParHSer_Child", {}); - const SlowEvent = BaseEvent.extend("QJEvtParHSer_Slow", {}); + const TriggerEvent = BaseEvent.extend('QJEvtParHSer_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJEvtParHSer_Child', {}) + const SlowEvent = BaseEvent.extend('QJEvtParHSer_Slow', {}) - const bus_a = new EventBus("QJEvtParHSer_A", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); - const bus_b = new EventBus("QJEvtParHSer_B", { - event_concurrency: "parallel", // events can start concurrently - handler_concurrency: "bus-serial" // but handlers serialize - }); + const bus_a = new EventBus('QJEvtParHSer_A', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('QJEvtParHSer_B', { + event_concurrency: 'parallel', // events can start concurrently + handler_concurrency: 'bus-serial', // but handlers serialize + }) - const log: string[] = []; + const log: string[] = [] bus_b.on(SlowEvent, async () => { - log.push("slow_start"); - await delay(40); - log.push("slow_end"); - }); + log.push('slow_start') + await delay(40) + log.push('slow_end') + }) bus_b.on(ChildEvent, async () => { - log.push("child_b_start"); - await delay(5); - log.push("child_b_end"); - }); + log.push('child_b_start') + await delay(5) + log.push('child_b_end') + }) bus_a.on(TriggerEvent, async (event: InstanceType) => { - const child = event.bus?.emit(ChildEvent({ event_timeout: null }))!; - bus_b.dispatch(child); - await child.done(); - }); + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.dispatch(child) + await child.done() + }) - bus_b.dispatch(SlowEvent({ event_timeout: null })); - await delay(5); + bus_b.dispatch(SlowEvent({ event_timeout: null })) + await delay(5) - const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })); - await top.done(); - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); + const top = bus_a.dispatch(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() // With bus-serial handler concurrency, child handler must wait for slow handler - const slow_end = log.indexOf("slow_end"); - const child_b_start = log.indexOf("child_b_start"); - assert.ok(child_b_start >= 0, "child on bus_b should have run"); + const slow_end = log.indexOf('slow_end') + const child_b_start = log.indexOf('child_b_start') + assert.ok(child_b_start >= 0, 'child on bus_b should have run') assert.ok( child_b_start > slow_end, - `bus_b (bus-serial handlers): child handler should wait for slow handler. ` + - `Got: [${log.join(", ")}]` - ); -}); + `bus_b (bus-serial handlers): child handler should wait for slow handler. ` + `Got: [${log.join(', ')}]` + ) +}) diff --git a/bubus-ts/tests/context_propagation.test.ts b/bubus-ts/tests/context_propagation.test.ts index e85ca8b..a597aea 100644 --- a/bubus-ts/tests/context_propagation.test.ts +++ b/bubus-ts/tests/context_propagation.test.ts @@ -1,349 +1,307 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { BaseEvent, EventBus } from "../src/index.js"; -import { async_local_storage, hasAsyncLocalStorage } from "../src/async_context.js"; +import { BaseEvent, EventBus } from '../src/index.js' +import { async_local_storage, hasAsyncLocalStorage } from '../src/async_context.js' type ContextStore = { - request_id?: string; - user_id?: string; - trace_id?: string; -}; + request_id?: string + user_id?: string + trace_id?: string +} -const SimpleEvent = BaseEvent.extend("SimpleEvent", {}); -const ChildEvent = BaseEvent.extend("ChildEvent", {}); +const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) -const skip_if_no_async_local_storage = !hasAsyncLocalStorage(); +const skip_if_no_async_local_storage = !hasAsyncLocalStorage() const require_async_local_storage = () => { - assert.ok(async_local_storage, "AsyncLocalStorage not available"); - return async_local_storage; -}; - -const get_store = (store: ContextStore | undefined | null): ContextStore => store ?? {}; - -test( - "context propagates to handler", - { skip: skip_if_no_async_local_storage }, - async () => { - const bus = new EventBus("ContextTestBus"); - const captured_values: ContextStore = {}; - const storage = require_async_local_storage(); - - bus.on(SimpleEvent, () => { - const store = storage.getStore() as ContextStore | undefined; - captured_values.request_id = store?.request_id; - captured_values.user_id = store?.user_id; - }); - - await storage.run( - { request_id: "req-12345", user_id: "user-abc" }, - async () => { - const event = bus.dispatch(SimpleEvent({})); - await event.done(); - } - ); - - assert.equal(captured_values.request_id, "req-12345"); - assert.equal(captured_values.user_id, "user-abc"); - } -); - -test( - "context propagates through nested handlers", - { skip: skip_if_no_async_local_storage }, - async () => { - const bus = new EventBus("NestedContextBus"); - const captured_parent: ContextStore = {}; - const captured_child: ContextStore = {}; - const storage = require_async_local_storage(); - - bus.on(SimpleEvent, async (event) => { - const store = storage.getStore() as ContextStore | undefined; - captured_parent.request_id = store?.request_id; - captured_parent.trace_id = store?.trace_id; - - const child = event.bus?.dispatch(ChildEvent({})); - if (child) { - await child.done(); - } - }); - - bus.on(ChildEvent, () => { - const store = storage.getStore() as ContextStore | undefined; - captured_child.request_id = store?.request_id; - captured_child.trace_id = store?.trace_id; - }); - - await storage.run( - { request_id: "req-nested-123", trace_id: "trace-xyz" }, - async () => { - const event = bus.dispatch(SimpleEvent({})); - await event.done(); - } - ); - - assert.equal(captured_parent.request_id, "req-nested-123"); - assert.equal(captured_parent.trace_id, "trace-xyz"); - assert.equal(captured_child.request_id, "req-nested-123"); - assert.equal(captured_child.trace_id, "trace-xyz"); - } -); - -test( - "context isolation between dispatches", - { skip: skip_if_no_async_local_storage }, - async () => { - const bus = new EventBus("IsolationTestBus"); - const captured_values: string[] = []; - const storage = require_async_local_storage(); - - bus.on(SimpleEvent, async () => { - const store = storage.getStore() as ContextStore | undefined; - captured_values.push(store?.request_id ?? ""); - }); - - const event_a = storage.run({ request_id: "req-A" }, () => bus.dispatch(SimpleEvent({}))); - const event_b = storage.run({ request_id: "req-B" }, () => bus.dispatch(SimpleEvent({}))); - - await event_a.done(); - await event_b.done(); + assert.ok(async_local_storage, 'AsyncLocalStorage not available') + return async_local_storage +} + +const get_store = (store: ContextStore | undefined | null): ContextStore => store ?? {} + +test('context propagates to handler', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('ContextTestBus') + const captured_values: ContextStore = {} + const storage = require_async_local_storage() + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.request_id = store?.request_id + captured_values.user_id = store?.user_id + }) + + await storage.run({ request_id: 'req-12345', user_id: 'user-abc' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(captured_values.request_id, 'req-12345') + assert.equal(captured_values.user_id, 'user-abc') +}) + +test('context propagates through nested handlers', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('NestedContextBus') + const captured_parent: ContextStore = {} + const captured_child: ContextStore = {} + const storage = require_async_local_storage() + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + captured_parent.request_id = store?.request_id + captured_parent.trace_id = store?.trace_id + + const child = event.bus?.dispatch(ChildEvent({})) + if (child) { + await child.done() + } + }) + + bus.on(ChildEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_child.request_id = store?.request_id + captured_child.trace_id = store?.trace_id + }) + + await storage.run({ request_id: 'req-nested-123', trace_id: 'trace-xyz' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(captured_parent.request_id, 'req-nested-123') + assert.equal(captured_parent.trace_id, 'trace-xyz') + assert.equal(captured_child.request_id, 'req-nested-123') + assert.equal(captured_child.trace_id, 'trace-xyz') +}) + +test('context isolation between dispatches', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('IsolationTestBus') + const captured_values: string[] = [] + const storage = require_async_local_storage() + + bus.on(SimpleEvent, async () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.push(store?.request_id ?? '') + }) + + const event_a = storage.run({ request_id: 'req-A' }, () => bus.dispatch(SimpleEvent({}))) + const event_b = storage.run({ request_id: 'req-B' }, () => bus.dispatch(SimpleEvent({}))) + + await event_a.done() + await event_b.done() + + assert.ok(captured_values.includes('req-A')) + assert.ok(captured_values.includes('req-B')) +}) + +test('context propagates to multiple handlers', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('ParallelContextBus') + const captured_values: string[] = [] + const storage = require_async_local_storage() + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.push(`h1:${store?.request_id ?? ''}`) + }) + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.push(`h2:${store?.request_id ?? ''}`) + }) + + await storage.run({ request_id: 'req-parallel' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.ok(captured_values.includes('h1:req-parallel')) + assert.ok(captured_values.includes('h2:req-parallel')) +}) + +test('context propagates through event forwarding', { skip: skip_if_no_async_local_storage }, async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + const captured_bus_a: ContextStore = {} + const captured_bus_b: ContextStore = {} + const storage = require_async_local_storage() + + bus_a.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_bus_a.request_id = store?.request_id + }) + + bus_b.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_bus_b.request_id = store?.request_id + }) + + bus_a.on('*', bus_b.dispatch) + + await storage.run({ request_id: 'req-forwarded' }, async () => { + const event = bus_a.dispatch(SimpleEvent({})) + await event.done() + await bus_b.waitUntilIdle() + }) + + assert.equal(captured_bus_a.request_id, 'req-forwarded') + assert.equal(captured_bus_b.request_id, 'req-forwarded') +}) + +test('handler can modify context without affecting parent', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('ModifyContextBus') + const storage = require_async_local_storage() + let parent_value_after_child = '' + + bus.on(SimpleEvent, async (event) => { + if (!storage.enterWith) { + throw new Error('AsyncLocalStorage.enterWith is required for this test') + } + storage.enterWith({ request_id: 'parent-value' }) + const child = event.bus?.dispatch(ChildEvent({})) + if (child) { + await child.done() + } + const store = get_store(storage.getStore() as ContextStore | undefined) + parent_value_after_child = store.request_id ?? '' + }) - assert.ok(captured_values.includes("req-A")); - assert.ok(captured_values.includes("req-B")); - } -); - -test( - "context propagates to multiple handlers", - { skip: skip_if_no_async_local_storage }, - async () => { - const bus = new EventBus("ParallelContextBus"); - const captured_values: string[] = []; - const storage = require_async_local_storage(); - - bus.on(SimpleEvent, () => { - const store = storage.getStore() as ContextStore | undefined; - captured_values.push(`h1:${store?.request_id ?? ""}`); - }); - - bus.on(SimpleEvent, () => { - const store = storage.getStore() as ContextStore | undefined; - captured_values.push(`h2:${store?.request_id ?? ""}`); - }); - - await storage.run({ request_id: "req-parallel" }, async () => { - const event = bus.dispatch(SimpleEvent({})); - await event.done(); - }); - - assert.ok(captured_values.includes("h1:req-parallel")); - assert.ok(captured_values.includes("h2:req-parallel")); - } -); - -test( - "context propagates through event forwarding", - { skip: skip_if_no_async_local_storage }, - async () => { - const bus_a = new EventBus("BusA"); - const bus_b = new EventBus("BusB"); - const captured_bus_a: ContextStore = {}; - const captured_bus_b: ContextStore = {}; - const storage = require_async_local_storage(); - - bus_a.on(SimpleEvent, () => { - const store = storage.getStore() as ContextStore | undefined; - captured_bus_a.request_id = store?.request_id; - }); - - bus_b.on(SimpleEvent, () => { - const store = storage.getStore() as ContextStore | undefined; - captured_bus_b.request_id = store?.request_id; - }); - - bus_a.on("*", bus_b.dispatch); - - await storage.run({ request_id: "req-forwarded" }, async () => { - const event = bus_a.dispatch(SimpleEvent({})); - await event.done(); - await bus_b.waitUntilIdle(); - }); - - assert.equal(captured_bus_a.request_id, "req-forwarded"); - assert.equal(captured_bus_b.request_id, "req-forwarded"); - } -); - -test( - "handler can modify context without affecting parent", - { skip: skip_if_no_async_local_storage }, - async () => { - const bus = new EventBus("ModifyContextBus"); - const storage = require_async_local_storage(); - let parent_value_after_child = ""; - - bus.on(SimpleEvent, async (event) => { - if (!storage.enterWith) { - throw new Error("AsyncLocalStorage.enterWith is required for this test"); - } - storage.enterWith({ request_id: "parent-value" }); - const child = event.bus?.dispatch(ChildEvent({})); - if (child) { - await child.done(); - } - const store = get_store(storage.getStore() as ContextStore | undefined); - parent_value_after_child = store.request_id ?? ""; - }); - - bus.on(ChildEvent, () => { - if (!storage.enterWith) { - throw new Error("AsyncLocalStorage.enterWith is required for this test"); - } - storage.enterWith({ request_id: "child-modified" }); - }); - - await storage.run({}, async () => { - const event = bus.dispatch(SimpleEvent({})); - await event.done(); - }); - - assert.equal(parent_value_after_child, "parent-value"); - } -); - -test( - "event parent_id tracking still works with context propagation", - { skip: skip_if_no_async_local_storage }, - async () => { - const bus = new EventBus("ParentIdTrackingBus"); - const storage = require_async_local_storage(); - let parent_event_id: string | undefined; - let child_event_parent_id: string | undefined; - - bus.on(SimpleEvent, async (event) => { - parent_event_id = event.event_id; - const child = event.bus?.dispatch(ChildEvent({})); - if (child) { - await child.done(); - } - }); - - bus.on(ChildEvent, (event) => { - child_event_parent_id = event.event_parent_id; - }); - - await storage.run({ request_id: "req-parent-tracking" }, async () => { - const event = bus.dispatch(SimpleEvent({})); - await event.done(); - }); - - assert.ok(parent_event_id); - assert.ok(child_event_parent_id); - assert.equal(child_event_parent_id, parent_event_id); - } -); - -test( - "dispatch context and parent_id both work together", - { skip: skip_if_no_async_local_storage }, - async () => { - const bus = new EventBus("CombinedContextBus"); - const storage = require_async_local_storage(); - const results: Record = {}; - - bus.on(SimpleEvent, async (event) => { - const store = storage.getStore() as ContextStore | undefined; - results.parent_request_id = store?.request_id; - results.parent_event_id = event.event_id; - const child = event.bus?.dispatch(ChildEvent({})); - if (child) { - await child.done(); - } - }); - - bus.on(ChildEvent, (event) => { - const store = storage.getStore() as ContextStore | undefined; - results.child_request_id = store?.request_id; - results.child_event_parent_id = event.event_parent_id; - }); - - await storage.run({ request_id: "req-combined-test" }, async () => { - const event = bus.dispatch(SimpleEvent({})); - await event.done(); - }); - - assert.equal(results.parent_request_id, "req-combined-test"); - assert.equal(results.child_request_id, "req-combined-test"); - assert.equal(results.child_event_parent_id, results.parent_event_id); - } -); - -test( - "deeply nested context and parent tracking", - { skip: skip_if_no_async_local_storage }, - async () => { - const bus = new EventBus("DeepNestingBus"); - const storage = require_async_local_storage(); - const results: Array<{ - level: number; - request_id?: string; - event_id: string; - parent_id?: string; - }> = []; - - const Level2Event = BaseEvent.extend("Level2Event", {}); - const Level3Event = BaseEvent.extend("Level3Event", {}); - - bus.on(SimpleEvent, async (event) => { - const store = storage.getStore() as ContextStore | undefined; - results.push({ - level: 1, - request_id: store?.request_id, - event_id: event.event_id, - parent_id: event.event_parent_id - }); - const child = event.bus?.dispatch(Level2Event({})); - if (child) { - await child.done(); - } - }); - - bus.on(Level2Event, async (event) => { - const store = storage.getStore() as ContextStore | undefined; - results.push({ - level: 2, - request_id: store?.request_id, - event_id: event.event_id, - parent_id: event.event_parent_id - }); - const child = event.bus?.dispatch(Level3Event({})); - if (child) { - await child.done(); - } - }); - - bus.on(Level3Event, (event) => { - const store = storage.getStore() as ContextStore | undefined; - results.push({ - level: 3, - request_id: store?.request_id, - event_id: event.event_id, - parent_id: event.event_parent_id - }); - }); - - await storage.run({ request_id: "req-deep-nesting" }, async () => { - const event = bus.dispatch(SimpleEvent({})); - await event.done(); - }); - - assert.equal(results.length, 3); - for (const result of results) { - assert.equal(result.request_id, "req-deep-nesting"); + bus.on(ChildEvent, () => { + if (!storage.enterWith) { + throw new Error('AsyncLocalStorage.enterWith is required for this test') + } + storage.enterWith({ request_id: 'child-modified' }) + }) + + await storage.run({}, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(parent_value_after_child, 'parent-value') +}) + +test('event parent_id tracking still works with context propagation', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('ParentIdTrackingBus') + const storage = require_async_local_storage() + let parent_event_id: string | undefined + let child_event_parent_id: string | undefined + + bus.on(SimpleEvent, async (event) => { + parent_event_id = event.event_id + const child = event.bus?.dispatch(ChildEvent({})) + if (child) { + await child.done() + } + }) + + bus.on(ChildEvent, (event) => { + child_event_parent_id = event.event_parent_id + }) + + await storage.run({ request_id: 'req-parent-tracking' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.ok(parent_event_id) + assert.ok(child_event_parent_id) + assert.equal(child_event_parent_id, parent_event_id) +}) + +test('dispatch context and parent_id both work together', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('CombinedContextBus') + const storage = require_async_local_storage() + const results: Record = {} + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + results.parent_request_id = store?.request_id + results.parent_event_id = event.event_id + const child = event.bus?.dispatch(ChildEvent({})) + if (child) { + await child.done() + } + }) + + bus.on(ChildEvent, (event) => { + const store = storage.getStore() as ContextStore | undefined + results.child_request_id = store?.request_id + results.child_event_parent_id = event.event_parent_id + }) + + await storage.run({ request_id: 'req-combined-test' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(results.parent_request_id, 'req-combined-test') + assert.equal(results.child_request_id, 'req-combined-test') + assert.equal(results.child_event_parent_id, results.parent_event_id) +}) + +test('deeply nested context and parent tracking', { skip: skip_if_no_async_local_storage }, async () => { + const bus = new EventBus('DeepNestingBus') + const storage = require_async_local_storage() + const results: Array<{ + level: number + request_id?: string + event_id: string + parent_id?: string + }> = [] + + const Level2Event = BaseEvent.extend('Level2Event', {}) + const Level3Event = BaseEvent.extend('Level3Event', {}) + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + results.push({ + level: 1, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id, + }) + const child = event.bus?.dispatch(Level2Event({})) + if (child) { + await child.done() + } + }) + + bus.on(Level2Event, async (event) => { + const store = storage.getStore() as ContextStore | undefined + results.push({ + level: 2, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id, + }) + const child = event.bus?.dispatch(Level3Event({})) + if (child) { + await child.done() } - assert.equal(results[0].parent_id, undefined); - assert.equal(results[1].parent_id, results[0].event_id); - assert.equal(results[2].parent_id, results[1].event_id); + }) + + bus.on(Level3Event, (event) => { + const store = storage.getStore() as ContextStore | undefined + results.push({ + level: 3, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id, + }) + }) + + await storage.run({ request_id: 'req-deep-nesting' }, async () => { + const event = bus.dispatch(SimpleEvent({})) + await event.done() + }) + + assert.equal(results.length, 3) + for (const result of results) { + assert.equal(result.request_id, 'req-deep-nesting') } -); + assert.equal(results[0].parent_id, undefined) + assert.equal(results[1].parent_id, results[0].event_id) + assert.equal(results[2].parent_id, results[1].event_id) +}) diff --git a/bubus-ts/tests/debounce.test.ts b/bubus-ts/tests/debounce.test.ts index d45de1f..54bd49f 100644 --- a/bubus-ts/tests/debounce.test.ts +++ b/bubus-ts/tests/debounce.test.ts @@ -1,134 +1,112 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { z } from "zod"; +import { z } from 'zod' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const ParentEvent = BaseEvent.extend("ParentEvent", {}); +const ParentEvent = BaseEvent.extend('ParentEvent', {}) -const ScreenshotEvent = BaseEvent.extend("ScreenshotEvent", { target_id: z.string() }); +const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', { target_id: z.string() }) -const SyncEvent = BaseEvent.extend("SyncEvent", {}); +const SyncEvent = BaseEvent.extend('SyncEvent', {}) -test("simple debounce uses recent history or dispatches new", async () => { - const bus = new EventBus("DebounceBus"); +test('simple debounce uses recent history or dispatches new', async () => { + const bus = new EventBus('DebounceBus') - const parent_event = bus.dispatch(ParentEvent({})); - await parent_event.done(); + const parent_event = bus.dispatch(ParentEvent({})) + await parent_event.done() - const child_event = parent_event.bus?.emit(ScreenshotEvent({ target_id: "tab-1" })); - assert.ok(child_event); - await child_event.done(); + const child_event = parent_event.bus?.emit(ScreenshotEvent({ target_id: 'tab-1' })) + assert.ok(child_event) + await child_event.done() const reused_event = (await bus.find(ScreenshotEvent, { past: 10, future: false, - child_of: parent_event - })) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "fallback" })).done()); + child_of: parent_event, + })) ?? (await bus.dispatch(ScreenshotEvent({ target_id: 'fallback' })).done()) - assert.equal(reused_event.event_id, child_event.event_id); - assert.equal(reused_event.event_parent_id, parent_event.event_id); -}); + assert.equal(reused_event.event_id, child_event.event_id) + assert.equal(reused_event.event_parent_id, parent_event.event_id) +}) -test("advanced debounce prefers history, then waits for future, then dispatches", async () => { - const bus = new EventBus("AdvancedDebounceBus"); +test('advanced debounce prefers history, then waits for future, then dispatches', async () => { + const bus = new EventBus('AdvancedDebounceBus') - const pending_event = bus.find(SyncEvent, { past: false, future: 0.5 }); + const pending_event = bus.find(SyncEvent, { past: false, future: 0.5 }) setTimeout(() => { - bus.dispatch(SyncEvent({})); - }, 50); + bus.dispatch(SyncEvent({})) + }, 50) const resolved_event = - (await bus.find(SyncEvent, { past: true, future: false })) ?? - (await pending_event) ?? - (await bus.dispatch(SyncEvent({})).done()); + (await bus.find(SyncEvent, { past: true, future: false })) ?? (await pending_event) ?? (await bus.dispatch(SyncEvent({})).done()) - assert.ok(resolved_event); - assert.equal(resolved_event.event_type, "SyncEvent"); -}); + assert.ok(resolved_event) + assert.equal(resolved_event.event_type, 'SyncEvent') +}) -test("debounce returns existing fresh event", async () => { - const bus = new EventBus("DebounceFreshBus"); +test('debounce returns existing fresh event', async () => { + const bus = new EventBus('DebounceFreshBus') - const original = await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done(); + const original = await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done() const is_fresh = (event: typeof original): boolean => { - const completed_at = event.event_completed_at ? Date.parse(event.event_completed_at) : 0; - return Date.now() - completed_at < 5000; - }; + const completed_at = event.event_completed_at ? Date.parse(event.event_completed_at) : 0 + return Date.now() - completed_at < 5000 + } const result = - (await bus.find( - ScreenshotEvent, - (event) => event.target_id === "tab1" && is_fresh(event), - { past: true, future: false } - )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1' && is_fresh(event), { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) - assert.equal(result.event_id, original.event_id); -}); + assert.equal(result.event_id, original.event_id) +}) -test("debounce dispatches new when no match", async () => { - const bus = new EventBus("DebounceNoMatchBus"); +test('debounce dispatches new when no match', async () => { + const bus = new EventBus('DebounceNoMatchBus') const result = - (await bus.find( - ScreenshotEvent, - (event) => event.target_id === "tab1", - { past: true, future: false } - )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1', { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) - assert.ok(result); - assert.equal(result.target_id, "tab1"); - assert.equal(result.event_status, "completed"); -}); + assert.ok(result) + assert.equal(result.target_id, 'tab1') + assert.equal(result.event_status, 'completed') +}) -test("debounce dispatches new when existing is stale", async () => { - const bus = new EventBus("DebounceStaleBus"); +test('debounce dispatches new when existing is stale', async () => { + const bus = new EventBus('DebounceStaleBus') - await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done(); + await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done() const result = - (await bus.find( - ScreenshotEvent, - (event) => event.target_id === "tab1" && false, - { past: true, future: false } - )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); - - assert.ok(result); - const screenshots = Array.from(bus.event_history.values()).filter( - (event) => event.event_type === "ScreenshotEvent" - ); - assert.equal(screenshots.length, 2); -}); - -test("debounce or-chain handles sequential lookups without blocking", async () => { - const bus = new EventBus("DebounceSequentialBus"); + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1' && false, { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) + + assert.ok(result) + const screenshots = Array.from(bus.event_history.values()).filter((event) => event.event_type === 'ScreenshotEvent') + assert.equal(screenshots.length, 2) +}) + +test('debounce or-chain handles sequential lookups without blocking', async () => { + const bus = new EventBus('DebounceSequentialBus') const result1 = - (await bus.find( - ScreenshotEvent, - (event) => event.target_id === "tab1", - { past: true, future: false } - )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1', { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) const result2 = - (await bus.find( - ScreenshotEvent, - (event) => event.target_id === "tab1", - { past: true, future: false } - )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab1" })).done()); + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1', { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })).done()) const result3 = - (await bus.find( - ScreenshotEvent, - (event) => event.target_id === "tab2", - { past: true, future: false } - )) ?? (await bus.dispatch(ScreenshotEvent({ target_id: "tab2" })).done()); - - assert.equal(result1.event_id, result2.event_id); - assert.notEqual(result1.event_id, result3.event_id); - assert.equal(result3.target_id, "tab2"); -}); + (await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab2', { past: true, future: false })) ?? + (await bus.dispatch(ScreenshotEvent({ target_id: 'tab2' })).done()) + + assert.equal(result1.event_id, result2.event_id) + assert.notEqual(result1.event_id, result3.event_id) + assert.equal(result3.target_id, 'tab2') +}) diff --git a/bubus-ts/tests/error_handling.test.ts b/bubus-ts/tests/error_handling.test.ts index b014703..a3ca425 100644 --- a/bubus-ts/tests/error_handling.test.ts +++ b/bubus-ts/tests/error_handling.test.ts @@ -1,228 +1,221 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const TestEvent = BaseEvent.extend("TestEvent", {}); +const TestEvent = BaseEvent.extend('TestEvent', {}) const delay = (ms: number): Promise => new Promise((resolve) => { - setTimeout(resolve, ms); - }); + setTimeout(resolve, ms) + }) -test("handler error is captured and does not prevent other handlers from running", async () => { - const bus = new EventBus("ErrorIsolationBus"); - const results: string[] = []; +test('handler error is captured and does not prevent other handlers from running', async () => { + const bus = new EventBus('ErrorIsolationBus') + const results: string[] = [] const failing_handler = (): string => { - throw new Error("Expected to fail - testing error handling"); - }; + throw new Error('Expected to fail - testing error handling') + } const working_handler = (): string => { - results.push("success"); - return "worked"; - }; + results.push('success') + return 'worked' + } - bus.on(TestEvent, failing_handler); - bus.on(TestEvent, working_handler); + bus.on(TestEvent, failing_handler) + bus.on(TestEvent, working_handler) - const event = bus.dispatch(TestEvent({})); - await event.done(); + const event = bus.dispatch(TestEvent({})) + await event.done() // Both handlers should have run and produced results - assert.equal(event.event_results.size, 2); - - const failing_result = Array.from(event.event_results.values()).find( - (r) => r.handler_name === "failing_handler" - ); - assert.ok(failing_result, "failing_handler result should exist"); - assert.equal(failing_result.status, "error"); - assert.ok(failing_result.error instanceof Error); - assert.ok( - (failing_result.error as Error).message.includes("Expected to fail"), - "error message should contain the thrown message" - ); - - const working_result = Array.from(event.event_results.values()).find( - (r) => r.handler_name === "working_handler" - ); - assert.ok(working_result, "working_handler result should exist"); - assert.equal(working_result.status, "completed"); - assert.equal(working_result.result, "worked"); + assert.equal(event.event_results.size, 2) + + const failing_result = Array.from(event.event_results.values()).find((r) => r.handler_name === 'failing_handler') + assert.ok(failing_result, 'failing_handler result should exist') + assert.equal(failing_result.status, 'error') + assert.ok(failing_result.error instanceof Error) + assert.ok((failing_result.error as Error).message.includes('Expected to fail'), 'error message should contain the thrown message') + + const working_result = Array.from(event.event_results.values()).find((r) => r.handler_name === 'working_handler') + assert.ok(working_result, 'working_handler result should exist') + assert.equal(working_result.status, 'completed') + assert.equal(working_result.result, 'worked') // The working handler actually ran - assert.deepEqual(results, ["success"]); -}); + assert.deepEqual(results, ['success']) +}) -test("event.event_errors collects handler errors", async () => { - const bus = new EventBus("ErrorCollectionBus"); +test('event.event_errors collects handler errors', async () => { + const bus = new EventBus('ErrorCollectionBus') const handler_a = (): void => { - throw new Error("error_a"); - }; + throw new Error('error_a') + } const handler_b = (): void => { - throw new TypeError("error_b"); - }; + throw new TypeError('error_b') + } const handler_c = (): string => { - return "ok"; - }; + return 'ok' + } - bus.on(TestEvent, handler_a); - bus.on(TestEvent, handler_b); - bus.on(TestEvent, handler_c); + bus.on(TestEvent, handler_a) + bus.on(TestEvent, handler_b) + bus.on(TestEvent, handler_c) - const event = bus.dispatch(TestEvent({})); - await event.done(); + const event = bus.dispatch(TestEvent({})) + await event.done() // Two errors should be collected - assert.equal(event.event_errors.length, 2); - const error_messages = event.event_errors.map((e) => (e as Error).message); - assert.ok(error_messages.includes("error_a")); - assert.ok(error_messages.includes("error_b")); -}); + assert.equal(event.event_errors.length, 2) + const error_messages = event.event_errors.map((e) => (e as Error).message) + assert.ok(error_messages.includes('error_a')) + assert.ok(error_messages.includes('error_b')) +}) -test("handler error does not prevent event completion", async () => { - const bus = new EventBus("ErrorCompletionBus"); +test('handler error does not prevent event completion', async () => { + const bus = new EventBus('ErrorCompletionBus') bus.on(TestEvent, () => { - throw new Error("handler failed"); - }); + throw new Error('handler failed') + }) - const event = bus.dispatch(TestEvent({})); - await event.done(); + const event = bus.dispatch(TestEvent({})) + await event.done() // Event should still complete even though handler errored - assert.equal(event.event_status, "completed"); - assert.ok(event.event_completed_at, "event_completed_at should be set"); - assert.equal(event.event_errors.length, 1); -}); + assert.equal(event.event_status, 'completed') + assert.ok(event.event_completed_at, 'event_completed_at should be set') + assert.equal(event.event_errors.length, 1) +}) -test("error in one event does not affect subsequent queued events", async () => { - const bus = new EventBus("ErrorQueueBus"); - const Event1 = BaseEvent.extend("Event1", {}); - const Event2 = BaseEvent.extend("Event2", {}); +test('error in one event does not affect subsequent queued events', async () => { + const bus = new EventBus('ErrorQueueBus') + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) bus.on(Event1, () => { - throw new Error("event1 handler failed"); - }); + throw new Error('event1 handler failed') + }) bus.on(Event2, () => { - return "event2 ok"; - }); + return 'event2 ok' + }) - const event_1 = bus.dispatch(Event1({})); - const event_2 = bus.dispatch(Event2({})); + const event_1 = bus.dispatch(Event1({})) + const event_2 = bus.dispatch(Event2({})) - await bus.waitUntilIdle(); + await bus.waitUntilIdle() // Event1 completed with error - assert.equal(event_1.event_status, "completed"); - assert.equal(event_1.event_errors.length, 1); + assert.equal(event_1.event_status, 'completed') + assert.equal(event_1.event_errors.length, 1) // Event2 completed successfully and was not affected by Event1's error - assert.equal(event_2.event_status, "completed"); - assert.equal(event_2.event_errors.length, 0); - const result = Array.from(event_2.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.equal(result.result, "event2 ok"); -}); + assert.equal(event_2.event_status, 'completed') + assert.equal(event_2.event_errors.length, 0) + const result = Array.from(event_2.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'event2 ok') +}) -test("async handler rejection is captured as error", async () => { - const bus = new EventBus("AsyncErrorBus"); +test('async handler rejection is captured as error', async () => { + const bus = new EventBus('AsyncErrorBus') const async_failing_handler = async (): Promise => { - await delay(1); - throw new Error("async rejection"); - }; + await delay(1) + throw new Error('async rejection') + } - bus.on(TestEvent, async_failing_handler); + bus.on(TestEvent, async_failing_handler) - const event = bus.dispatch(TestEvent({})); - await event.done(); + const event = bus.dispatch(TestEvent({})) + await event.done() - assert.equal(event.event_status, "completed"); - assert.equal(event.event_errors.length, 1); - assert.ok((event.event_errors[0] as Error).message.includes("async rejection")); + assert.equal(event.event_status, 'completed') + assert.equal(event.event_errors.length, 1) + assert.ok((event.event_errors[0] as Error).message.includes('async rejection')) - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "error"); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') +}) -test("error in forwarded event handler does not block source bus", async () => { - const bus_a = new EventBus("ErrorForwardA"); - const bus_b = new EventBus("ErrorForwardB"); +test('error in forwarded event handler does not block source bus', async () => { + const bus_a = new EventBus('ErrorForwardA') + const bus_b = new EventBus('ErrorForwardB') - const ForwardEvent = BaseEvent.extend("ForwardEvent", {}); + const ForwardEvent = BaseEvent.extend('ForwardEvent', {}) // Forward from A to B - bus_a.on("*", bus_b.dispatch); + bus_a.on('*', bus_b.dispatch) // Handler on bus_b throws bus_b.on(ForwardEvent, () => { - throw new Error("bus_b handler failed"); - }); + throw new Error('bus_b handler failed') + }) // Handler on bus_a succeeds bus_a.on(ForwardEvent, () => { - return "bus_a ok"; - }); + return 'bus_a ok' + }) - const event = bus_a.dispatch(ForwardEvent({})); - await event.done(); + const event = bus_a.dispatch(ForwardEvent({})) + await event.done() - assert.equal(event.event_status, "completed"); + assert.equal(event.event_status, 'completed') // bus_a's handler succeeded const bus_a_result = Array.from(event.event_results.values()).find( - (r) => r.eventbus_name === "ErrorForwardA" && r.handler_name !== "dispatch" - ); - assert.ok(bus_a_result); - assert.equal(bus_a_result.status, "completed"); - assert.equal(bus_a_result.result, "bus_a ok"); + (r) => r.eventbus_name === 'ErrorForwardA' && r.handler_name !== 'dispatch' + ) + assert.ok(bus_a_result) + assert.equal(bus_a_result.status, 'completed') + assert.equal(bus_a_result.result, 'bus_a ok') // bus_b's handler errored const bus_b_result = Array.from(event.event_results.values()).find( - (r) => r.eventbus_name === "ErrorForwardB" && r.handler_name !== "dispatch" - ); - assert.ok(bus_b_result); - assert.equal(bus_b_result.status, "error"); + (r) => r.eventbus_name === 'ErrorForwardB' && r.handler_name !== 'dispatch' + ) + assert.ok(bus_b_result) + assert.equal(bus_b_result.status, 'error') // Both errors tracked - assert.ok(event.event_errors.length >= 1); -}); + assert.ok(event.event_errors.length >= 1) +}) -test("event with no handlers completes without errors", async () => { - const bus = new EventBus("NoHandlerBus"); - const OrphanEvent = BaseEvent.extend("OrphanEvent", {}); +test('event with no handlers completes without errors', async () => { + const bus = new EventBus('NoHandlerBus') + const OrphanEvent = BaseEvent.extend('OrphanEvent', {}) - const event = bus.dispatch(OrphanEvent({})); - await event.done(); + const event = bus.dispatch(OrphanEvent({})) + await event.done() - assert.equal(event.event_status, "completed"); - assert.equal(event.event_results.size, 0); - assert.equal(event.event_errors.length, 0); -}); + assert.equal(event.event_status, 'completed') + assert.equal(event.event_results.size, 0) + assert.equal(event.event_errors.length, 0) +}) -test("error handler result fields are populated correctly", async () => { - const bus = new EventBus("ErrorFieldsBus"); +test('error handler result fields are populated correctly', async () => { + const bus = new EventBus('ErrorFieldsBus') const my_handler = (): void => { - throw new RangeError("out of range"); - }; - - bus.on(TestEvent, my_handler); - - const event = bus.dispatch(TestEvent({})); - await event.done(); - - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "error"); - assert.equal(result.handler_name, "my_handler"); - assert.equal(result.eventbus_name, "ErrorFieldsBus"); - assert.ok(result.error instanceof RangeError); - assert.equal((result.error as RangeError).message, "out of range"); - assert.ok(result.started_at, "started_at should be set"); - assert.ok(result.completed_at, "completed_at should be set even on error"); -}); + throw new RangeError('out of range') + } + + bus.on(TestEvent, my_handler) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.equal(result.handler_name, 'my_handler') + assert.equal(result.eventbus_name, 'ErrorFieldsBus') + assert.ok(result.error instanceof RangeError) + assert.equal((result.error as RangeError).message, 'out of range') + assert.ok(result.started_at, 'started_at should be set') + assert.ok(result.completed_at, 'completed_at should be set even on error') +}) diff --git a/bubus-ts/tests/event_bus_proxy.test.ts b/bubus-ts/tests/event_bus_proxy.test.ts index eba95e3..02e8159 100644 --- a/bubus-ts/tests/event_bus_proxy.test.ts +++ b/bubus-ts/tests/event_bus_proxy.test.ts @@ -1,241 +1,229 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const MainEvent = BaseEvent.extend("MainEvent", {}); -const ChildEvent = BaseEvent.extend("ChildEvent", {}); -const GrandchildEvent = BaseEvent.extend("GrandchildEvent", {}); +const MainEvent = BaseEvent.extend('MainEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {}) -test("event.bus inside handler returns the dispatching bus", async () => { - const bus = new EventBus("TestBus"); +test('event.bus inside handler returns the dispatching bus', async () => { + const bus = new EventBus('TestBus') - let handler_called = false; - let handler_bus_name: string | undefined; - let child_event: BaseEvent | undefined; + let handler_called = false + let handler_bus_name: string | undefined + let child_event: BaseEvent | undefined bus.on(MainEvent, (event) => { - handler_called = true; - handler_bus_name = event.bus?.name; + handler_called = true + handler_bus_name = event.bus?.name // Should be able to dispatch child events using event.bus - child_event = event.bus?.emit(ChildEvent({})); - }); + child_event = event.bus?.emit(ChildEvent({})) + }) - bus.on(ChildEvent, () => {}); + bus.on(ChildEvent, () => {}) - bus.dispatch(MainEvent({})); - await bus.waitUntilIdle(); + bus.dispatch(MainEvent({})) + await bus.waitUntilIdle() - assert.equal(handler_called, true); - assert.equal(handler_bus_name, "TestBus"); - assert.ok(child_event, "child event should have been dispatched via event.bus"); - assert.equal(child_event!.event_type, "ChildEvent"); -}); + assert.equal(handler_called, true) + assert.equal(handler_bus_name, 'TestBus') + assert.ok(child_event, 'child event should have been dispatched via event.bus') + assert.equal(child_event!.event_type, 'ChildEvent') +}) -test("event.bus returns correct bus when multiple buses exist", async () => { - const bus1 = new EventBus("Bus1"); - const bus2 = new EventBus("Bus2"); +test('event.bus returns correct bus when multiple buses exist', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') - let handler1_bus_name: string | undefined; - let handler2_bus_name: string | undefined; + let handler1_bus_name: string | undefined + let handler2_bus_name: string | undefined bus1.on(MainEvent, (event) => { - handler1_bus_name = event.bus?.name; - }); + handler1_bus_name = event.bus?.name + }) bus2.on(MainEvent, (event) => { - handler2_bus_name = event.bus?.name; - }); + handler2_bus_name = event.bus?.name + }) - bus1.dispatch(MainEvent({})); - await bus1.waitUntilIdle(); + bus1.dispatch(MainEvent({})) + await bus1.waitUntilIdle() - bus2.dispatch(MainEvent({})); - await bus2.waitUntilIdle(); + bus2.dispatch(MainEvent({})) + await bus2.waitUntilIdle() - assert.equal(handler1_bus_name, "Bus1"); - assert.equal(handler2_bus_name, "Bus2"); -}); + assert.equal(handler1_bus_name, 'Bus1') + assert.equal(handler2_bus_name, 'Bus2') +}) -test("event.bus reflects the currently-processing bus when forwarded", async () => { - const bus1 = new EventBus("Bus1"); - const bus2 = new EventBus("Bus2"); +test('event.bus reflects the currently-processing bus when forwarded', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') // Forward all events from bus1 to bus2 - bus1.on("*", bus2.dispatch); + bus1.on('*', bus2.dispatch) - let bus2_handler_bus_name: string | undefined; + let bus2_handler_bus_name: string | undefined bus2.on(MainEvent, (event) => { - bus2_handler_bus_name = event.bus?.name; - }); + bus2_handler_bus_name = event.bus?.name + }) - const event = bus1.dispatch(MainEvent({})); - await bus1.waitUntilIdle(); - await bus2.waitUntilIdle(); + const event = bus1.dispatch(MainEvent({})) + await bus1.waitUntilIdle() + await bus2.waitUntilIdle() // The handler on bus2 should see bus2 as event.bus, not bus1 - assert.equal(bus2_handler_bus_name, "Bus2"); - assert.deepEqual(event.event_path, ["Bus1", "Bus2"]); -}); + assert.equal(bus2_handler_bus_name, 'Bus2') + assert.deepEqual(event.event_path, ['Bus1', 'Bus2']) +}) -test("event.bus in nested handlers sees the same bus", async () => { - const bus = new EventBus("MainBus"); +test('event.bus in nested handlers sees the same bus', async () => { + const bus = new EventBus('MainBus') - let outer_bus_name: string | undefined; - let inner_bus_name: string | undefined; + let outer_bus_name: string | undefined + let inner_bus_name: string | undefined bus.on(MainEvent, async (event) => { - outer_bus_name = event.bus?.name; + outer_bus_name = event.bus?.name // Dispatch child using event.bus - const child = event.bus!.emit(ChildEvent({})); - await child.done(); - }); + const child = event.bus!.emit(ChildEvent({})) + await child.done() + }) bus.on(ChildEvent, (event) => { - inner_bus_name = event.bus?.name; - }); + inner_bus_name = event.bus?.name + }) - const parent = bus.dispatch(MainEvent({})); - await parent.done(); + const parent = bus.dispatch(MainEvent({})) + await parent.done() - assert.equal(outer_bus_name, "MainBus"); - assert.equal(inner_bus_name, "MainBus"); -}); + assert.equal(outer_bus_name, 'MainBus') + assert.equal(inner_bus_name, 'MainBus') +}) -test("event.bus.dispatch sets parent-child relationships through 3 levels", async () => { - const bus = new EventBus("MainBus"); +test('event.bus.dispatch sets parent-child relationships through 3 levels', async () => { + const bus = new EventBus('MainBus') - const execution_order: string[] = []; - let child_ref: BaseEvent | undefined; - let grandchild_ref: BaseEvent | undefined; + const execution_order: string[] = [] + let child_ref: BaseEvent | undefined + let grandchild_ref: BaseEvent | undefined bus.on(MainEvent, async (event) => { - execution_order.push("parent_start"); - assert.equal(event.bus?.name, "MainBus"); + execution_order.push('parent_start') + assert.equal(event.bus?.name, 'MainBus') - child_ref = event.bus!.emit(ChildEvent({})); - await child_ref.done(); + child_ref = event.bus!.emit(ChildEvent({})) + await child_ref.done() - execution_order.push("parent_end"); - }); + execution_order.push('parent_end') + }) bus.on(ChildEvent, async (event) => { - execution_order.push("child_start"); - assert.equal(event.bus?.name, "MainBus"); + execution_order.push('child_start') + assert.equal(event.bus?.name, 'MainBus') - grandchild_ref = event.bus!.emit(GrandchildEvent({})); - await grandchild_ref.done(); + grandchild_ref = event.bus!.emit(GrandchildEvent({})) + await grandchild_ref.done() - execution_order.push("child_end"); - }); + execution_order.push('child_end') + }) bus.on(GrandchildEvent, (event) => { - execution_order.push("grandchild_start"); - assert.equal(event.bus?.name, "MainBus"); - execution_order.push("grandchild_end"); - }); + execution_order.push('grandchild_start') + assert.equal(event.bus?.name, 'MainBus') + execution_order.push('grandchild_end') + }) - const parent_event = bus.dispatch(MainEvent({})); - await parent_event.done(); + const parent_event = bus.dispatch(MainEvent({})) + await parent_event.done() // Child events should queue-jump and complete before their parents return - assert.deepEqual(execution_order, [ - "parent_start", - "child_start", - "grandchild_start", - "grandchild_end", - "child_end", - "parent_end" - ]); + assert.deepEqual(execution_order, ['parent_start', 'child_start', 'grandchild_start', 'grandchild_end', 'child_end', 'parent_end']) // All events completed - assert.equal(parent_event.event_status, "completed"); - assert.ok(child_ref); - assert.equal(child_ref!.event_status, "completed"); - assert.ok(grandchild_ref); - assert.equal(grandchild_ref!.event_status, "completed"); + assert.equal(parent_event.event_status, 'completed') + assert.ok(child_ref) + assert.equal(child_ref!.event_status, 'completed') + assert.ok(grandchild_ref) + assert.equal(grandchild_ref!.event_status, 'completed') // Parent-child relationships are set correctly - assert.equal(child_ref!.event_parent_id, parent_event.event_id); - assert.equal(grandchild_ref!.event_parent_id, child_ref!.event_id); -}); + assert.equal(child_ref!.event_parent_id, parent_event.event_id) + assert.equal(grandchild_ref!.event_parent_id, child_ref!.event_id) +}) -test("event.bus with forwarding: child dispatched via event.bus goes to the correct bus", async () => { - const bus1 = new EventBus("Bus1"); - const bus2 = new EventBus("Bus2"); +test('event.bus with forwarding: child dispatched via event.bus goes to the correct bus', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') // Forward all events from bus1 to bus2 - bus1.on("*", bus2.dispatch); + bus1.on('*', bus2.dispatch) - let child_handler_bus_name: string | undefined; + let child_handler_bus_name: string | undefined // Handlers only on bus2 bus2.on(MainEvent, async (event) => { // Handler runs on bus2 (forwarded from bus1) - assert.equal(event.bus?.name, "Bus2"); + assert.equal(event.bus?.name, 'Bus2') // Child dispatched via event.bus should go to bus2 - const child = event.bus!.emit(ChildEvent({})); - await child.done(); - }); + const child = event.bus!.emit(ChildEvent({})) + await child.done() + }) bus2.on(ChildEvent, (event) => { - child_handler_bus_name = event.bus?.name; - }); + child_handler_bus_name = event.bus?.name + }) - const parent_event = bus1.dispatch(MainEvent({})); - await bus1.waitUntilIdle(); - await bus2.waitUntilIdle(); + bus1.dispatch(MainEvent({})) + await bus1.waitUntilIdle() + await bus2.waitUntilIdle() // Child handler should have seen bus2 - assert.equal(child_handler_bus_name, "Bus2"); -}); + assert.equal(child_handler_bus_name, 'Bus2') +}) -test("event.bus is set on the event after dispatch (outside handler)", async () => { - const bus = new EventBus("TestBus"); +test('event.bus is set on the event after dispatch (outside handler)', async () => { + const bus = new EventBus('TestBus') // Before dispatch, bus is not set - const raw_event = MainEvent({}); - assert.equal(raw_event.bus, undefined); + const raw_event = MainEvent({}) + assert.equal(raw_event.bus, undefined) // After dispatch, bus is set on the original event - const dispatched = bus.dispatch(raw_event); - assert.ok(dispatched.bus, "event.bus should be set after dispatch"); + const dispatched = bus.dispatch(raw_event) + assert.ok(dispatched.bus, 'event.bus should be set after dispatch') - await bus.waitUntilIdle(); -}); + await bus.waitUntilIdle() +}) -test("event.bus.dispatch from handler correctly attributes event_emitted_by_handler_id", async () => { - const bus = new EventBus("TestBus"); - - let child_emitted_by_handler_id: string | undefined; +test('event.bus.dispatch from handler correctly attributes event_emitted_by_handler_id', async () => { + const bus = new EventBus('TestBus') bus.on(MainEvent, (event) => { - event.bus?.emit(ChildEvent({})); - }); + event.bus?.emit(ChildEvent({})) + }) - bus.on(ChildEvent, () => {}); + bus.on(ChildEvent, () => {}) - const parent = bus.dispatch(MainEvent({})); - await bus.waitUntilIdle(); + const parent = bus.dispatch(MainEvent({})) + await bus.waitUntilIdle() // Find the child event in history - const child = Array.from(bus.event_history.values()).find((e) => e.event_type === "ChildEvent"); - assert.ok(child, "child event should be in history"); - assert.equal(child!.event_parent_id, parent.event_id); + const child = Array.from(bus.event_history.values()).find((e) => e.event_type === 'ChildEvent') + assert.ok(child, 'child event should be in history') + assert.equal(child!.event_parent_id, parent.event_id) // The child should have event_emitted_by_handler_id set to the handler that emitted it - assert.ok( - child!.event_emitted_by_handler_id, - "event_emitted_by_handler_id should be set on child events dispatched via event.bus" - ); + assert.ok(child!.event_emitted_by_handler_id, 'event_emitted_by_handler_id should be set on child events dispatched via event.bus') // The handler id should correspond to a handler result on the parent event - const parent_from_history = Array.from(bus.event_history.values()).find((e) => e.event_type === "MainEvent"); - assert.ok(parent_from_history); - const handler_result = parent_from_history!.event_results.get(child!.event_emitted_by_handler_id!); - assert.ok(handler_result, "handler_id on child should match a handler result on the parent"); -}); + const parent_from_history = Array.from(bus.event_history.values()).find((e) => e.event_type === 'MainEvent') + assert.ok(parent_from_history) + const handler_result = parent_from_history!.event_results.get(child!.event_emitted_by_handler_id!) + assert.ok(handler_result, 'handler_id on child should match a handler result on the parent') +}) diff --git a/bubus-ts/tests/event_results.test.ts b/bubus-ts/tests/event_results.test.ts index 977d687..14c67c1 100644 --- a/bubus-ts/tests/event_results.test.ts +++ b/bubus-ts/tests/event_results.test.ts @@ -1,70 +1,70 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { z } from "zod"; +import { z } from 'zod' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const StringResultEvent = BaseEvent.extend("StringResultEvent", { +const StringResultEvent = BaseEvent.extend('StringResultEvent', { event_result_schema: z.string(), - event_result_type: "string" -}); + event_result_type: 'string', +}) -const ObjectResultEvent = BaseEvent.extend("ObjectResultEvent", { - event_result_schema: z.object({ value: z.string(), count: z.number() }) -}); +const ObjectResultEvent = BaseEvent.extend('ObjectResultEvent', { + event_result_schema: z.object({ value: z.string(), count: z.number() }), +}) -const NoResultSchemaEvent = BaseEvent.extend("NoResultSchemaEvent", {}); +const NoResultSchemaEvent = BaseEvent.extend('NoResultSchemaEvent', {}) -test("event results capture handler return values", async () => { - const bus = new EventBus("ResultCaptureBus"); +test('event results capture handler return values', async () => { + const bus = new EventBus('ResultCaptureBus') - bus.on(StringResultEvent, () => "ok"); + bus.on(StringResultEvent, () => 'ok') - const event = bus.dispatch(StringResultEvent({})); - await event.done(); + const event = bus.dispatch(StringResultEvent({})) + await event.done() - assert.equal(event.event_results.size, 1); - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.equal(result.result, "ok"); -}); + assert.equal(event.event_results.size, 1) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'ok') +}) -test("event_result_schema validates handler results", async () => { - const bus = new EventBus("ResultSchemaBus"); +test('event_result_schema validates handler results', async () => { + const bus = new EventBus('ResultSchemaBus') - bus.on(ObjectResultEvent, () => ({ value: "hello", count: 2 })); + bus.on(ObjectResultEvent, () => ({ value: 'hello', count: 2 })) - const event = bus.dispatch(ObjectResultEvent({})); - await event.done(); + const event = bus.dispatch(ObjectResultEvent({})) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.deepEqual(result.result, { value: "hello", count: 2 }); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { value: 'hello', count: 2 }) +}) -test("invalid result marks handler error", async () => { - const bus = new EventBus("ResultSchemaErrorBus"); +test('invalid result marks handler error', async () => { + const bus = new EventBus('ResultSchemaErrorBus') - bus.on(ObjectResultEvent, () => ({ value: "bad", count: "nope" } as unknown)); + bus.on(ObjectResultEvent, () => ({ value: 'bad', count: 'nope' }) as unknown) - const event = bus.dispatch(ObjectResultEvent({})); - await event.done(); + const event = bus.dispatch(ObjectResultEvent({})) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "error"); - assert.ok(result.error instanceof Error); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof Error) +}) -test("event with no result schema stores raw values", async () => { - const bus = new EventBus("NoSchemaBus"); +test('event with no result schema stores raw values', async () => { + const bus = new EventBus('NoSchemaBus') - bus.on(NoResultSchemaEvent, () => ({ raw: true })); + bus.on(NoResultSchemaEvent, () => ({ raw: true })) - const event = bus.dispatch(NoResultSchemaEvent({})); - await event.done(); + const event = bus.dispatch(NoResultSchemaEvent({})) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.deepEqual(result.result, { raw: true }); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { raw: true }) +}) diff --git a/bubus-ts/tests/eventbus_basics.test.ts b/bubus-ts/tests/eventbus_basics.test.ts index dd6753f..060d7a5 100644 --- a/bubus-ts/tests/eventbus_basics.test.ts +++ b/bubus-ts/tests/eventbus_basics.test.ts @@ -1,423 +1,432 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { BaseEvent, EventBus } from "../src/index.js"; -import { z } from "zod"; +import { BaseEvent, EventBus } from '../src/index.js' +import { z } from 'zod' const delay = (ms: number): Promise => new Promise((resolve) => { - setTimeout(resolve, ms); - }); + setTimeout(resolve, ms) + }) // ─── Constructor defaults ──────────────────────────────────────────────────── -test("EventBus initializes with correct defaults", () => { - const bus = new EventBus("DefaultsBus"); - - assert.equal(bus.name, "DefaultsBus"); - assert.equal(bus.max_history_size, 100); - assert.equal(bus.event_concurrency_default, "bus-serial"); - assert.equal(bus.handler_concurrency_default, "bus-serial"); - assert.equal(bus.event_timeout_default, 60); - assert.equal(bus.event_history.size, 0); - assert.equal(bus.pending_event_queue.length, 0); - assert.equal(bus.in_flight_event_ids.size, 0); - assert.ok(EventBus.instances.has(bus)); -}); - -test("EventBus applies custom options", () => { - const bus = new EventBus("CustomBus", { +test('EventBus initializes with correct defaults', () => { + const bus = new EventBus('DefaultsBus') + + assert.equal(bus.name, 'DefaultsBus') + assert.equal(bus.max_history_size, 100) + assert.equal(bus.event_concurrency_default, 'bus-serial') + assert.equal(bus.handler_concurrency_default, 'bus-serial') + assert.equal(bus.event_timeout_default, 60) + assert.equal(bus.event_history.size, 0) + assert.equal(bus.pending_event_queue.length, 0) + assert.equal(bus.in_flight_event_ids.size, 0) + assert.ok(EventBus.instances.has(bus)) +}) + +test('EventBus applies custom options', () => { + const bus = new EventBus('CustomBus', { max_history_size: 500, - event_concurrency: "parallel", - handler_concurrency: "global-serial", - event_timeout: 30 - }); - - assert.equal(bus.max_history_size, 500); - assert.equal(bus.event_concurrency_default, "parallel"); - assert.equal(bus.handler_concurrency_default, "global-serial"); - assert.equal(bus.event_timeout_default, 30); -}); - -test("EventBus with null max_history_size means unlimited", () => { - const bus = new EventBus("UnlimitedBus", { max_history_size: null }); - assert.equal(bus.max_history_size, null); -}); - -test("EventBus with null event_timeout disables timeouts", () => { - const bus = new EventBus("NoTimeoutBus", { event_timeout: null }); - assert.equal(bus.event_timeout_default, null); -}); - -test("EventBus auto-generates name when not provided", () => { - const bus = new EventBus(); - assert.equal(bus.name, "EventBus"); -}); + event_concurrency: 'parallel', + handler_concurrency: 'global-serial', + event_timeout: 30, + }) + + assert.equal(bus.max_history_size, 500) + assert.equal(bus.event_concurrency_default, 'parallel') + assert.equal(bus.handler_concurrency_default, 'global-serial') + assert.equal(bus.event_timeout_default, 30) +}) + +test('EventBus with null max_history_size means unlimited', () => { + const bus = new EventBus('UnlimitedBus', { max_history_size: null }) + assert.equal(bus.max_history_size, null) +}) + +test('EventBus with null event_timeout disables timeouts', () => { + const bus = new EventBus('NoTimeoutBus', { event_timeout: null }) + assert.equal(bus.event_timeout_default, null) +}) + +test('EventBus auto-generates name when not provided', () => { + const bus = new EventBus() + assert.equal(bus.name, 'EventBus') +}) // ─── Event dispatch and status lifecycle ───────────────────────────────────── -test("dispatch returns pending event with correct initial state", async () => { - const bus = new EventBus("LifecycleBus", { max_history_size: 100 }); - const TestEvent = BaseEvent.extend("TestEvent", { data: z.string() }); +test('dispatch returns pending event with correct initial state', async () => { + const bus = new EventBus('LifecycleBus', { max_history_size: 100 }) + const TestEvent = BaseEvent.extend('TestEvent', { data: z.string() }) - const event = bus.dispatch(TestEvent({ data: "hello" })); + const event = bus.dispatch(TestEvent({ data: 'hello' })) // Immediate state after dispatch (before any microtask runs) - assert.equal(event.event_type, "TestEvent"); - assert.ok(event.event_id); - assert.ok(event.event_created_at); - assert.equal((event as any).data, "hello"); + assert.equal(event.event_type, 'TestEvent') + assert.ok(event.event_id) + assert.ok(event.event_created_at) + assert.equal((event as any).data, 'hello') // event_path should include the bus name - const original = event._original_event ?? event; - assert.ok(original.event_path.includes("LifecycleBus")); + const original = event._original_event ?? event + assert.ok(original.event_path.includes('LifecycleBus')) - await bus.waitUntilIdle(); -}); + await bus.waitUntilIdle() +}) -test("event transitions through pending -> started -> completed", async () => { - const bus = new EventBus("StatusBus", { max_history_size: 100 }); - const TestEvent = BaseEvent.extend("TestEvent", {}); - let status_during_handler: string | undefined; +test('event transitions through pending -> started -> completed', async () => { + const bus = new EventBus('StatusBus', { max_history_size: 100 }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + let status_during_handler: string | undefined bus.on(TestEvent, (event: BaseEvent) => { - status_during_handler = event.event_status; - return "done"; - }); + status_during_handler = event.event_status + return 'done' + }) - const event = bus.dispatch(TestEvent({})); - const original = event._original_event ?? event; + const event = bus.dispatch(TestEvent({})) + const original = event._original_event ?? event - await event.done(); + await event.done() - assert.equal(status_during_handler, "started"); - assert.equal(original.event_status, "completed"); - assert.ok(original.event_started_at, "event_started_at should be set"); - assert.ok(original.event_completed_at, "event_completed_at should be set"); -}); + assert.equal(status_during_handler, 'started') + assert.equal(original.event_status, 'completed') + assert.ok(original.event_started_at, 'event_started_at should be set') + assert.ok(original.event_completed_at, 'event_completed_at should be set') +}) -test("event with no handlers completes immediately", async () => { - const bus = new EventBus("NoHandlerBus", { max_history_size: 100 }); - const OrphanEvent = BaseEvent.extend("OrphanEvent", {}); +test('event with no handlers completes immediately', async () => { + const bus = new EventBus('NoHandlerBus', { max_history_size: 100 }) + const OrphanEvent = BaseEvent.extend('OrphanEvent', {}) - const event = bus.dispatch(OrphanEvent({})); - await event.done(); + const event = bus.dispatch(OrphanEvent({})) + await event.done() - const original = event._original_event ?? event; - assert.equal(original.event_status, "completed"); - assert.equal(original.event_results.size, 0); -}); + const original = event._original_event ?? event + assert.equal(original.event_status, 'completed') + assert.equal(original.event_results.size, 0) +}) // ─── Event history tracking ────────────────────────────────────────────────── -test("dispatched events appear in event_history", async () => { - const bus = new EventBus("HistoryBus", { max_history_size: 100 }); - const EventA = BaseEvent.extend("EventA", {}); - const EventB = BaseEvent.extend("EventB", {}); +test('dispatched events appear in event_history', async () => { + const bus = new EventBus('HistoryBus', { max_history_size: 100 }) + const EventA = BaseEvent.extend('EventA', {}) + const EventB = BaseEvent.extend('EventB', {}) - bus.dispatch(EventA({})); - bus.dispatch(EventB({})); - await bus.waitUntilIdle(); + bus.dispatch(EventA({})) + bus.dispatch(EventB({})) + await bus.waitUntilIdle() - assert.equal(bus.event_history.size, 2); - const history = Array.from(bus.event_history.values()); - assert.equal(history[0].event_type, "EventA"); - assert.equal(history[1].event_type, "EventB"); + assert.equal(bus.event_history.size, 2) + const history = Array.from(bus.event_history.values()) + assert.equal(history[0].event_type, 'EventA') + assert.equal(history[1].event_type, 'EventB') // All events are accessible by id for (const event of bus.event_history.values()) { - assert.ok(bus.event_history.has(event.event_id)); + assert.ok(bus.event_history.has(event.event_id)) } -}); +}) // ─── History trimming (max_history_size) ───────────────────────────────────── -test("history is trimmed to max_history_size, completed events removed first", async () => { - const bus = new EventBus("TrimBus", { max_history_size: 5 }); - const TrimEvent = BaseEvent.extend("TrimEvent", { seq: z.number() }); +test('history is trimmed to max_history_size, completed events removed first', async () => { + const bus = new EventBus('TrimBus', { max_history_size: 5 }) + const TrimEvent = BaseEvent.extend('TrimEvent', { seq: z.number() }) - bus.on(TrimEvent, () => "ok"); + bus.on(TrimEvent, () => 'ok') // Dispatch 10 events; they'll process and complete in FIFO order for (let i = 0; i < 10; i++) { - bus.dispatch(TrimEvent({ seq: i })); + bus.dispatch(TrimEvent({ seq: i })) } - await bus.waitUntilIdle(); + await bus.waitUntilIdle() // History should be trimmed to at most max_history_size - assert.ok(bus.event_history.size <= 5, `expected <= 5, got ${bus.event_history.size}`); + assert.ok(bus.event_history.size <= 5, `expected <= 5, got ${bus.event_history.size}`) // The remaining events should be the MOST RECENT ones (oldest completed removed first) - const seqs = Array.from(bus.event_history.values()).map((e) => (e as any).seq as number); + const seqs = Array.from(bus.event_history.values()).map((e) => (e as any).seq as number) for (let i = 1; i < seqs.length; i++) { - assert.ok(seqs[i] > seqs[i - 1], "remaining history should be in order"); + assert.ok(seqs[i] > seqs[i - 1], 'remaining history should be in order') } -}); +}) -test("unlimited history (max_history_size: null) keeps all events", async () => { - const bus = new EventBus("UnlimitedHistBus", { max_history_size: null }); - const PingEvent = BaseEvent.extend("PingEvent", {}); +test('unlimited history (max_history_size: null) keeps all events', async () => { + const bus = new EventBus('UnlimitedHistBus', { max_history_size: null }) + const PingEvent = BaseEvent.extend('PingEvent', {}) - bus.on(PingEvent, () => "pong"); + bus.on(PingEvent, () => 'pong') for (let i = 0; i < 150; i++) { - bus.dispatch(PingEvent({})); + bus.dispatch(PingEvent({})) } - await bus.waitUntilIdle(); + await bus.waitUntilIdle() - assert.equal(bus.event_history.size, 150); + assert.equal(bus.event_history.size, 150) // All completed for (const event of bus.event_history.values()) { - assert.equal(event.event_status, "completed"); + assert.equal(event.event_status, 'completed') } -}); +}) // ─── Event type derivation ─────────────────────────────────────────────────── -test("event_type is derived from extend() name argument", () => { - const MyCustomEvent = BaseEvent.extend("MyCustomEvent", { val: z.number() }); - const event = MyCustomEvent({ val: 42 }); - assert.equal(event.event_type, "MyCustomEvent"); -}); +test('event_type is derived from extend() name argument', () => { + const MyCustomEvent = BaseEvent.extend('MyCustomEvent', { val: z.number() }) + const event = MyCustomEvent({ val: 42 }) + assert.equal(event.event_type, 'MyCustomEvent') +}) -test("event_type can be overridden at instantiation", () => { - const FlexEvent = BaseEvent.extend("FlexEvent", {}); - const event = FlexEvent({ event_type: "OverriddenType" }); - assert.equal(event.event_type, "OverriddenType"); -}); +test('event_type can be overridden at instantiation', () => { + const FlexEvent = BaseEvent.extend('FlexEvent', {}) + const event = FlexEvent({ event_type: 'OverriddenType' }) + assert.equal(event.event_type, 'OverriddenType') +}) -test("handler registration by string matches extend() name", async () => { - const bus = new EventBus("StringMatchBus", { max_history_size: 100 }); - const NamedEvent = BaseEvent.extend("NamedEvent", {}); - const received: string[] = []; +test('handler registration by string matches extend() name', async () => { + const bus = new EventBus('StringMatchBus', { max_history_size: 100 }) + const NamedEvent = BaseEvent.extend('NamedEvent', {}) + const received: string[] = [] - bus.on("NamedEvent", () => { - received.push("string_handler"); - }); + bus.on('NamedEvent', () => { + received.push('string_handler') + }) - bus.dispatch(NamedEvent({})); - await bus.waitUntilIdle(); + bus.dispatch(NamedEvent({})) + await bus.waitUntilIdle() - assert.equal(received.length, 1); - assert.equal(received[0], "string_handler"); -}); + assert.equal(received.length, 1) + assert.equal(received[0], 'string_handler') +}) -test("wildcard handler receives all events", async () => { - const bus = new EventBus("WildcardBus", { max_history_size: 100 }); - const EventA = BaseEvent.extend("EventA", {}); - const EventB = BaseEvent.extend("EventB", {}); - const types: string[] = []; +test('wildcard handler receives all events', async () => { + const bus = new EventBus('WildcardBus', { max_history_size: 100 }) + const EventA = BaseEvent.extend('EventA', {}) + const EventB = BaseEvent.extend('EventB', {}) + const types: string[] = [] - bus.on("*", (event: BaseEvent) => { - types.push(event.event_type); - }); + bus.on('*', (event: BaseEvent) => { + types.push(event.event_type) + }) - bus.dispatch(EventA({})); - bus.dispatch(EventB({})); - await bus.waitUntilIdle(); + bus.dispatch(EventA({})) + bus.dispatch(EventB({})) + await bus.waitUntilIdle() - assert.deepEqual(types, ["EventA", "EventB"]); -}); + assert.deepEqual(types, ['EventA', 'EventB']) +}) // ─── Error handling and isolation ──────────────────────────────────────────── -test("handler error is captured without crashing the bus", async () => { - const bus = new EventBus("ErrorBus", { max_history_size: 100 }); - const ErrorEvent = BaseEvent.extend("ErrorEvent", {}); +test('handler error is captured without crashing the bus', async () => { + const bus = new EventBus('ErrorBus', { max_history_size: 100 }) + const ErrorEvent = BaseEvent.extend('ErrorEvent', {}) bus.on(ErrorEvent, () => { - throw new Error("handler blew up"); - }); + throw new Error('handler blew up') + }) - const event = bus.dispatch(ErrorEvent({})); - await event.done(); + const event = bus.dispatch(ErrorEvent({})) + await event.done() - const original = event._original_event ?? event; - assert.equal(original.event_status, "completed"); - assert.ok(original.event_errors.length > 0, "event should record the error"); + const original = event._original_event ?? event + assert.equal(original.event_status, 'completed') + assert.ok(original.event_errors.length > 0, 'event should record the error') // The handler result should have error status - const results = Array.from(original.event_results.values()); - assert.equal(results.length, 1); - assert.equal(results[0].status, "error"); - assert.ok(results[0].error instanceof Error); - assert.equal((results[0].error as Error).message, "handler blew up"); -}); - -test("one handler error does not prevent other handlers from running", async () => { - const bus = new EventBus("IsolationBus", { + const results = Array.from(original.event_results.values()) + assert.equal(results.length, 1) + assert.equal(results[0].status, 'error') + assert.ok(results[0].error instanceof Error) + assert.equal((results[0].error as Error).message, 'handler blew up') +}) + +test('one handler error does not prevent other handlers from running', async () => { + const bus = new EventBus('IsolationBus', { max_history_size: 100, - handler_concurrency: "parallel" - }); - const MultiEvent = BaseEvent.extend("MultiEvent", {}); + handler_concurrency: 'parallel', + }) + const MultiEvent = BaseEvent.extend('MultiEvent', {}) - const results_seen: string[] = []; + const results_seen: string[] = [] bus.on(MultiEvent, () => { - results_seen.push("handler_1_ok"); - return "result_1"; - }); + results_seen.push('handler_1_ok') + return 'result_1' + }) bus.on(MultiEvent, () => { - throw new Error("handler_2_fails"); - }); + throw new Error('handler_2_fails') + }) bus.on(MultiEvent, () => { - results_seen.push("handler_3_ok"); - return "result_3"; - }); + results_seen.push('handler_3_ok') + return 'result_3' + }) - const event = bus.dispatch(MultiEvent({})); - await event.done(); + const event = bus.dispatch(MultiEvent({})) + await event.done() - const original = event._original_event ?? event; - assert.equal(original.event_status, "completed"); + const original = event._original_event ?? event + assert.equal(original.event_status, 'completed') // Both non-erroring handlers should have run - assert.ok(results_seen.includes("handler_1_ok")); - assert.ok(results_seen.includes("handler_3_ok")); + assert.ok(results_seen.includes('handler_1_ok')) + assert.ok(results_seen.includes('handler_3_ok')) // Check individual results - const all_results = Array.from(original.event_results.values()); - const completed_results = all_results.filter((r) => r.status === "completed"); - const error_results = all_results.filter((r) => r.status === "error"); - assert.equal(completed_results.length, 2); - assert.equal(error_results.length, 1); -}); + const all_results = Array.from(original.event_results.values()) + const completed_results = all_results.filter((r) => r.status === 'completed') + const error_results = all_results.filter((r) => r.status === 'error') + assert.equal(completed_results.length, 2) + assert.equal(error_results.length, 1) +}) // ─── Concurrent dispatch ───────────────────────────────────────────────────── -test("many events dispatched concurrently all complete", async () => { - const bus = new EventBus("ConcurrentBus", { max_history_size: null }); - const BatchEvent = BaseEvent.extend("BatchEvent", { idx: z.number() }); - let processed = 0; +test('many events dispatched concurrently all complete', async () => { + const bus = new EventBus('ConcurrentBus', { max_history_size: null }) + const BatchEvent = BaseEvent.extend('BatchEvent', { idx: z.number() }) + let processed = 0 bus.on(BatchEvent, () => { - processed += 1; - return "ok"; - }); + processed += 1 + return 'ok' + }) - const events: BaseEvent[] = []; + const events: BaseEvent[] = [] for (let i = 0; i < 100; i++) { - events.push(bus.dispatch(BatchEvent({ idx: i }))); + events.push(bus.dispatch(BatchEvent({ idx: i }))) } // Wait for all to complete - await Promise.all(events.map((e) => e.done())); - await bus.waitUntilIdle(); + await Promise.all(events.map((e) => e.done())) + await bus.waitUntilIdle() - assert.equal(processed, 100); - assert.equal(bus.event_history.size, 100); + assert.equal(processed, 100) + assert.equal(bus.event_history.size, 100) for (const event of bus.event_history.values()) { - assert.equal(event.event_status, "completed"); + assert.equal(event.event_status, 'completed') } -}); +}) // ─── event_timeout default application ─────────────────────────────────────── -test("dispatch applies bus event_timeout_default when event has null timeout", async () => { - const bus = new EventBus("TimeoutDefaultBus", { +test('dispatch applies bus event_timeout_default when event has null timeout', async () => { + const bus = new EventBus('TimeoutDefaultBus', { max_history_size: 100, - event_timeout: 42 - }); - const TEvent = BaseEvent.extend("TEvent", {}); + event_timeout: 42, + }) + const TEvent = BaseEvent.extend('TEvent', {}) - const event = bus.dispatch(TEvent({})); - const original = event._original_event ?? event; + const event = bus.dispatch(TEvent({})) + const original = event._original_event ?? event // The bus should have applied its default timeout - assert.equal(original.event_timeout, 42); + assert.equal(original.event_timeout, 42) - await bus.waitUntilIdle(); -}); + await bus.waitUntilIdle() +}) -test("event with explicit timeout is not overridden by bus default", async () => { - const bus = new EventBus("TimeoutOverrideBus", { +test('event with explicit timeout is not overridden by bus default', async () => { + const bus = new EventBus('TimeoutOverrideBus', { max_history_size: 100, - event_timeout: 42 - }); - const TEvent = BaseEvent.extend("TEvent", {}); + event_timeout: 42, + }) + const TEvent = BaseEvent.extend('TEvent', {}) - const event = bus.dispatch(TEvent({ event_timeout: 10 })); - const original = event._original_event ?? event; + const event = bus.dispatch(TEvent({ event_timeout: 10 })) + const original = event._original_event ?? event - assert.equal(original.event_timeout, 10); + assert.equal(original.event_timeout, 10) - await bus.waitUntilIdle(); -}); + await bus.waitUntilIdle() +}) // ─── EventBus.instances tracking ───────────────────────────────────────────── -test("EventBus.instances tracks all created buses", () => { - const initial_count = EventBus.instances.size; - const bus_a = new EventBus("TrackA"); - const bus_b = new EventBus("TrackB"); +test('EventBus.instances tracks all created buses', () => { + const initial_count = EventBus.instances.size + const bus_a = new EventBus('TrackA') + const bus_b = new EventBus('TrackB') - assert.ok(EventBus.instances.has(bus_a)); - assert.ok(EventBus.instances.has(bus_b)); - assert.equal(EventBus.instances.size, initial_count + 2); -}); + assert.ok(EventBus.instances.has(bus_a)) + assert.ok(EventBus.instances.has(bus_b)) + assert.equal(EventBus.instances.size, initial_count + 2) +}) // ─── Circular forwarding prevention ────────────────────────────────────────── -test("circular forwarding does not cause infinite loop", async () => { - const bus_a = new EventBus("CircA", { max_history_size: 100 }); - const bus_b = new EventBus("CircB", { max_history_size: 100 }); - const bus_c = new EventBus("CircC", { max_history_size: 100 }); +test('circular forwarding does not cause infinite loop', async () => { + const bus_a = new EventBus('CircA', { max_history_size: 100 }) + const bus_b = new EventBus('CircB', { max_history_size: 100 }) + const bus_c = new EventBus('CircC', { max_history_size: 100 }) // A -> B -> C -> A (circular) - bus_a.on("*", bus_b.dispatch); - bus_b.on("*", bus_c.dispatch); - bus_c.on("*", bus_a.dispatch); + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) + bus_c.on('*', bus_a.dispatch) - const CircEvent = BaseEvent.extend("CircEvent", {}); - const handler_calls: string[] = []; + const CircEvent = BaseEvent.extend('CircEvent', {}) + const handler_calls: string[] = [] // Register real handlers on each bus - bus_a.on(CircEvent, () => { handler_calls.push("A"); return "a"; }); - bus_b.on(CircEvent, () => { handler_calls.push("B"); return "b"; }); - bus_c.on(CircEvent, () => { handler_calls.push("C"); return "c"; }); - - const event = bus_a.dispatch(CircEvent({})); - await event.done(); - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); - await bus_c.waitUntilIdle(); + bus_a.on(CircEvent, () => { + handler_calls.push('A') + return 'a' + }) + bus_b.on(CircEvent, () => { + handler_calls.push('B') + return 'b' + }) + bus_c.on(CircEvent, () => { + handler_calls.push('C') + return 'c' + }) + + const event = bus_a.dispatch(CircEvent({})) + await event.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() // Each bus should process the event exactly once (loop prevention via event_path) - assert.equal(handler_calls.filter((h) => h === "A").length, 1); - assert.equal(handler_calls.filter((h) => h === "B").length, 1); - assert.equal(handler_calls.filter((h) => h === "C").length, 1); + assert.equal(handler_calls.filter((h) => h === 'A').length, 1) + assert.equal(handler_calls.filter((h) => h === 'B').length, 1) + assert.equal(handler_calls.filter((h) => h === 'C').length, 1) // event_path should contain all three buses - const original = event._original_event ?? event; - assert.ok(original.event_path.includes("CircA")); - assert.ok(original.event_path.includes("CircB")); - assert.ok(original.event_path.includes("CircC")); -}); + const original = event._original_event ?? event + assert.ok(original.event_path.includes('CircA')) + assert.ok(original.event_path.includes('CircB')) + assert.ok(original.event_path.includes('CircC')) +}) // ─── EventBus GC / memory leak ─────────────────────────────────────────────── -test("unreferenced EventBus can be garbage collected (not retained by instances)", async () => { +test('unreferenced EventBus can be garbage collected (not retained by instances)', async () => { // This test requires --expose-gc to force garbage collection - const gc = globalThis.gc as (() => void) | undefined; - if (typeof gc !== "function") { + const gc = globalThis.gc as (() => void) | undefined + if (typeof gc !== 'function') { // Can't test GC without --expose-gc; skip gracefully - return; + return } - let weak_ref: WeakRef; + let weak_ref: WeakRef // Create a bus inside an IIFE so the only reference is the WeakRef - (() => { - const bus = new EventBus("GCTestBus"); - weak_ref = new WeakRef(bus); - })(); + ;(() => { + const bus = new EventBus('GCTestBus') + weak_ref = new WeakRef(bus) + })() // Force garbage collection - gc(); - await delay(50); - gc(); + gc() + await delay(50) + gc() // If EventBus.instances holds a strong reference (Set), // the bus will NOT be collected β€” proving the memory leak. @@ -425,89 +434,95 @@ test("unreferenced EventBus can be garbage collected (not retained by instances) assert.equal( weak_ref!.deref(), undefined, - "bus should be garbage collected when no external references remain β€” " + - "EventBus.instances is holding a strong reference (memory leak)" - ); -}); + 'bus should be garbage collected when no external references remain β€” ' + + 'EventBus.instances is holding a strong reference (memory leak)' + ) +}) // ─── off() handler deregistration ──────────────────────────────────────────── -test("off() removes a handler so it no longer fires", async () => { - const bus = new EventBus("OffBus", { max_history_size: 100 }); - const OffEvent = BaseEvent.extend("OffEvent", {}); - let call_count = 0; +test('off() removes a handler so it no longer fires', async () => { + const bus = new EventBus('OffBus', { max_history_size: 100 }) + const OffEvent = BaseEvent.extend('OffEvent', {}) + let call_count = 0 const handler = () => { - call_count += 1; - }; + call_count += 1 + } - bus.on(OffEvent, handler); - bus.dispatch(OffEvent({})); - await bus.waitUntilIdle(); - assert.equal(call_count, 1); + bus.on(OffEvent, handler) + bus.dispatch(OffEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count, 1) - bus.off(OffEvent, handler); - bus.dispatch(OffEvent({})); - await bus.waitUntilIdle(); - assert.equal(call_count, 1, "handler should not fire after off()"); -}); + bus.off(OffEvent, handler) + bus.dispatch(OffEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count, 1, 'handler should not fire after off()') +}) -test("off() removes a handler by handler_id string", async () => { - const bus = new EventBus("OffByIdBus", { max_history_size: 100 }); - const OffIdEvent = BaseEvent.extend("OffIdEvent", {}); - let call_count = 0; +test('off() removes a handler by handler_id string', async () => { + const bus = new EventBus('OffByIdBus', { max_history_size: 100 }) + const OffIdEvent = BaseEvent.extend('OffIdEvent', {}) + let call_count = 0 bus.on(OffIdEvent, function my_handler() { - call_count += 1; - }); + call_count += 1 + }) // Dispatch once so we can find the handler_id from the event results - const event1 = bus.dispatch(OffIdEvent({})); - await bus.waitUntilIdle(); - assert.equal(call_count, 1); + const event1 = bus.dispatch(OffIdEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count, 1) // Get the handler_id from the event's results - const results = Array.from(event1.event_results.values()); - assert.equal(results.length, 1, "should have exactly one handler result"); - const handler_id = results[0].handler_id; - assert.ok(handler_id, "handler_id should exist"); + const results = Array.from(event1.event_results.values()) + assert.equal(results.length, 1, 'should have exactly one handler result') + const handler_id = results[0].handler_id + assert.ok(handler_id, 'handler_id should exist') // Remove by handler_id string - bus.off(OffIdEvent, handler_id); + bus.off(OffIdEvent, handler_id) // Dispatch again β€” handler should NOT fire - bus.dispatch(OffIdEvent({})); - await bus.waitUntilIdle(); - assert.equal(call_count, 1, "handler should not fire after off() by handler_id"); -}); - -test("off() with no handler removes all handlers for that event", async () => { - const bus = new EventBus("OffAllBus", { max_history_size: 100 }); - const OffAllEvent = BaseEvent.extend("OffAllEvent", {}); - const OtherEvent = BaseEvent.extend("OffAllOther", {}); - let call_count_a = 0; - let call_count_b = 0; - let other_count = 0; - - bus.on(OffAllEvent, () => { call_count_a += 1; }); - bus.on(OffAllEvent, () => { call_count_b += 1; }); - bus.on(OtherEvent, () => { other_count += 1; }); - - bus.dispatch(OffAllEvent({})); - await bus.waitUntilIdle(); - assert.equal(call_count_a, 1); - assert.equal(call_count_b, 1); + bus.dispatch(OffIdEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count, 1, 'handler should not fire after off() by handler_id') +}) + +test('off() with no handler removes all handlers for that event', async () => { + const bus = new EventBus('OffAllBus', { max_history_size: 100 }) + const OffAllEvent = BaseEvent.extend('OffAllEvent', {}) + const OtherEvent = BaseEvent.extend('OffAllOther', {}) + let call_count_a = 0 + let call_count_b = 0 + let other_count = 0 + + bus.on(OffAllEvent, () => { + call_count_a += 1 + }) + bus.on(OffAllEvent, () => { + call_count_b += 1 + }) + bus.on(OtherEvent, () => { + other_count += 1 + }) + + bus.dispatch(OffAllEvent({})) + await bus.waitUntilIdle() + assert.equal(call_count_a, 1) + assert.equal(call_count_b, 1) // Remove ALL handlers for OffAllEvent - bus.off(OffAllEvent); + bus.off(OffAllEvent) - bus.dispatch(OffAllEvent({})); - bus.dispatch(OtherEvent({})); - await bus.waitUntilIdle(); + bus.dispatch(OffAllEvent({})) + bus.dispatch(OtherEvent({})) + await bus.waitUntilIdle() // Neither OffAllEvent handler should fire - assert.equal(call_count_a, 1, "handler A should not fire after off(event)"); - assert.equal(call_count_b, 1, "handler B should not fire after off(event)"); + assert.equal(call_count_a, 1, 'handler A should not fire after off(event)') + assert.equal(call_count_b, 1, 'handler B should not fire after off(event)') // OtherEvent handler should still work - assert.equal(other_count, 1, "unrelated handler should still fire"); -}); + assert.equal(other_count, 1, 'unrelated handler should still fire') +}) diff --git a/bubus-ts/tests/fifo.test.ts b/bubus-ts/tests/fifo.test.ts index 5efede7..80042fd 100644 --- a/bubus-ts/tests/fifo.test.ts +++ b/bubus-ts/tests/fifo.test.ts @@ -1,41 +1,44 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { z } from "zod"; +import { z } from 'zod' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const OrderEvent = BaseEvent.extend("OrderEvent", { order: z.number() }); +const OrderEvent = BaseEvent.extend('OrderEvent', { order: z.number() }) const delay = (ms: number): Promise => new Promise((resolve) => { - setTimeout(resolve, ms); - }); + setTimeout(resolve, ms) + }) -test("events are processed in FIFO order", async () => { - const bus = new EventBus("FifoBus"); +test('events are processed in FIFO order', async () => { + const bus = new EventBus('FifoBus') - const processed_orders: number[] = []; - const handler_start_times: number[] = []; + const processed_orders: number[] = [] + const handler_start_times: number[] = [] bus.on(OrderEvent, async (event) => { - handler_start_times.push(Date.now()); + handler_start_times.push(Date.now()) if (event.order % 2 === 0) { - await delay(30); + await delay(30) } else { - await delay(5); + await delay(5) } - processed_orders.push(event.order); - }); + processed_orders.push(event.order) + }) for (let i = 0; i < 10; i += 1) { - bus.dispatch(OrderEvent({ order: i })); + bus.dispatch(OrderEvent({ order: i })) } - await bus.waitUntilIdle(); + await bus.waitUntilIdle() - assert.deepEqual(processed_orders, Array.from({ length: 10 }, (_, i) => i)); + assert.deepEqual( + processed_orders, + Array.from({ length: 10 }, (_, i) => i) + ) for (let i = 1; i < handler_start_times.length; i += 1) { - assert.ok(handler_start_times[i] >= handler_start_times[i - 1]); + assert.ok(handler_start_times[i] >= handler_start_times[i - 1]) } -}); +}) diff --git a/bubus-ts/tests/find.test.ts b/bubus-ts/tests/find.test.ts index b56107d..ea160a5 100644 --- a/bubus-ts/tests/find.test.ts +++ b/bubus-ts/tests/find.test.ts @@ -1,583 +1,551 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; - -import { z } from "zod"; - -import { BaseEvent, EventBus } from "../src/index.js"; - -const ParentEvent = BaseEvent.extend("ParentEvent", {}); -const ChildEvent = BaseEvent.extend("ChildEvent", {}); -const GrandchildEvent = BaseEvent.extend("GrandchildEvent", {}); -const UnrelatedEvent = BaseEvent.extend("UnrelatedEvent", {}); -const ScreenshotEvent = BaseEvent.extend("ScreenshotEvent", { target_id: z.string() }); -const NavigateEvent = BaseEvent.extend("NavigateEvent", { url: z.string() }); -const TabCreatedEvent = BaseEvent.extend("TabCreatedEvent", { tab_id: z.string() }); -const SystemEvent = BaseEvent.extend("SystemEvent", {}); -const UserActionEvent = BaseEvent.extend("UserActionEvent", { +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const ParentEvent = BaseEvent.extend('ParentEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {}) +const UnrelatedEvent = BaseEvent.extend('UnrelatedEvent', {}) +const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', { target_id: z.string() }) +const NavigateEvent = BaseEvent.extend('NavigateEvent', { url: z.string() }) +const TabCreatedEvent = BaseEvent.extend('TabCreatedEvent', { tab_id: z.string() }) +const SystemEvent = BaseEvent.extend('SystemEvent', {}) +const UserActionEvent = BaseEvent.extend('UserActionEvent', { action: z.string(), - user_id: z.string() -}); + user_id: z.string(), +}) const delay = (ms: number): Promise => new Promise((resolve) => { - setTimeout(resolve, ms); - }); + setTimeout(resolve, ms) + }) -test("find past returns most recent completed event", async () => { - const bus = new EventBus("FindPastBus"); +test('find past returns most recent completed event', async () => { + const bus = new EventBus('FindPastBus') - const first_event = bus.dispatch(ParentEvent({})); - await first_event.done(); - await delay(20); - const second_event = bus.dispatch(ParentEvent({})); - await second_event.done(); + const first_event = bus.dispatch(ParentEvent({})) + await first_event.done() + await delay(20) + const second_event = bus.dispatch(ParentEvent({})) + await second_event.done() - const found_event = await bus.find(ParentEvent, { past: true, future: false }); - assert.ok(found_event); - assert.equal(found_event.event_id, second_event.event_id); -}); + const found_event = await bus.find(ParentEvent, { past: true, future: false }) + assert.ok(found_event) + assert.equal(found_event.event_id, second_event.event_id) +}) -test("find past returns null when no matching event exists", async () => { - const bus = new EventBus("FindPastNoneBus"); +test('find past returns null when no matching event exists', async () => { + const bus = new EventBus('FindPastNoneBus') - const start = Date.now(); - const found_event = await bus.find(ParentEvent, { past: true, future: false }); - const elapsed_ms = Date.now() - start; + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: true, future: false }) + const elapsed_ms = Date.now() - start - assert.equal(found_event, null); - assert.ok(elapsed_ms < 100); -}); + assert.equal(found_event, null) + assert.ok(elapsed_ms < 100) +}) -test("find past window filters by time", async () => { - const bus = new EventBus("FindWindowBus"); +test('find past window filters by time', async () => { + const bus = new EventBus('FindWindowBus') - const old_event = bus.dispatch(ParentEvent({})); - await old_event.done(); - await delay(120); - const new_event = bus.dispatch(ParentEvent({})); - await new_event.done(); + const old_event = bus.dispatch(ParentEvent({})) + await old_event.done() + await delay(120) + const new_event = bus.dispatch(ParentEvent({})) + await new_event.done() - const found_event = await bus.find(ParentEvent, { past: 0.1, future: false }); - assert.ok(found_event); - assert.equal(found_event.event_id, new_event.event_id); -}); + const found_event = await bus.find(ParentEvent, { past: 0.1, future: false }) + assert.ok(found_event) + assert.equal(found_event.event_id, new_event.event_id) +}) -test("find past returns null when all events are too old", async () => { - const bus = new EventBus("FindTooOldBus"); +test('find past returns null when all events are too old', async () => { + const bus = new EventBus('FindTooOldBus') - const old_event = bus.dispatch(ParentEvent({})); - await old_event.done(); - await delay(120); + const old_event = bus.dispatch(ParentEvent({})) + await old_event.done() + await delay(120) - const found_event = await bus.find(ParentEvent, { past: 0.05, future: false }); - assert.equal(found_event, null); -}); + const found_event = await bus.find(ParentEvent, { past: 0.05, future: false }) + assert.equal(found_event, null) +}) -test("find future waits for event", async () => { - const bus = new EventBus("FindFutureBus"); +test('find future waits for event', async () => { + const bus = new EventBus('FindFutureBus') - const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }); + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }) setTimeout(() => { - bus.dispatch(ParentEvent({})); - }, 50); + bus.dispatch(ParentEvent({})) + }, 50) - const found_event = await find_promise; - assert.ok(found_event); - assert.equal(found_event.event_type, "ParentEvent"); -}); + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_type, 'ParentEvent') +}) -test("find future works with string event keys", async () => { - const bus = new EventBus("FindFutureStringBus"); +test('find future works with string event keys', async () => { + const bus = new EventBus('FindFutureStringBus') - const find_promise = bus.find("ParentEvent", { past: false, future: 0.5 }); + const find_promise = bus.find('ParentEvent', { past: false, future: 0.5 }) setTimeout(() => { - bus.dispatch(ParentEvent({})); - }, 30); + bus.dispatch(ParentEvent({})) + }, 30) - const found_event = await find_promise; - assert.ok(found_event); - assert.equal(found_event.event_type, "ParentEvent"); -}); + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_type, 'ParentEvent') +}) -test("find future ignores past events", async () => { - const bus = new EventBus("FindFutureIgnoresPastBus"); +test('find future ignores past events', async () => { + const bus = new EventBus('FindFutureIgnoresPastBus') - const prior = bus.dispatch(ParentEvent({})); - await prior.done(); + const prior = bus.dispatch(ParentEvent({})) + await prior.done() - const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }); - assert.equal(found_event, null); -}); + const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }) + assert.equal(found_event, null) +}) -test("find future times out when no event arrives", async () => { - const bus = new EventBus("FindFutureTimeoutBus"); +test('find future times out when no event arrives', async () => { + const bus = new EventBus('FindFutureTimeoutBus') - const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }); - assert.equal(found_event, null); -}); + const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }) + assert.equal(found_event, null) +}) -test("find past=false future=false returns null immediately", async () => { - const bus = new EventBus("FindNeitherBus"); +test('find past=false future=false returns null immediately', async () => { + const bus = new EventBus('FindNeitherBus') - const start = Date.now(); - const found_event = await bus.find(ParentEvent, { past: false, future: false }); - const elapsed_ms = Date.now() - start; + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: false, future: false }) + const elapsed_ms = Date.now() - start - assert.equal(found_event, null); - assert.ok(elapsed_ms < 100); -}); + assert.equal(found_event, null) + assert.ok(elapsed_ms < 100) +}) -test("find past+future returns past event immediately", async () => { - const bus = new EventBus("FindPastFutureBus"); +test('find past+future returns past event immediately', async () => { + const bus = new EventBus('FindPastFutureBus') - const dispatched = bus.dispatch(ParentEvent({})); - await dispatched.done(); + const dispatched = bus.dispatch(ParentEvent({})) + await dispatched.done() - const start = Date.now(); - const found_event = await bus.find(ParentEvent, { past: true, future: 0.5 }); - const elapsed_ms = Date.now() - start; + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: true, future: 0.5 }) + const elapsed_ms = Date.now() - start - assert.ok(found_event); - assert.equal(found_event.event_id, dispatched.event_id); - assert.ok(elapsed_ms < 100); -}); + assert.ok(found_event) + assert.equal(found_event.event_id, dispatched.event_id) + assert.ok(elapsed_ms < 100) +}) -test("find past+future waits for future when no past match", async () => { - const bus = new EventBus("FindPastFutureWaitBus"); +test('find past+future waits for future when no past match', async () => { + const bus = new EventBus('FindPastFutureWaitBus') - const find_promise = bus.find(ChildEvent, { past: true, future: 0.3 }); + const find_promise = bus.find(ChildEvent, { past: true, future: 0.3 }) setTimeout(() => { - bus.dispatch(ChildEvent({})); - }, 50); + bus.dispatch(ChildEvent({})) + }, 50) - const found_event = await find_promise; - assert.ok(found_event); - assert.equal(found_event.event_type, "ChildEvent"); -}); + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_type, 'ChildEvent') +}) -test("find past/future windows are independent", async () => { - const bus = new EventBus("FindWindowIndependentBus"); +test('find past/future windows are independent', async () => { + const bus = new EventBus('FindWindowIndependentBus') - const old_event = bus.dispatch(ParentEvent({})); - await old_event.done(); - await delay(120); + const old_event = bus.dispatch(ParentEvent({})) + await old_event.done() + await delay(120) - const start = Date.now(); - const found_event = await bus.find(ParentEvent, { past: 0.05, future: 0.05 }); - const elapsed_ms = Date.now() - start; + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: 0.05, future: 0.05 }) + const elapsed_ms = Date.now() - start - assert.equal(found_event, null); - assert.ok(elapsed_ms > 30); -}); + assert.equal(found_event, null) + assert.ok(elapsed_ms > 30) +}) -test("find past true future float returns old event immediately", async () => { - const bus = new EventBus("FindPastTrueFutureFloatBus"); +test('find past true future float returns old event immediately', async () => { + const bus = new EventBus('FindPastTrueFutureFloatBus') - const dispatched = bus.dispatch(ParentEvent({})); - await dispatched.done(); - await delay(120); + const dispatched = bus.dispatch(ParentEvent({})) + await dispatched.done() + await delay(120) - const found_event = await bus.find(ParentEvent, { past: true, future: 0.1 }); - assert.ok(found_event); - assert.equal(found_event.event_id, dispatched.event_id); -}); + const found_event = await bus.find(ParentEvent, { past: true, future: 0.1 }) + assert.ok(found_event) + assert.equal(found_event.event_id, dispatched.event_id) +}) -test("find past float future waits for new event", async () => { - const bus = new EventBus("FindPastFloatFutureWaitBus"); +test('find past float future waits for new event', async () => { + const bus = new EventBus('FindPastFloatFutureWaitBus') - const old_event = bus.dispatch(ParentEvent({})); - await old_event.done(); - await delay(120); + const old_event = bus.dispatch(ParentEvent({})) + await old_event.done() + await delay(120) - const find_promise = bus.find(ParentEvent, { past: 0.05, future: 0.2 }); + const find_promise = bus.find(ParentEvent, { past: 0.05, future: 0.2 }) setTimeout(() => { - bus.dispatch(ParentEvent({})); - }, 50); + bus.dispatch(ParentEvent({})) + }, 50) - const found_event = await find_promise; - assert.ok(found_event); - assert.notEqual(found_event.event_id, old_event.event_id); -}); + const found_event = await find_promise + assert.ok(found_event) + assert.notEqual(found_event.event_id, old_event.event_id) +}) -test("find past true future true returns past event immediately", async () => { - const bus = new EventBus("FindPastTrueFutureTrueBus"); +test('find past true future true returns past event immediately', async () => { + const bus = new EventBus('FindPastTrueFutureTrueBus') - const dispatched = bus.dispatch(ParentEvent({})); - await dispatched.done(); + const dispatched = bus.dispatch(ParentEvent({})) + await dispatched.done() - const start = Date.now(); - const found_event = await bus.find(ParentEvent, { past: true, future: true }); - const elapsed_ms = Date.now() - start; + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: true, future: true }) + const elapsed_ms = Date.now() - start - assert.ok(found_event); - assert.equal(found_event.event_id, dispatched.event_id); - assert.ok(elapsed_ms < 100); -}); + assert.ok(found_event) + assert.equal(found_event.event_id, dispatched.event_id) + assert.ok(elapsed_ms < 100) +}) -test("find respects where filter", async () => { - const bus = new EventBus("FindWhereBus"); +test('find respects where filter', async () => { + const bus = new EventBus('FindWhereBus') - const event_a = bus.dispatch(ScreenshotEvent({ target_id: "tab-a" })); - const event_b = bus.dispatch(ScreenshotEvent({ target_id: "tab-b" })); - await event_a.done(); - await event_b.done(); + const event_a = bus.dispatch(ScreenshotEvent({ target_id: 'tab-a' })) + const event_b = bus.dispatch(ScreenshotEvent({ target_id: 'tab-b' })) + await event_a.done() + await event_b.done() - const found_event = await bus.find( - ScreenshotEvent, - (event) => event.target_id === "tab-b", - { past: true, future: false } - ); + const found_event = await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab-b', { past: true, future: false }) - assert.ok(found_event); - assert.equal(found_event.event_id, event_b.event_id); -}); + assert.ok(found_event) + assert.equal(found_event.event_id, event_b.event_id) +}) -test("find where filter works with future waiting", async () => { - const bus = new EventBus("FindWhereFutureBus"); +test('find where filter works with future waiting', async () => { + const bus = new EventBus('FindWhereFutureBus') - const find_promise = bus.find( - UserActionEvent, - (event) => event.user_id === "user123", - { past: false, future: 0.3 } - ); + const find_promise = bus.find(UserActionEvent, (event) => event.user_id === 'user123', { past: false, future: 0.3 }) setTimeout(() => { - bus.dispatch(UserActionEvent({ action: "logout", user_id: "user456" })); - bus.dispatch(UserActionEvent({ action: "login", user_id: "user123" })); - }, 50); - - const found_event = await find_promise; - assert.ok(found_event); - assert.equal(found_event.user_id, "user123"); -}); - -test("find with multiple concurrent waiters resolves correct events", async () => { - const bus = new EventBus("FindConcurrentBus"); - - const find_normal = bus.find( - UserActionEvent, - (event) => event.action === "normal", - { past: false, future: 0.5 } - ); - const find_special = bus.find( - UserActionEvent, - (event) => event.action === "special", - { past: false, future: 0.5 } - ); - const find_system = bus.find("SystemEvent", { past: false, future: 0.5 }); + bus.dispatch(UserActionEvent({ action: 'logout', user_id: 'user456' })) + bus.dispatch(UserActionEvent({ action: 'login', user_id: 'user123' })) + }, 50) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.user_id, 'user123') +}) + +test('find with multiple concurrent waiters resolves correct events', async () => { + const bus = new EventBus('FindConcurrentBus') + + const find_normal = bus.find(UserActionEvent, (event) => event.action === 'normal', { past: false, future: 0.5 }) + const find_special = bus.find(UserActionEvent, (event) => event.action === 'special', { past: false, future: 0.5 }) + const find_system = bus.find('SystemEvent', { past: false, future: 0.5 }) setTimeout(() => { - bus.dispatch(UserActionEvent({ action: "normal", user_id: "u1" })); - bus.dispatch(SystemEvent({})); - bus.dispatch(UserActionEvent({ action: "special", user_id: "u2" })); - }, 50); - - const [normal, system, special] = await Promise.all([ - find_normal, - find_system, - find_special - ]); - - assert.ok(normal); - assert.equal(normal.action, "normal"); - assert.ok(system); - assert.equal(system.event_type, "SystemEvent"); - assert.ok(special); - assert.equal(special.action, "special"); -}); - -test("find child_of returns child event", async () => { - const bus = new EventBus("FindChildBus"); + bus.dispatch(UserActionEvent({ action: 'normal', user_id: 'u1' })) + bus.dispatch(SystemEvent({})) + bus.dispatch(UserActionEvent({ action: 'special', user_id: 'u2' })) + }, 50) + + const [normal, system, special] = await Promise.all([find_normal, find_system, find_special]) + + assert.ok(normal) + assert.equal(normal.action, 'normal') + assert.ok(system) + assert.equal(system.event_type, 'SystemEvent') + assert.ok(special) + assert.equal(special.action, 'special') +}) + +test('find child_of returns child event', async () => { + const bus = new EventBus('FindChildBus') bus.on(ParentEvent, (event) => { - event.bus?.emit(ChildEvent({})); - }); + event.bus?.emit(ChildEvent({})) + }) - const parent_event = bus.dispatch(ParentEvent({})); - await bus.waitUntilIdle(); + const parent_event = bus.dispatch(ParentEvent({})) + await bus.waitUntilIdle() const child_event = await bus.find(ChildEvent, { past: true, future: false, - child_of: parent_event - }); + child_of: parent_event, + }) - assert.ok(child_event); - assert.equal(child_event.event_parent_id, parent_event.event_id); -}); + assert.ok(child_event) + assert.equal(child_event.event_parent_id, parent_event.event_id) +}) -test("find child_of returns null for non-child", async () => { - const bus = new EventBus("FindNonChildBus"); +test('find child_of returns null for non-child', async () => { + const bus = new EventBus('FindNonChildBus') - const parent_event = bus.dispatch(ParentEvent({})); - const unrelated_event = bus.dispatch(UnrelatedEvent({})); - await parent_event.done(); - await unrelated_event.done(); + const parent_event = bus.dispatch(ParentEvent({})) + const unrelated_event = bus.dispatch(UnrelatedEvent({})) + await parent_event.done() + await unrelated_event.done() const found_event = await bus.find(UnrelatedEvent, { past: true, future: false, - child_of: parent_event - }); + child_of: parent_event, + }) - assert.equal(found_event, null); -}); + assert.equal(found_event, null) +}) -test("find child_of returns grandchild event", async () => { - const bus = new EventBus("FindGrandchildBus"); +test('find child_of returns grandchild event', async () => { + const bus = new EventBus('FindGrandchildBus') - let child_event_id: string | null = null; + let child_event_id: string | null = null bus.on(ParentEvent, async (event) => { - const child = await event.bus?.emit(ChildEvent({})).done(); - child_event_id = child?.event_id ?? null; - }); + const child = await event.bus?.emit(ChildEvent({})).done() + child_event_id = child?.event_id ?? null + }) bus.on(ChildEvent, async (event) => { - await event.bus?.emit(GrandchildEvent({})).done(); - }); + await event.bus?.emit(GrandchildEvent({})).done() + }) - const parent_event = bus.dispatch(ParentEvent({})); - await parent_event.done(); - await bus.waitUntilIdle(); + const parent_event = bus.dispatch(ParentEvent({})) + await parent_event.done() + await bus.waitUntilIdle() const grandchild_event = await bus.find(GrandchildEvent, { past: true, future: false, - child_of: parent_event - }); + child_of: parent_event, + }) - assert.ok(grandchild_event); - assert.equal(grandchild_event.event_parent_id, child_event_id); -}); + assert.ok(grandchild_event) + assert.equal(grandchild_event.event_parent_id, child_event_id) +}) -test("find child_of works across forwarded buses", async () => { - const main_bus = new EventBus("MainBus"); - const auth_bus = new EventBus("AuthBus"); +test('find child_of works across forwarded buses', async () => { + const main_bus = new EventBus('MainBus') + const auth_bus = new EventBus('AuthBus') - let child_event_id: string | null = null; + let child_event_id: string | null = null - main_bus.on(ParentEvent, auth_bus.dispatch); + main_bus.on(ParentEvent, auth_bus.dispatch) auth_bus.on(ParentEvent, async (event) => { - const child = await event.bus?.emit(ChildEvent({})).done(); - child_event_id = child.event_id; - }); + const child = await event.bus?.emit(ChildEvent({})).done() + child_event_id = child.event_id + }) - const parent_event = main_bus.dispatch(ParentEvent({})); - await parent_event.done(); - await main_bus.waitUntilIdle(); - await auth_bus.waitUntilIdle(); + const parent_event = main_bus.dispatch(ParentEvent({})) + await parent_event.done() + await main_bus.waitUntilIdle() + await auth_bus.waitUntilIdle() const found_child = await auth_bus.find(ChildEvent, { past: 5, future: 5, - child_of: parent_event - }); + child_of: parent_event, + }) - assert.ok(found_child); - assert.equal(found_child.event_id, child_event_id); -}); + assert.ok(found_child) + assert.equal(found_child.event_id, child_event_id) +}) -test("find child_of filters to correct parent among siblings", async () => { - const bus = new EventBus("FindCorrectParentBus"); +test('find child_of filters to correct parent among siblings', async () => { + const bus = new EventBus('FindCorrectParentBus') bus.on(NavigateEvent, async (event) => { - await event.bus?.emit(TabCreatedEvent({ tab_id: `tab_for_${event.url}` })).done(); - }); - bus.on(TabCreatedEvent, () => {}); + await event.bus?.emit(TabCreatedEvent({ tab_id: `tab_for_${event.url}` })).done() + }) + bus.on(TabCreatedEvent, () => {}) - const nav_1 = bus.dispatch(NavigateEvent({ url: "site1" })); - const nav_2 = bus.dispatch(NavigateEvent({ url: "site2" })); - await nav_1.done(); - await nav_2.done(); + const nav_1 = bus.dispatch(NavigateEvent({ url: 'site1' })) + const nav_2 = bus.dispatch(NavigateEvent({ url: 'site2' })) + await nav_1.done() + await nav_2.done() const tab_1 = await bus.find(TabCreatedEvent, { child_of: nav_1, past: true, - future: false - }); + future: false, + }) const tab_2 = await bus.find(TabCreatedEvent, { child_of: nav_2, past: true, - future: false - }); + future: false, + }) - assert.ok(tab_1); - assert.ok(tab_2); - assert.equal(tab_1.tab_id, "tab_for_site1"); - assert.equal(tab_2.tab_id, "tab_for_site2"); -}); + assert.ok(tab_1) + assert.ok(tab_2) + assert.equal(tab_1.tab_id, 'tab_for_site1') + assert.equal(tab_2.tab_id, 'tab_for_site2') +}) -test("find future with child_of waits for matching child", async () => { - const bus = new EventBus("FindFutureChildBus"); +test('find future with child_of waits for matching child', async () => { + const bus = new EventBus('FindFutureChildBus') bus.on(ParentEvent, async (event) => { - await delay(30); - await event.bus?.emit(ChildEvent({})).done(); - }); + await delay(30) + await event.bus?.emit(ChildEvent({})).done() + }) - const parent_event = bus.dispatch(ParentEvent({})); + const parent_event = bus.dispatch(ParentEvent({})) const find_promise = bus.find(ChildEvent, { child_of: parent_event, past: false, - future: 0.3 - }); - - const child_event = await find_promise; - assert.ok(child_event); - assert.equal(child_event.event_parent_id, parent_event.event_id); -}); - -test("find with past float and where filter", async () => { - const bus = new EventBus("FindWherePastFloatBus"); - - const old_event = bus.dispatch(ScreenshotEvent({ target_id: "tab1" })); - await old_event.done(); - await delay(120); - const new_event = bus.dispatch(ScreenshotEvent({ target_id: "tab2" })); - await new_event.done(); - - const found_tab2 = await bus.find( - ScreenshotEvent, - (event) => event.target_id === "tab2", - { past: 0.1, future: false } - ); - - assert.ok(found_tab2); - assert.equal(found_tab2.event_id, new_event.event_id); - - const found_tab1 = await bus.find( - ScreenshotEvent, - (event) => event.target_id === "tab1", - { past: 0.1, future: false } - ); - assert.equal(found_tab1, null); -}); - -test("find with child_of and past float", async () => { - const bus = new EventBus("FindChildPastFloatBus"); - - let child_event_id: string | null = null; + future: 0.3, + }) + + const child_event = await find_promise + assert.ok(child_event) + assert.equal(child_event.event_parent_id, parent_event.event_id) +}) + +test('find with past float and where filter', async () => { + const bus = new EventBus('FindWherePastFloatBus') + + const old_event = bus.dispatch(ScreenshotEvent({ target_id: 'tab1' })) + await old_event.done() + await delay(120) + const new_event = bus.dispatch(ScreenshotEvent({ target_id: 'tab2' })) + await new_event.done() + + const found_tab2 = await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab2', { past: 0.1, future: false }) + + assert.ok(found_tab2) + assert.equal(found_tab2.event_id, new_event.event_id) + + const found_tab1 = await bus.find(ScreenshotEvent, (event) => event.target_id === 'tab1', { past: 0.1, future: false }) + assert.equal(found_tab1, null) +}) + +test('find with child_of and past float', async () => { + const bus = new EventBus('FindChildPastFloatBus') + + let child_event_id: string | null = null bus.on(ParentEvent, async (event) => { - const child = await event.bus?.emit(ChildEvent({})).done(); - child_event_id = child?.event_id ?? null; - }); + const child = await event.bus?.emit(ChildEvent({})).done() + child_event_id = child?.event_id ?? null + }) - const parent_event = bus.dispatch(ParentEvent({})); - await parent_event.done(); - await bus.waitUntilIdle(); + const parent_event = bus.dispatch(ParentEvent({})) + await parent_event.done() + await bus.waitUntilIdle() const found_child = await bus.find(ChildEvent, { child_of: parent_event, past: 5, - future: false - }); + future: false, + }) - assert.ok(found_child); - assert.equal(found_child.event_id, child_event_id); -}); + assert.ok(found_child) + assert.equal(found_child.event_id, child_event_id) +}) -test("find with all parameters combined", async () => { - const bus = new EventBus("FindAllParamsBus"); +test('find with all parameters combined', async () => { + const bus = new EventBus('FindAllParamsBus') - let child_event_id: string | null = null; + let child_event_id: string | null = null bus.on(ParentEvent, async (event) => { - const child = await event.bus?.emit(ScreenshotEvent({ target_id: "child_tab" })).done(); - child_event_id = child?.event_id ?? null; - }); - - const parent_event = bus.dispatch(ParentEvent({})); - await parent_event.done(); - await bus.waitUntilIdle(); - - const found_child = await bus.find( - ScreenshotEvent, - (event) => event.target_id === "child_tab", - { - child_of: parent_event, - past: 5, - future: false - } - ); - - assert.ok(found_child); - assert.equal(found_child.event_id, child_event_id); -}); - -test("find past ignores in-progress events but returns after completion", async () => { - const bus = new EventBus("FindCompletedOnlyBus"); + const child = await event.bus?.emit(ScreenshotEvent({ target_id: 'child_tab' })).done() + child_event_id = child?.event_id ?? null + }) + + const parent_event = bus.dispatch(ParentEvent({})) + await parent_event.done() + await bus.waitUntilIdle() + + const found_child = await bus.find(ScreenshotEvent, (event) => event.target_id === 'child_tab', { + child_of: parent_event, + past: 5, + future: false, + }) + + assert.ok(found_child) + assert.equal(found_child.event_id, child_event_id) +}) + +test('find past ignores in-progress events but returns after completion', async () => { + const bus = new EventBus('FindCompletedOnlyBus') bus.on(ParentEvent, async () => { - await delay(80); - }); + await delay(80) + }) - const dispatched = bus.dispatch(ParentEvent({})); - await delay(10); + const dispatched = bus.dispatch(ParentEvent({})) + await delay(10) - const early_find = await bus.find(ParentEvent, { past: true, future: false }); - assert.equal(early_find, null); + const early_find = await bus.find(ParentEvent, { past: true, future: false }) + assert.equal(early_find, null) - await dispatched.done(); + await dispatched.done() - const later_find = await bus.find(ParentEvent, { past: true, future: false }); - assert.ok(later_find); - assert.equal(later_find.event_id, dispatched.event_id); -}); + const later_find = await bus.find(ParentEvent, { past: true, future: false }) + assert.ok(later_find) + assert.equal(later_find.event_id, dispatched.event_id) +}) -test("find future resolves before handlers complete", async () => { - const bus = new EventBus("FindBeforeCompleteBus"); +test('find future resolves before handlers complete', async () => { + const bus = new EventBus('FindBeforeCompleteBus') bus.on(ParentEvent, async () => { - await delay(80); - }); + await delay(80) + }) - const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }); + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }) setTimeout(() => { - bus.dispatch(ParentEvent({})); - }, 20); + bus.dispatch(ParentEvent({})) + }, 20) - const found_event = await find_promise; - assert.ok(found_event); - assert.equal(found_event.event_status, "started"); + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_status, 'started') - await found_event.done(); - assert.equal(found_event.event_status, "completed"); -}); + await found_event.done() + assert.equal(found_event.event_status, 'completed') +}) -test("find catches child event that fired during parent handler", async () => { - const bus = new EventBus("FindRaceConditionBus"); +test('find catches child event that fired during parent handler', async () => { + const bus = new EventBus('FindRaceConditionBus') - let tab_event_id: string | null = null; + let tab_event_id: string | null = null bus.on(NavigateEvent, async (event) => { - const tab_event = await event.bus?.emit(TabCreatedEvent({ tab_id: "new_tab" })).done(); - tab_event_id = tab_event?.event_id ?? null; - }); - bus.on(TabCreatedEvent, () => {}); + const tab_event = await event.bus?.emit(TabCreatedEvent({ tab_id: 'new_tab' })).done() + tab_event_id = tab_event?.event_id ?? null + }) + bus.on(TabCreatedEvent, () => {}) - const nav_event = bus.dispatch(NavigateEvent({ url: "https://example.com" })); - await nav_event.done(); + const nav_event = bus.dispatch(NavigateEvent({ url: 'https://example.com' })) + await nav_event.done() const found_tab = await bus.find(TabCreatedEvent, { child_of: nav_event, past: true, - future: false - }); + future: false, + }) - assert.ok(found_tab); - assert.equal(found_tab.event_id, tab_event_id); -}); + assert.ok(found_tab) + assert.equal(found_tab.event_id, tab_event_id) +}) -test("find returns promise that can be awaited later", async () => { - const bus = new EventBus("FindPromiseBus"); +test('find returns promise that can be awaited later', async () => { + const bus = new EventBus('FindPromiseBus') - const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }); - assert.ok(find_promise instanceof Promise); + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }) + assert.ok(find_promise instanceof Promise) - bus.dispatch(ParentEvent({})); - const found_event = await find_promise; - assert.ok(found_event); -}); + bus.dispatch(ParentEvent({})) + const found_event = await find_promise + assert.ok(found_event) +}) diff --git a/bubus-ts/tests/forwarding.test.ts b/bubus-ts/tests/forwarding.test.ts index a380ecf..27c8d92 100644 --- a/bubus-ts/tests/forwarding.test.ts +++ b/bubus-ts/tests/forwarding.test.ts @@ -1,186 +1,186 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { z } from "zod"; +import { z } from 'zod' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const PingEvent = BaseEvent.extend("PingEvent", { value: z.number() }); +const PingEvent = BaseEvent.extend('PingEvent', { value: z.number() }) -test("events forward between buses without duplication", async () => { - const bus_a = new EventBus("BusA"); - const bus_b = new EventBus("BusB"); - const bus_c = new EventBus("BusC"); +test('events forward between buses without duplication', async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + const bus_c = new EventBus('BusC') - const seen_a: string[] = []; - const seen_b: string[] = []; - const seen_c: string[] = []; + const seen_a: string[] = [] + const seen_b: string[] = [] + const seen_c: string[] = [] bus_a.on(PingEvent, (event) => { - seen_a.push(event.event_id); - }); + seen_a.push(event.event_id) + }) bus_b.on(PingEvent, (event) => { - seen_b.push(event.event_id); - }); + seen_b.push(event.event_id) + }) bus_c.on(PingEvent, (event) => { - seen_c.push(event.event_id); - }); + seen_c.push(event.event_id) + }) - bus_a.on("*", bus_b.dispatch); - bus_b.on("*", bus_c.dispatch); + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) - const event = bus_a.dispatch(PingEvent({ value: 1 })); + const event = bus_a.dispatch(PingEvent({ value: 1 })) - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); - await bus_c.waitUntilIdle(); + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() - assert.equal(seen_a.length, 1); - assert.equal(seen_b.length, 1); - assert.equal(seen_c.length, 1); + assert.equal(seen_a.length, 1) + assert.equal(seen_b.length, 1) + assert.equal(seen_c.length, 1) - assert.equal(seen_a[0], event.event_id); - assert.equal(seen_b[0], event.event_id); - assert.equal(seen_c[0], event.event_id); + assert.equal(seen_a[0], event.event_id) + assert.equal(seen_b[0], event.event_id) + assert.equal(seen_c[0], event.event_id) - assert.deepEqual(event.event_path, ["BusA", "BusB", "BusC"]); -}); + assert.deepEqual(event.event_path, ['BusA', 'BusB', 'BusC']) +}) -test("await event.done waits for handlers on forwarded buses", async () => { - const bus_a = new EventBus("BusA"); - const bus_b = new EventBus("BusB"); - const bus_c = new EventBus("BusC"); +test('await event.done waits for handlers on forwarded buses', async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + const bus_c = new EventBus('BusC') - const completion_log: string[] = []; + const completion_log: string[] = [] const delay = (ms: number): Promise => new Promise((resolve) => { - setTimeout(resolve, ms); - }); + setTimeout(resolve, ms) + }) bus_a.on(PingEvent, async () => { - await delay(10); - completion_log.push("A"); - }); + await delay(10) + completion_log.push('A') + }) bus_b.on(PingEvent, async () => { - await delay(30); - completion_log.push("B"); - }); + await delay(30) + completion_log.push('B') + }) bus_c.on(PingEvent, async () => { - await delay(50); - completion_log.push("C"); - }); + await delay(50) + completion_log.push('C') + }) - bus_a.on("*", bus_b.dispatch); - bus_b.on("*", bus_c.dispatch); + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) - const event = bus_a.dispatch(PingEvent({ value: 2 })); + const event = bus_a.dispatch(PingEvent({ value: 2 })) - await event.done(); + await event.done() - assert.deepEqual(completion_log.sort(), ["A", "B", "C"]); - assert.equal(event.event_pending_buses, 0); -}); + assert.deepEqual(completion_log.sort(), ['A', 'B', 'C']) + assert.equal(event.event_pending_buses, 0) +}) -test("circular forwarding A->B->C->A does not loop", async () => { - const peer1 = new EventBus("Peer1"); - const peer2 = new EventBus("Peer2"); - const peer3 = new EventBus("Peer3"); +test('circular forwarding A->B->C->A does not loop', async () => { + const peer1 = new EventBus('Peer1') + const peer2 = new EventBus('Peer2') + const peer3 = new EventBus('Peer3') - const events_at_peer1: string[] = []; - const events_at_peer2: string[] = []; - const events_at_peer3: string[] = []; + const events_at_peer1: string[] = [] + const events_at_peer2: string[] = [] + const events_at_peer3: string[] = [] peer1.on(PingEvent, (event) => { - events_at_peer1.push(event.event_id); - }); + events_at_peer1.push(event.event_id) + }) peer2.on(PingEvent, (event) => { - events_at_peer2.push(event.event_id); - }); + events_at_peer2.push(event.event_id) + }) peer3.on(PingEvent, (event) => { - events_at_peer3.push(event.event_id); - }); + events_at_peer3.push(event.event_id) + }) // Create a full cycle: Peer1 -> Peer2 -> Peer3 -> Peer1 - peer1.on("*", peer2.dispatch); - peer2.on("*", peer3.dispatch); - peer3.on("*", peer1.dispatch); // completes the circle + peer1.on('*', peer2.dispatch) + peer2.on('*', peer3.dispatch) + peer3.on('*', peer1.dispatch) // completes the circle - const event = peer1.dispatch(PingEvent({ value: 42 })); + const event = peer1.dispatch(PingEvent({ value: 42 })) - await peer1.waitUntilIdle(); - await peer2.waitUntilIdle(); - await peer3.waitUntilIdle(); + await peer1.waitUntilIdle() + await peer2.waitUntilIdle() + await peer3.waitUntilIdle() // Each peer must see the event exactly once (no infinite loop) - assert.equal(events_at_peer1.length, 1); - assert.equal(events_at_peer2.length, 1); - assert.equal(events_at_peer3.length, 1); + assert.equal(events_at_peer1.length, 1) + assert.equal(events_at_peer2.length, 1) + assert.equal(events_at_peer3.length, 1) // All saw the same event - assert.equal(events_at_peer1[0], event.event_id); - assert.equal(events_at_peer2[0], event.event_id); - assert.equal(events_at_peer3[0], event.event_id); + assert.equal(events_at_peer1[0], event.event_id) + assert.equal(events_at_peer2[0], event.event_id) + assert.equal(events_at_peer3[0], event.event_id) // event_path shows propagation order without looping back - assert.deepEqual(event.event_path, ["Peer1", "Peer2", "Peer3"]); + assert.deepEqual(event.event_path, ['Peer1', 'Peer2', 'Peer3']) // --- Start from a different peer in the same cycle --- - events_at_peer1.length = 0; - events_at_peer2.length = 0; - events_at_peer3.length = 0; + events_at_peer1.length = 0 + events_at_peer2.length = 0 + events_at_peer3.length = 0 - const event2 = peer2.dispatch(PingEvent({ value: 99 })); + const event2 = peer2.dispatch(PingEvent({ value: 99 })) - await peer1.waitUntilIdle(); - await peer2.waitUntilIdle(); - await peer3.waitUntilIdle(); + await peer1.waitUntilIdle() + await peer2.waitUntilIdle() + await peer3.waitUntilIdle() // Each peer sees it exactly once - assert.equal(events_at_peer1.length, 1); - assert.equal(events_at_peer2.length, 1); - assert.equal(events_at_peer3.length, 1); + assert.equal(events_at_peer1.length, 1) + assert.equal(events_at_peer2.length, 1) + assert.equal(events_at_peer3.length, 1) // Path starts at Peer2, goes to Peer3, then Peer1 (stops before looping back to Peer2) - assert.deepEqual(event2.event_path, ["Peer2", "Peer3", "Peer1"]); -}); + assert.deepEqual(event2.event_path, ['Peer2', 'Peer3', 'Peer1']) +}) -test("await event.done waits when forwarding handler is async-delayed", async () => { - const bus_a = new EventBus("BusA"); - const bus_b = new EventBus("BusB"); +test('await event.done waits when forwarding handler is async-delayed', async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') const delay = (ms: number): Promise => new Promise((resolve) => { - setTimeout(resolve, ms); - }); + setTimeout(resolve, ms) + }) - let bus_a_done = false; - let bus_b_done = false; + let bus_a_done = false + let bus_b_done = false bus_a.on(PingEvent, async () => { - await delay(20); - bus_a_done = true; - }); + await delay(20) + bus_a_done = true + }) bus_b.on(PingEvent, async () => { - await delay(10); - bus_b_done = true; - }); - - bus_a.on("*", async (event) => { - await delay(30); - bus_b.dispatch(event); - }); - - const event = bus_a.dispatch(PingEvent({ value: 3 })); - await event.done(); - - assert.equal(bus_a_done, true); - assert.equal(bus_b_done, true); - assert.equal(event.event_pending_buses, 0); - assert.deepEqual(event.event_path, ["BusA", "BusB"]); -}); + await delay(10) + bus_b_done = true + }) + + bus_a.on('*', async (event) => { + await delay(30) + bus_b.dispatch(event) + }) + + const event = bus_a.dispatch(PingEvent({ value: 3 })) + await event.done() + + assert.equal(bus_a_done, true) + assert.equal(bus_b_done, true) + assert.equal(event.event_pending_buses, 0) + assert.deepEqual(event.event_path, ['BusA', 'BusB']) +}) diff --git a/bubus-ts/tests/handlers.test.ts b/bubus-ts/tests/handlers.test.ts index 6599427..837eb6b 100644 --- a/bubus-ts/tests/handlers.test.ts +++ b/bubus-ts/tests/handlers.test.ts @@ -1,154 +1,150 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { z } from "zod"; +import { z } from 'zod' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const UserActionEvent = BaseEvent.extend("UserActionEvent", { +const UserActionEvent = BaseEvent.extend('UserActionEvent', { action: z.string(), - user_id: z.string() -}); + user_id: z.string(), +}) -const SystemEventModel = BaseEvent.extend("SystemEventModel", { - event_name: z.string() -}); +const SystemEventModel = BaseEvent.extend('SystemEventModel', { + event_name: z.string(), +}) -test("handler registration via string, class, and wildcard", async () => { - const bus = new EventBus("HandlerRegistrationBus"); +test('handler registration via string, class, and wildcard', async () => { + const bus = new EventBus('HandlerRegistrationBus') const results: Record = { specific: [], model: [], - universal: [] - }; + universal: [], + } const user_handler = async (event: InstanceType): Promise => { - results.specific.push(event.action); - return "user_handled"; - }; + results.specific.push(event.action) + return 'user_handled' + } const system_handler = async (event: InstanceType): Promise => { - results.model.push(event.event_name); - return "system_handled"; - }; + results.model.push(event.event_name) + return 'system_handled' + } const universal_handler = async (event: BaseEvent): Promise => { - results.universal.push(event.event_type); - return "universal"; - }; + results.universal.push(event.event_type) + return 'universal' + } - const system_event_class = (SystemEventModel as unknown as { class: typeof BaseEvent }).class; + const system_event_class = (SystemEventModel as unknown as { class: typeof BaseEvent }).class - bus.on("UserActionEvent", user_handler); - bus.on(system_event_class, system_handler); - bus.on("*", universal_handler); + bus.on('UserActionEvent', user_handler) + bus.on(system_event_class, system_handler) + bus.on('*', universal_handler) - bus.dispatch(UserActionEvent({ action: "login", user_id: "u1" })); - bus.dispatch(SystemEventModel({ event_name: "startup" })); - await bus.waitUntilIdle(); + bus.dispatch(UserActionEvent({ action: 'login', user_id: 'u1' })) + bus.dispatch(SystemEventModel({ event_name: 'startup' })) + await bus.waitUntilIdle() - assert.deepEqual(results.specific, ["login"]); - assert.deepEqual(results.model, ["startup"]); - assert.deepEqual(new Set(results.universal), new Set(["UserActionEvent", "SystemEventModel"])); -}); + assert.deepEqual(results.specific, ['login']) + assert.deepEqual(results.model, ['startup']) + assert.deepEqual(new Set(results.universal), new Set(['UserActionEvent', 'SystemEventModel'])) +}) -test("handlers can be sync or async", async () => { - const bus = new EventBus("SyncAsyncHandlersBus"); +test('handlers can be sync or async', async () => { + const bus = new EventBus('SyncAsyncHandlersBus') - const sync_handler = (_event: BaseEvent): string => "sync"; - const async_handler = async (_event: BaseEvent): Promise => "async"; + const sync_handler = (_event: BaseEvent): string => 'sync' + const async_handler = async (_event: BaseEvent): Promise => 'async' - bus.on("TestEvent", sync_handler); - bus.on("TestEvent", async_handler); + bus.on('TestEvent', sync_handler) + bus.on('TestEvent', async_handler) - const handler_count = Array.from(bus.handlers.values()).filter( - (entry) => entry.event_key === "TestEvent" - ).length; - assert.equal(handler_count, 2); + const handler_count = Array.from(bus.handlers.values()).filter((entry) => entry.event_key === 'TestEvent').length + assert.equal(handler_count, 2) - const event = bus.dispatch(BaseEvent.extend("TestEvent", {})({})); - await event.done(); + const event = bus.dispatch(BaseEvent.extend('TestEvent', {})({})) + await event.done() - const results = Array.from(event.event_results.values()).map((result) => result.result); - assert.ok(results.includes("sync")); - assert.ok(results.includes("async")); -}); + const results = Array.from(event.event_results.values()).map((result) => result.result) + assert.ok(results.includes('sync')) + assert.ok(results.includes('async')) +}) -test("instance, class, and static method handlers", async () => { - const bus = new EventBus("MethodHandlersBus"); - const results: string[] = []; +test('instance, class, and static method handlers', async () => { + const bus = new EventBus('MethodHandlersBus') + const results: string[] = [] class EventProcessor { - name: string; - value: number; + name: string + value: number constructor(name: string, value: number) { - this.name = name; - this.value = value; + this.name = name + this.value = value } sync_method_handler = (event: InstanceType): Record => { - results.push(`${this.name}_sync`); - return { processor: this.name, value: this.value, action: event.action }; - }; - - async async_method_handler( - event: InstanceType - ): Promise> { - await new Promise((resolve) => setTimeout(resolve, 10)); - results.push(`${this.name}_async`); - return { processor: this.name, value: this.value * 2, action: event.action }; + results.push(`${this.name}_sync`) + return { processor: this.name, value: this.value, action: event.action } + } + + async async_method_handler(event: InstanceType): Promise> { + await new Promise((resolve) => setTimeout(resolve, 10)) + results.push(`${this.name}_async`) + return { processor: this.name, value: this.value * 2, action: event.action } } static class_method_handler(event: InstanceType): string { - results.push("classmethod"); - return `Handled by ${event.event_type}`; + results.push('classmethod') + return `Handled by ${event.event_type}` } static static_method_handler(_event: InstanceType): string { - results.push("staticmethod"); - return "Handled by static method"; + results.push('staticmethod') + return 'Handled by static method' } } - const processor1 = new EventProcessor("Processor1", 10); - const processor2 = new EventProcessor("Processor2", 20); + const processor1 = new EventProcessor('Processor1', 10) + const processor2 = new EventProcessor('Processor2', 20) - bus.on(UserActionEvent, processor1.sync_method_handler); - bus.on(UserActionEvent, processor1.async_method_handler.bind(processor1)); - bus.on(UserActionEvent, processor2.sync_method_handler); - bus.on("UserActionEvent", EventProcessor.class_method_handler); - bus.on("UserActionEvent", EventProcessor.static_method_handler); + bus.on(UserActionEvent, processor1.sync_method_handler) + bus.on(UserActionEvent, processor1.async_method_handler.bind(processor1)) + bus.on(UserActionEvent, processor2.sync_method_handler) + bus.on('UserActionEvent', EventProcessor.class_method_handler) + bus.on('UserActionEvent', EventProcessor.static_method_handler) - const event = UserActionEvent({ action: "test_methods", user_id: "u123" }); - const completed_event = bus.dispatch(event); - await completed_event.done(); + const event = UserActionEvent({ action: 'test_methods', user_id: 'u123' }) + const completed_event = bus.dispatch(event) + await completed_event.done() - assert.equal(results.length, 5); - assert.ok(results.includes("Processor1_sync")); - assert.ok(results.includes("Processor1_async")); - assert.ok(results.includes("Processor2_sync")); - assert.ok(results.includes("classmethod")); - assert.ok(results.includes("staticmethod")); + assert.equal(results.length, 5) + assert.ok(results.includes('Processor1_sync')) + assert.ok(results.includes('Processor1_async')) + assert.ok(results.includes('Processor2_sync')) + assert.ok(results.includes('classmethod')) + assert.ok(results.includes('staticmethod')) - const result_values = Array.from(completed_event.event_results.values()).map((result) => result.result); + const result_values = Array.from(completed_event.event_results.values()).map((result) => result.result) const p1_sync = result_values.find( (result) => - typeof result === "object" && + typeof result === 'object' && result !== null && - (result as { processor?: string; value?: number }).processor === "Processor1" && + (result as { processor?: string; value?: number }).processor === 'Processor1' && (result as { value?: number }).value === 10 - ) as { action?: string } | undefined; + ) as { action?: string } | undefined const p1_async = result_values.find( (result) => - typeof result === "object" && + typeof result === 'object' && result !== null && - (result as { processor?: string; value?: number }).processor === "Processor1" && + (result as { processor?: string; value?: number }).processor === 'Processor1' && (result as { value?: number }).value === 20 - ) as { action?: string } | undefined; + ) as { action?: string } | undefined - assert.equal(p1_sync?.action, "test_methods"); - assert.equal(p1_async?.action, "test_methods"); -}); + assert.equal(p1_sync?.action, 'test_methods') + assert.equal(p1_async?.action, 'test_methods') +}) diff --git a/bubus-ts/tests/locking.test.ts b/bubus-ts/tests/locking.test.ts index 87b9e46..f9bd0d8 100644 --- a/bubus-ts/tests/locking.test.ts +++ b/bubus-ts/tests/locking.test.ts @@ -1,9 +1,9 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { z } from "zod"; +import { z } from 'zod' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' /* Potential failure modes @@ -78,989 +78,983 @@ M) Edge-cases - Event emitted with no bus set (done should reject). */ -const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); +const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)) const withResolvers = () => { - let resolve!: (value: T | PromiseLike) => void; - let reject!: (reason?: unknown) => void; + let resolve!: (value: T | PromiseLike) => void + let reject!: (reason?: unknown) => void const promise = new Promise((resolve_fn, reject_fn) => { - resolve = resolve_fn; - reject = reject_fn; - }); - return { promise, resolve, reject }; -}; - -test("global-serial: only one event processes at a time across buses", async () => { - const SerialEvent = BaseEvent.extend("SerialEvent", { + resolve = resolve_fn + reject = reject_fn + }) + return { promise, resolve, reject } +} + +test('global-serial: only one event processes at a time across buses', async () => { + const SerialEvent = BaseEvent.extend('SerialEvent', { order: z.number(), - source: z.string() - }); + source: z.string(), + }) - const bus_a = new EventBus("GlobalSerialA", { event_concurrency: "global-serial" }); - const bus_b = new EventBus("GlobalSerialB", { event_concurrency: "global-serial" }); + const bus_a = new EventBus('GlobalSerialA', { event_concurrency: 'global-serial' }) + const bus_b = new EventBus('GlobalSerialB', { event_concurrency: 'global-serial' }) - let in_flight = 0; - let max_in_flight = 0; - const starts: string[] = []; + let in_flight = 0 + let max_in_flight = 0 + const starts: string[] = [] const handler = async (event: InstanceType) => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - starts.push(`${event.source}:${event.order}`); - await sleep(10); - in_flight -= 1; - }; + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + starts.push(`${event.source}:${event.order}`) + await sleep(10) + in_flight -= 1 + } - bus_a.on(SerialEvent, handler); - bus_b.on(SerialEvent, handler); + bus_a.on(SerialEvent, handler) + bus_b.on(SerialEvent, handler) for (let i = 0; i < 3; i += 1) { - bus_a.dispatch(SerialEvent({ order: i, source: "a" })); - bus_b.dispatch(SerialEvent({ order: i, source: "b" })); + bus_a.dispatch(SerialEvent({ order: i, source: 'a' })) + bus_b.dispatch(SerialEvent({ order: i, source: 'b' })) } - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() - assert.equal(max_in_flight, 1); + assert.equal(max_in_flight, 1) - const starts_a = starts.filter((value) => value.startsWith("a:")).map((value) => Number(value.split(":")[1])); - const starts_b = starts.filter((value) => value.startsWith("b:")).map((value) => Number(value.split(":")[1])); + const starts_a = starts.filter((value) => value.startsWith('a:')).map((value) => Number(value.split(':')[1])) + const starts_b = starts.filter((value) => value.startsWith('b:')).map((value) => Number(value.split(':')[1])) - assert.deepEqual(starts_a, [0, 1, 2]); - assert.deepEqual(starts_b, [0, 1, 2]); -}); + assert.deepEqual(starts_a, [0, 1, 2]) + assert.deepEqual(starts_b, [0, 1, 2]) +}) -test("global-serial: awaited child jumps ahead of queued events across buses", async () => { - const ParentEvent = BaseEvent.extend("ParentEvent", {}); - const ChildEvent = BaseEvent.extend("ChildEvent", {}); - const QueuedEvent = BaseEvent.extend("QueuedEvent", {}); +test('global-serial: awaited child jumps ahead of queued events across buses', async () => { + const ParentEvent = BaseEvent.extend('ParentEvent', {}) + const ChildEvent = BaseEvent.extend('ChildEvent', {}) + const QueuedEvent = BaseEvent.extend('QueuedEvent', {}) - const bus_a = new EventBus("GlobalSerialParent", { event_concurrency: "global-serial" }); - const bus_b = new EventBus("GlobalSerialChild", { event_concurrency: "global-serial" }); + const bus_a = new EventBus('GlobalSerialParent', { event_concurrency: 'global-serial' }) + const bus_b = new EventBus('GlobalSerialChild', { event_concurrency: 'global-serial' }) - const order: string[] = []; + const order: string[] = [] bus_b.on(ChildEvent, async () => { - order.push("child_start"); - await sleep(5); - order.push("child_end"); - }); + order.push('child_start') + await sleep(5) + order.push('child_end') + }) bus_b.on(QueuedEvent, async () => { - order.push("queued_start"); - await sleep(1); - order.push("queued_end"); - }); + order.push('queued_start') + await sleep(1) + order.push('queued_end') + }) bus_a.on(ParentEvent, async (event) => { - order.push("parent_start"); - bus_b.emit(QueuedEvent({})); + order.push('parent_start') + bus_b.emit(QueuedEvent({})) // Emit through the scoped proxy so parent tracking is set up, // then also dispatch to bus_b for cross-bus processing. - const child = event.bus?.emit(ChildEvent({}))!; - bus_b.dispatch(child); - order.push("child_dispatched"); - await child.done(); - order.push("child_awaited"); - order.push("parent_end"); - }); - - const parent = bus_a.dispatch(ParentEvent({})); - await parent.done(); - await bus_b.waitUntilIdle(); - - const child_start_idx = order.indexOf("child_start"); - const child_end_idx = order.indexOf("child_end"); - const queued_start_idx = order.indexOf("queued_start"); - - assert.ok(child_start_idx !== -1); - assert.ok(child_end_idx !== -1); - assert.ok(queued_start_idx !== -1); - assert.ok(child_start_idx < queued_start_idx); - assert.ok(child_end_idx < queued_start_idx); -}); - -test("global-serial: handler limiter serializes handlers across buses", async () => { - const HandlerEvent = BaseEvent.extend("HandlerEvent", { + const child = event.bus?.emit(ChildEvent({}))! + bus_b.dispatch(child) + order.push('child_dispatched') + await child.done() + order.push('child_awaited') + order.push('parent_end') + }) + + const parent = bus_a.dispatch(ParentEvent({})) + await parent.done() + await bus_b.waitUntilIdle() + + const child_start_idx = order.indexOf('child_start') + const child_end_idx = order.indexOf('child_end') + const queued_start_idx = order.indexOf('queued_start') + + assert.ok(child_start_idx !== -1) + assert.ok(child_end_idx !== -1) + assert.ok(queued_start_idx !== -1) + assert.ok(child_start_idx < queued_start_idx) + assert.ok(child_end_idx < queued_start_idx) +}) + +test('global-serial: handler limiter serializes handlers across buses', async () => { + const HandlerEvent = BaseEvent.extend('HandlerEvent', { order: z.number(), - source: z.string() - }); + source: z.string(), + }) - const bus_a = new EventBus("GlobalHandlerA", { - event_concurrency: "parallel", - handler_concurrency: "global-serial" - }); - const bus_b = new EventBus("GlobalHandlerB", { - event_concurrency: "parallel", - handler_concurrency: "global-serial" - }); + const bus_a = new EventBus('GlobalHandlerA', { + event_concurrency: 'parallel', + handler_concurrency: 'global-serial', + }) + const bus_b = new EventBus('GlobalHandlerB', { + event_concurrency: 'parallel', + handler_concurrency: 'global-serial', + }) - let in_flight = 0; - let max_in_flight = 0; + let in_flight = 0 + let max_in_flight = 0 const handler = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await sleep(5); - in_flight -= 1; - }; + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await sleep(5) + in_flight -= 1 + } - bus_a.on(HandlerEvent, handler); - bus_b.on(HandlerEvent, handler); + bus_a.on(HandlerEvent, handler) + bus_b.on(HandlerEvent, handler) for (let i = 0; i < 4; i += 1) { - bus_a.dispatch(HandlerEvent({ order: i, source: "a" })); - bus_b.dispatch(HandlerEvent({ order: i, source: "b" })); + bus_a.dispatch(HandlerEvent({ order: i, source: 'a' })) + bus_b.dispatch(HandlerEvent({ order: i, source: 'b' })) } - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() - assert.equal(max_in_flight, 1); -}); + assert.equal(max_in_flight, 1) +}) -test("bus-serial: events serialize per bus but overlap across buses", async () => { - const SerialEvent = BaseEvent.extend("SerialPerBusEvent", { +test('bus-serial: events serialize per bus but overlap across buses', async () => { + const SerialEvent = BaseEvent.extend('SerialPerBusEvent', { order: z.number(), - source: z.string() - }); + source: z.string(), + }) - const bus_a = new EventBus("BusSerialA", { event_concurrency: "bus-serial" }); - const bus_b = new EventBus("BusSerialB", { event_concurrency: "bus-serial" }); + const bus_a = new EventBus('BusSerialA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('BusSerialB', { event_concurrency: 'bus-serial' }) - let in_flight_global = 0; - let max_in_flight_global = 0; - let in_flight_a = 0; - let in_flight_b = 0; - let max_in_flight_a = 0; - let max_in_flight_b = 0; + let in_flight_global = 0 + let max_in_flight_global = 0 + let in_flight_a = 0 + let in_flight_b = 0 + let max_in_flight_a = 0 + let max_in_flight_b = 0 - let resolve_b_started: (() => void) | null = null; + let resolve_b_started: (() => void) | null = null const b_started = new Promise((resolve) => { - resolve_b_started = resolve; - }); + resolve_b_started = resolve + }) bus_a.on(SerialEvent, async () => { - in_flight_global += 1; - in_flight_a += 1; - max_in_flight_global = Math.max(max_in_flight_global, in_flight_global); - max_in_flight_a = Math.max(max_in_flight_a, in_flight_a); - await b_started; - await sleep(10); - in_flight_global -= 1; - in_flight_a -= 1; - }); + in_flight_global += 1 + in_flight_a += 1 + max_in_flight_global = Math.max(max_in_flight_global, in_flight_global) + max_in_flight_a = Math.max(max_in_flight_a, in_flight_a) + await b_started + await sleep(10) + in_flight_global -= 1 + in_flight_a -= 1 + }) bus_b.on(SerialEvent, async () => { - in_flight_global += 1; - in_flight_b += 1; - max_in_flight_global = Math.max(max_in_flight_global, in_flight_global); - max_in_flight_b = Math.max(max_in_flight_b, in_flight_b); + in_flight_global += 1 + in_flight_b += 1 + max_in_flight_global = Math.max(max_in_flight_global, in_flight_global) + max_in_flight_b = Math.max(max_in_flight_b, in_flight_b) if (resolve_b_started) { - resolve_b_started(); - resolve_b_started = null; + resolve_b_started() + resolve_b_started = null } - await sleep(10); - in_flight_global -= 1; - in_flight_b -= 1; - }); + await sleep(10) + in_flight_global -= 1 + in_flight_b -= 1 + }) - bus_a.dispatch(SerialEvent({ order: 0, source: "a" })); - bus_b.dispatch(SerialEvent({ order: 0, source: "b" })); + bus_a.dispatch(SerialEvent({ order: 0, source: 'a' })) + bus_b.dispatch(SerialEvent({ order: 0, source: 'b' })) - await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) - assert.equal(max_in_flight_a, 1); - assert.equal(max_in_flight_b, 1); - assert.ok(max_in_flight_global >= 2); -}); + assert.equal(max_in_flight_a, 1) + assert.equal(max_in_flight_b, 1) + assert.ok(max_in_flight_global >= 2) +}) -test("bus-serial: FIFO order preserved per bus with interleaving", async () => { - const SerialEvent = BaseEvent.extend("SerialInterleavedEvent", { +test('bus-serial: FIFO order preserved per bus with interleaving', async () => { + const SerialEvent = BaseEvent.extend('SerialInterleavedEvent', { order: z.number(), - source: z.string() - }); + source: z.string(), + }) - const bus_a = new EventBus("BusSerialOrderA", { event_concurrency: "bus-serial" }); - const bus_b = new EventBus("BusSerialOrderB", { event_concurrency: "bus-serial" }); + const bus_a = new EventBus('BusSerialOrderA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('BusSerialOrderB', { event_concurrency: 'bus-serial' }) - const starts_a: number[] = []; - const starts_b: number[] = []; + const starts_a: number[] = [] + const starts_b: number[] = [] bus_a.on(SerialEvent, async (event) => { - starts_a.push(event.order); - await sleep(2); - }); + starts_a.push(event.order) + await sleep(2) + }) bus_b.on(SerialEvent, async (event) => { - starts_b.push(event.order); - await sleep(2); - }); + starts_b.push(event.order) + await sleep(2) + }) for (let i = 0; i < 4; i += 1) { - bus_a.dispatch(SerialEvent({ order: i, source: "a" })); - bus_b.dispatch(SerialEvent({ order: i, source: "b" })); + bus_a.dispatch(SerialEvent({ order: i, source: 'a' })) + bus_b.dispatch(SerialEvent({ order: i, source: 'b' })) } - await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) - assert.deepEqual(starts_a, [0, 1, 2, 3]); - assert.deepEqual(starts_b, [0, 1, 2, 3]); -}); + assert.deepEqual(starts_a, [0, 1, 2, 3]) + assert.deepEqual(starts_b, [0, 1, 2, 3]) +}) -test("bus-serial: awaiting child on one bus does not block other bus queue", async () => { - const ParentEvent = BaseEvent.extend("BusSerialParent", {}); - const ChildEvent = BaseEvent.extend("BusSerialChild", {}); - const OtherEvent = BaseEvent.extend("BusSerialOther", {}); +test('bus-serial: awaiting child on one bus does not block other bus queue', async () => { + const ParentEvent = BaseEvent.extend('BusSerialParent', {}) + const ChildEvent = BaseEvent.extend('BusSerialChild', {}) + const OtherEvent = BaseEvent.extend('BusSerialOther', {}) - const bus_a = new EventBus("BusSerialParentBus", { event_concurrency: "bus-serial" }); - const bus_b = new EventBus("BusSerialOtherBus", { event_concurrency: "bus-serial" }); + const bus_a = new EventBus('BusSerialParentBus', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('BusSerialOtherBus', { event_concurrency: 'bus-serial' }) - const order: string[] = []; + const order: string[] = [] bus_a.on(ChildEvent, async () => { - order.push("child_start"); - await sleep(10); - order.push("child_end"); - }); + order.push('child_start') + await sleep(10) + order.push('child_end') + }) bus_a.on(ParentEvent, async (event) => { - order.push("parent_start"); - const child = event.bus?.emit(ChildEvent({}))!; - await child.done(); - order.push("parent_end"); - }); + order.push('parent_start') + const child = event.bus?.emit(ChildEvent({}))! + await child.done() + order.push('parent_end') + }) bus_b.on(OtherEvent, async () => { - order.push("other_start"); - await sleep(2); - order.push("other_end"); - }); - - const parent = bus_a.dispatch(ParentEvent({})); - await sleep(0); - bus_b.dispatch(OtherEvent({})); - - await parent.done(); - await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); - - const other_start_idx = order.indexOf("other_start"); - const parent_end_idx = order.indexOf("parent_end"); - assert.ok(other_start_idx !== -1); - assert.ok(parent_end_idx !== -1); - assert.ok(other_start_idx < parent_end_idx); -}); - -test("parallel: events overlap on same bus when event_concurrency is parallel", async () => { - const ParallelEvent = BaseEvent.extend("ParallelEvent", { order: z.number() }); - const bus = new EventBus("ParallelEventBus", { - event_concurrency: "parallel", - handler_concurrency: "parallel" - }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); - setTimeout(() => resolve(), 20); - - bus.on(ParallelEvent, async (event) => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - await sleep(10); - in_flight -= 1; - }); - - bus.dispatch(ParallelEvent({ order: 0 })); - bus.dispatch(ParallelEvent({ order: 1 })); - - await bus.waitUntilIdle(); - assert.ok(max_in_flight >= 2); -}); - -test("parallel: handlers overlap for same event when handler_concurrency is parallel", async () => { - const ParallelHandlerEvent = BaseEvent.extend("ParallelHandlerEvent", {}); - const bus = new EventBus("ParallelHandlerBus", { - event_concurrency: "bus-serial", - handler_concurrency: "parallel" - }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + order.push('other_start') + await sleep(2) + order.push('other_end') + }) + + const parent = bus_a.dispatch(ParentEvent({})) + await sleep(0) + bus_b.dispatch(OtherEvent({})) + + await parent.done() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + const other_start_idx = order.indexOf('other_start') + const parent_end_idx = order.indexOf('parent_end') + assert.ok(other_start_idx !== -1) + assert.ok(parent_end_idx !== -1) + assert.ok(other_start_idx < parent_end_idx) +}) + +test('parallel: events overlap on same bus when event_concurrency is parallel', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEvent', { order: z.number() }) + const bus = new EventBus('ParallelEventBus', { + event_concurrency: 'parallel', + handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + setTimeout(() => resolve(), 20) + + bus.on(ParallelEvent, async (_event) => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + await sleep(10) + in_flight -= 1 + }) + + bus.dispatch(ParallelEvent({ order: 0 })) + bus.dispatch(ParallelEvent({ order: 1 })) + + await bus.waitUntilIdle() + assert.ok(max_in_flight >= 2) +}) + +test('parallel: handlers overlap for same event when handler_concurrency is parallel', async () => { + const ParallelHandlerEvent = BaseEvent.extend('ParallelHandlerEvent', {}) + const bus = new EventBus('ParallelHandlerBus', { + event_concurrency: 'bus-serial', + handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() const handler_a = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } const handler_b = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; - - bus.on(ParallelHandlerEvent, handler_a); - bus.on(ParallelHandlerEvent, handler_b); - - const event = bus.dispatch(ParallelHandlerEvent({})); - await sleep(0); - resolve(); - await event.done(); - await bus.waitUntilIdle(); - - assert.ok(max_in_flight >= 2); -}); - -test("parallel: global-serial handler limiter still serializes across buses", async () => { - const ParallelEvent = BaseEvent.extend("ParallelEventGlobalHandler", { - source: z.string() - }); - - const bus_a = new EventBus("ParallelHandlerGlobalA", { - event_concurrency: "parallel", - handler_concurrency: "global-serial" - }); - const bus_b = new EventBus("ParallelHandlerGlobalB", { - event_concurrency: "parallel", - handler_concurrency: "global-serial" - }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(ParallelHandlerEvent, handler_a) + bus.on(ParallelHandlerEvent, handler_b) + + const event = bus.dispatch(ParallelHandlerEvent({})) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('parallel: global-serial handler limiter still serializes across buses', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEventGlobalHandler', { + source: z.string(), + }) + + const bus_a = new EventBus('ParallelHandlerGlobalA', { + event_concurrency: 'parallel', + handler_concurrency: 'global-serial', + }) + const bus_b = new EventBus('ParallelHandlerGlobalB', { + event_concurrency: 'parallel', + handler_concurrency: 'global-serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() const handler = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } - bus_a.on(ParallelEvent, handler); - bus_b.on(ParallelEvent, handler); + bus_a.on(ParallelEvent, handler) + bus_b.on(ParallelEvent, handler) - bus_a.dispatch(ParallelEvent({ source: "a" })); - bus_b.dispatch(ParallelEvent({ source: "b" })); + bus_a.dispatch(ParallelEvent({ source: 'a' })) + bus_b.dispatch(ParallelEvent({ source: 'b' })) - await sleep(0); - resolve(); - await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); + await sleep(0) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) - assert.equal(max_in_flight, 1); -}); + assert.equal(max_in_flight, 1) +}) -test("precedence: event handler_concurrency overrides handler options", async () => { - const OverrideEvent = BaseEvent.extend("OverrideEvent", { - handler_concurrency: z.literal("bus-serial") - }); - const bus = new EventBus("OverrideBus", { handler_concurrency: "parallel" }); +test('precedence: event handler_concurrency overrides handler options', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEvent', { + handler_concurrency: z.literal('bus-serial'), + }) + const bus = new EventBus('OverrideBus', { handler_concurrency: 'parallel' }) - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() const handler = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } - bus.on(OverrideEvent, handler, { handler_concurrency: "parallel" }); - bus.on(OverrideEvent, handler, { handler_concurrency: "parallel" }); + bus.on(OverrideEvent, handler, { handler_concurrency: 'parallel' }) + bus.on(OverrideEvent, handler, { handler_concurrency: 'parallel' }) - const event = bus.dispatch(OverrideEvent({ handler_concurrency: "bus-serial" })); - await sleep(0); - resolve(); - await event.done(); - await bus.waitUntilIdle(); + const event = bus.dispatch(OverrideEvent({ handler_concurrency: 'bus-serial' })) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() - assert.equal(max_in_flight, 1); -}); + assert.equal(max_in_flight, 1) +}) -test("precedence: handler options override bus defaults when event has no override", async () => { - const OptionEvent = BaseEvent.extend("OptionEvent", {}); - const bus = new EventBus("OptionBus", { handler_concurrency: "bus-serial" }); +test('precedence: handler options override bus defaults when event has no override', async () => { + const OptionEvent = BaseEvent.extend('OptionEvent', {}) + const bus = new EventBus('OptionBus', { handler_concurrency: 'bus-serial' }) - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() const handler_a = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } const handler_b = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; - - bus.on(OptionEvent, handler_a, { handler_concurrency: "parallel" }); - bus.on(OptionEvent, handler_b, { handler_concurrency: "parallel" }); - - const event = bus.dispatch(OptionEvent({})); - await sleep(0); - resolve(); - await event.done(); - await bus.waitUntilIdle(); - - assert.ok(max_in_flight >= 2); -}); - -test("precedence: event handler_concurrency overrides handler options to parallel", async () => { - const OverrideEvent = BaseEvent.extend("OverrideEventParallelHandlers", { - handler_concurrency: z.literal("parallel") - }); - const bus = new EventBus("OverrideParallelHandlersBus", { handler_concurrency: "bus-serial" }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(OptionEvent, handler_a, { handler_concurrency: 'parallel' }) + bus.on(OptionEvent, handler_b, { handler_concurrency: 'parallel' }) + + const event = bus.dispatch(OptionEvent({})) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('precedence: event handler_concurrency overrides handler options to parallel', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEventParallelHandlers', { + handler_concurrency: z.literal('parallel'), + }) + const bus = new EventBus('OverrideParallelHandlersBus', { handler_concurrency: 'bus-serial' }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() const handler = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; - - bus.on(OverrideEvent, handler, { handler_concurrency: "bus-serial" }); - bus.on(OverrideEvent, handler, { handler_concurrency: "bus-serial" }); - - const event = bus.dispatch(OverrideEvent({ handler_concurrency: "parallel" })); - await sleep(0); - resolve(); - await event.done(); - await bus.waitUntilIdle(); - - assert.ok(max_in_flight >= 2); -}); - -test("precedence: event event_concurrency overrides bus defaults to parallel", async () => { - const OverrideEvent = BaseEvent.extend("OverrideEventParallelEvents", { - event_concurrency: z.literal("parallel"), - order: z.number() - }); - const bus = new EventBus("OverrideParallelEventsBus", { - event_concurrency: "bus-serial", - handler_concurrency: "parallel" - }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(OverrideEvent, handler, { handler_concurrency: 'bus-serial' }) + bus.on(OverrideEvent, handler, { handler_concurrency: 'bus-serial' }) + + const event = bus.dispatch(OverrideEvent({ handler_concurrency: 'parallel' })) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('precedence: event event_concurrency overrides bus defaults to parallel', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEventParallelEvents', { + event_concurrency: z.literal('parallel'), + order: z.number(), + }) + const bus = new EventBus('OverrideParallelEventsBus', { + event_concurrency: 'bus-serial', + handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() bus.on(OverrideEvent, async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }); - - bus.dispatch(OverrideEvent({ order: 0, event_concurrency: "parallel" })); - bus.dispatch(OverrideEvent({ order: 1, event_concurrency: "parallel" })); - - await sleep(0); - resolve(); - await bus.waitUntilIdle(); - - assert.ok(max_in_flight >= 2); -}); - -test("precedence: event event_concurrency overrides bus defaults to bus-serial", async () => { - const OverrideEvent = BaseEvent.extend("OverrideEventBusSerial", { - event_concurrency: z.literal("bus-serial"), - order: z.number() - }); - const bus = new EventBus("OverrideBusSerialEventsBus", { - event_concurrency: "parallel", - handler_concurrency: "parallel" - }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus.dispatch(OverrideEvent({ order: 0, event_concurrency: 'parallel' })) + bus.dispatch(OverrideEvent({ order: 1, event_concurrency: 'parallel' })) + + await sleep(0) + resolve() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('precedence: event event_concurrency overrides bus defaults to bus-serial', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEventBusSerial', { + event_concurrency: z.literal('bus-serial'), + order: z.number(), + }) + const bus = new EventBus('OverrideBusSerialEventsBus', { + event_concurrency: 'parallel', + handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() bus.on(OverrideEvent, async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }); - - bus.dispatch(OverrideEvent({ order: 0, event_concurrency: "bus-serial" })); - bus.dispatch(OverrideEvent({ order: 1, event_concurrency: "bus-serial" })); - - await sleep(0); - assert.equal(max_in_flight, 1); - resolve(); - await bus.waitUntilIdle(); -}); - -test("global-serial + handler parallel: handlers overlap but events do not across buses", async () => { - const SerialParallelEvent = BaseEvent.extend("GlobalSerialParallelHandlers", {}); - - const bus_a = new EventBus("GlobalSerialParallelA", { - event_concurrency: "global-serial", - handler_concurrency: "parallel" - }); - const bus_b = new EventBus("GlobalSerialParallelB", { - event_concurrency: "global-serial", - handler_concurrency: "parallel" - }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus.dispatch(OverrideEvent({ order: 0, event_concurrency: 'bus-serial' })) + bus.dispatch(OverrideEvent({ order: 1, event_concurrency: 'bus-serial' })) + + await sleep(0) + assert.equal(max_in_flight, 1) + resolve() + await bus.waitUntilIdle() +}) + +test('global-serial + handler parallel: handlers overlap but events do not across buses', async () => { + const SerialParallelEvent = BaseEvent.extend('GlobalSerialParallelHandlers', {}) + + const bus_a = new EventBus('GlobalSerialParallelA', { + event_concurrency: 'global-serial', + handler_concurrency: 'parallel', + }) + const bus_b = new EventBus('GlobalSerialParallelB', { + event_concurrency: 'global-serial', + handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() const handler = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; - - bus_a.on(SerialParallelEvent, handler); - bus_a.on(SerialParallelEvent, handler); - bus_b.on(SerialParallelEvent, handler); - bus_b.on(SerialParallelEvent, handler); - - bus_a.dispatch(SerialParallelEvent({})); - bus_b.dispatch(SerialParallelEvent({})); - - await sleep(0); - assert.equal(max_in_flight, 2); - resolve(); - await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); -}); - -test("event parallel + handler bus-serial: handlers serialize within a bus across events", async () => { - const ParallelEvent = BaseEvent.extend("ParallelEventsSerialHandlers", { order: z.number() }); - const bus = new EventBus("ParallelEventsSerialHandlersBus", { - event_concurrency: "parallel", - handler_concurrency: "bus-serial" - }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus_a.on(SerialParallelEvent, handler) + bus_a.on(SerialParallelEvent, handler) + bus_b.on(SerialParallelEvent, handler) + bus_b.on(SerialParallelEvent, handler) + + bus_a.dispatch(SerialParallelEvent({})) + bus_b.dispatch(SerialParallelEvent({})) + + await sleep(0) + assert.equal(max_in_flight, 2) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) +}) + +test('event parallel + handler bus-serial: handlers serialize within a bus across events', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEventsSerialHandlers', { order: z.number() }) + const bus = new EventBus('ParallelEventsSerialHandlersBus', { + event_concurrency: 'parallel', + handler_concurrency: 'bus-serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() bus.on(ParallelEvent, async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }); - - bus.dispatch(ParallelEvent({ order: 0 })); - bus.dispatch(ParallelEvent({ order: 1 })); - - await sleep(0); - assert.equal(max_in_flight, 1); - resolve(); - await bus.waitUntilIdle(); -}); - -test("event parallel + handler bus-serial: handlers overlap across buses", async () => { - const ParallelEvent = BaseEvent.extend("ParallelEventsBusHandlers", { source: z.string() }); - - const bus_a = new EventBus("ParallelBusHandlersA", { - event_concurrency: "parallel", - handler_concurrency: "bus-serial" - }); - const bus_b = new EventBus("ParallelBusHandlersB", { - event_concurrency: "parallel", - handler_concurrency: "bus-serial" - }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus.dispatch(ParallelEvent({ order: 0 })) + bus.dispatch(ParallelEvent({ order: 1 })) + + await sleep(0) + assert.equal(max_in_flight, 1) + resolve() + await bus.waitUntilIdle() +}) + +test('event parallel + handler bus-serial: handlers overlap across buses', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEventsBusHandlers', { source: z.string() }) + + const bus_a = new EventBus('ParallelBusHandlersA', { + event_concurrency: 'parallel', + handler_concurrency: 'bus-serial', + }) + const bus_b = new EventBus('ParallelBusHandlersB', { + event_concurrency: 'parallel', + handler_concurrency: 'bus-serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() const handler = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; - - bus_a.on(ParallelEvent, handler); - bus_b.on(ParallelEvent, handler); - - bus_a.dispatch(ParallelEvent({ source: "a" })); - bus_b.dispatch(ParallelEvent({ source: "b" })); - - await sleep(0); - assert.ok(max_in_flight >= 2); - resolve(); - await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); -}); - -test("handler options can enforce global-serial even when bus defaults to parallel", async () => { - const HandlerEvent = BaseEvent.extend("HandlerOptionsGlobalSerial", { source: z.string() }); - - const bus_a = new EventBus("HandlerOptionsGlobalA", { - event_concurrency: "parallel", - handler_concurrency: "parallel" - }); - const bus_b = new EventBus("HandlerOptionsGlobalB", { - event_concurrency: "parallel", - handler_concurrency: "parallel" - }); - - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus_a.on(ParallelEvent, handler) + bus_b.on(ParallelEvent, handler) + + bus_a.dispatch(ParallelEvent({ source: 'a' })) + bus_b.dispatch(ParallelEvent({ source: 'b' })) + + await sleep(0) + assert.ok(max_in_flight >= 2) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) +}) + +test('handler options can enforce global-serial even when bus defaults to parallel', async () => { + const HandlerEvent = BaseEvent.extend('HandlerOptionsGlobalSerial', { source: z.string() }) + + const bus_a = new EventBus('HandlerOptionsGlobalA', { + event_concurrency: 'parallel', + handler_concurrency: 'parallel', + }) + const bus_b = new EventBus('HandlerOptionsGlobalB', { + event_concurrency: 'parallel', + handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() const handler = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } - bus_a.on(HandlerEvent, handler, { handler_concurrency: "global-serial" }); - bus_b.on(HandlerEvent, handler, { handler_concurrency: "global-serial" }); + bus_a.on(HandlerEvent, handler, { handler_concurrency: 'global-serial' }) + bus_b.on(HandlerEvent, handler, { handler_concurrency: 'global-serial' }) - bus_a.dispatch(HandlerEvent({ source: "a" })); - bus_b.dispatch(HandlerEvent({ source: "b" })); + bus_a.dispatch(HandlerEvent({ source: 'a' })) + bus_b.dispatch(HandlerEvent({ source: 'b' })) - await sleep(0); - assert.equal(max_in_flight, 1); - resolve(); - await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); -}); + await sleep(0) + assert.equal(max_in_flight, 1) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) +}) -test("auto: event_concurrency auto resolves to bus defaults", async () => { - const AutoEvent = BaseEvent.extend("AutoEvent", { - event_concurrency: z.literal("auto") - }); - const bus = new EventBus("AutoBus", { event_concurrency: "bus-serial" }); +test('auto: event_concurrency auto resolves to bus defaults', async () => { + const AutoEvent = BaseEvent.extend('AutoEvent', { + event_concurrency: z.literal('auto'), + }) + const bus = new EventBus('AutoBus', { event_concurrency: 'bus-serial' }) - let in_flight = 0; - let max_in_flight = 0; + let in_flight = 0 + let max_in_flight = 0 bus.on(AutoEvent, async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await sleep(5); - in_flight -= 1; - }); + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await sleep(5) + in_flight -= 1 + }) - bus.dispatch(AutoEvent({ event_concurrency: "auto" })); - bus.dispatch(AutoEvent({ event_concurrency: "auto" })); + bus.dispatch(AutoEvent({ event_concurrency: 'auto' })) + bus.dispatch(AutoEvent({ event_concurrency: 'auto' })) - await bus.waitUntilIdle(); - assert.equal(max_in_flight, 1); -}); + await bus.waitUntilIdle() + assert.equal(max_in_flight, 1) +}) -test("auto: handler_concurrency auto resolves to bus defaults", async () => { - const AutoHandlerEvent = BaseEvent.extend("AutoHandlerEvent", { - handler_concurrency: z.literal("auto") - }); - const bus = new EventBus("AutoHandlerBus", { handler_concurrency: "bus-serial" }); +test('auto: handler_concurrency auto resolves to bus defaults', async () => { + const AutoHandlerEvent = BaseEvent.extend('AutoHandlerEvent', { + handler_concurrency: z.literal('auto'), + }) + const bus = new EventBus('AutoHandlerBus', { handler_concurrency: 'bus-serial' }) - let in_flight = 0; - let max_in_flight = 0; - const { promise, resolve } = withResolvers(); + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() const handler = async () => { - in_flight += 1; - max_in_flight = Math.max(max_in_flight, in_flight); - await promise; - in_flight -= 1; - }; + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } - bus.on(AutoHandlerEvent, handler); - bus.on(AutoHandlerEvent, handler); + bus.on(AutoHandlerEvent, handler) + bus.on(AutoHandlerEvent, handler) - const event = bus.dispatch(AutoHandlerEvent({ handler_concurrency: "auto" })); - await sleep(0); - resolve(); - await event.done(); - await bus.waitUntilIdle(); + const event = bus.dispatch(AutoHandlerEvent({ handler_concurrency: 'auto' })) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() - assert.equal(max_in_flight, 1); -}); + assert.equal(max_in_flight, 1) +}) -test("queue-jump: awaited child preempts queued sibling on same bus", async () => { - const ParentEvent = BaseEvent.extend("QueueJumpParent", {}); - const ChildEvent = BaseEvent.extend("QueueJumpChild", {}); - const SiblingEvent = BaseEvent.extend("QueueJumpSibling", {}); +test('queue-jump: awaited child preempts queued sibling on same bus', async () => { + const ParentEvent = BaseEvent.extend('QueueJumpParent', {}) + const ChildEvent = BaseEvent.extend('QueueJumpChild', {}) + const SiblingEvent = BaseEvent.extend('QueueJumpSibling', {}) - const bus = new EventBus("QueueJumpBus", { event_concurrency: "bus-serial" }); - const order: string[] = []; + const bus = new EventBus('QueueJumpBus', { event_concurrency: 'bus-serial' }) + const order: string[] = [] bus.on(ChildEvent, async () => { - order.push("child_start"); - await sleep(5); - order.push("child_end"); - }); + order.push('child_start') + await sleep(5) + order.push('child_end') + }) bus.on(SiblingEvent, async () => { - order.push("sibling_start"); - await sleep(1); - order.push("sibling_end"); - }); + order.push('sibling_start') + await sleep(1) + order.push('sibling_end') + }) bus.on(ParentEvent, async (event) => { - order.push("parent_start"); - bus.emit(SiblingEvent({})); - const child = event.bus?.emit(ChildEvent({}))!; - order.push("child_dispatched"); - await child.done(); - order.push("child_awaited"); - order.push("parent_end"); - }); - - const parent = bus.dispatch(ParentEvent({})); - await parent.done(); - await bus.waitUntilIdle(); - - const child_start_idx = order.indexOf("child_start"); - const child_end_idx = order.indexOf("child_end"); - const sibling_start_idx = order.indexOf("sibling_start"); - - assert.ok(child_start_idx !== -1); - assert.ok(child_end_idx !== -1); - assert.ok(sibling_start_idx !== -1); - assert.ok(child_start_idx < sibling_start_idx); - assert.ok(child_end_idx < sibling_start_idx); -}); - -test("queue-jump: awaiting in-flight event does not double-run handlers", async () => { - const InFlightEvent = BaseEvent.extend("InFlightEvent", {}); - const bus = new EventBus("InFlightBus", { - event_concurrency: "parallel", - handler_concurrency: "parallel" - }); - - let handler_runs = 0; - let resolve_started: (() => void) | null = null; + order.push('parent_start') + bus.emit(SiblingEvent({})) + const child = event.bus?.emit(ChildEvent({}))! + order.push('child_dispatched') + await child.done() + order.push('child_awaited') + order.push('parent_end') + }) + + const parent = bus.dispatch(ParentEvent({})) + await parent.done() + await bus.waitUntilIdle() + + const child_start_idx = order.indexOf('child_start') + const child_end_idx = order.indexOf('child_end') + const sibling_start_idx = order.indexOf('sibling_start') + + assert.ok(child_start_idx !== -1) + assert.ok(child_end_idx !== -1) + assert.ok(sibling_start_idx !== -1) + assert.ok(child_start_idx < sibling_start_idx) + assert.ok(child_end_idx < sibling_start_idx) +}) + +test('queue-jump: awaiting in-flight event does not double-run handlers', async () => { + const InFlightEvent = BaseEvent.extend('InFlightEvent', {}) + const bus = new EventBus('InFlightBus', { + event_concurrency: 'parallel', + handler_concurrency: 'parallel', + }) + + let handler_runs = 0 + let resolve_started: (() => void) | null = null const started = new Promise((resolve) => { - resolve_started = resolve; - }); - const { promise: release_child, resolve: resolve_child } = withResolvers(); + resolve_started = resolve + }) + const { promise: release_child, resolve: resolve_child } = withResolvers() bus.on(InFlightEvent, async () => { - handler_runs += 1; + handler_runs += 1 if (resolve_started) { - resolve_started(); - resolve_started = null; + resolve_started() + resolve_started = null } - await release_child; - }); + await release_child + }) - const child = bus.dispatch(InFlightEvent({})); - await started; + const child = bus.dispatch(InFlightEvent({})) + await started - let done_resolved = false; + let done_resolved = false const done_promise = child.done().then(() => { - done_resolved = true; - }); + done_resolved = true + }) - await sleep(0); - assert.equal(done_resolved, false); + await sleep(0) + assert.equal(done_resolved, false) - resolve_child(); - await done_promise; - await bus.waitUntilIdle(); + resolve_child() + await done_promise + await bus.waitUntilIdle() - assert.equal(handler_runs, 1); -}); + assert.equal(handler_runs, 1) +}) -test("edge-case: event with no handlers completes immediately", async () => { - const NoHandlerEvent = BaseEvent.extend("NoHandlerEvent", {}); - const bus = new EventBus("NoHandlerBus"); +test('edge-case: event with no handlers completes immediately', async () => { + const NoHandlerEvent = BaseEvent.extend('NoHandlerEvent', {}) + const bus = new EventBus('NoHandlerBus') - const event = bus.dispatch(NoHandlerEvent({})); - await event.done(); - await bus.waitUntilIdle(); + const event = bus.dispatch(NoHandlerEvent({})) + await event.done() + await bus.waitUntilIdle() - assert.equal(event.event_status, "completed"); - assert.equal(event.event_pending_buses, 0); -}); + assert.equal(event.event_status, 'completed') + assert.equal(event.event_pending_buses, 0) +}) -test("fifo: forwarded events preserve order on target bus (bus-serial)", async () => { - const OrderedEvent = BaseEvent.extend("ForwardOrderEvent", { order: z.number() }); +test('fifo: forwarded events preserve order on target bus (bus-serial)', async () => { + const OrderedEvent = BaseEvent.extend('ForwardOrderEvent', { order: z.number() }) - const bus_a = new EventBus("ForwardOrderA", { event_concurrency: "bus-serial" }); - const bus_b = new EventBus("ForwardOrderB", { event_concurrency: "bus-serial" }); + const bus_a = new EventBus('ForwardOrderA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('ForwardOrderB', { event_concurrency: 'bus-serial' }) - const order_a: number[] = []; - const order_b: number[] = []; + const order_a: number[] = [] + const order_b: number[] = [] bus_a.on(OrderedEvent, async (event) => { - order_a.push(event.order); - bus_b.dispatch(event); - await sleep(2); - }); + order_a.push(event.order) + bus_b.dispatch(event) + await sleep(2) + }) bus_b.on(OrderedEvent, async (event) => { - const bus_b_results = Array.from(event.event_results.values()).filter( - (result) => result.eventbus_name === "ForwardOrderB" - ); - const in_flight = bus_b_results.filter( - (result) => result.status === "pending" || result.status === "started" - ); - assert.ok(in_flight.length <= 1); - order_b.push(event.order); - await sleep(1); - }); + const bus_b_results = Array.from(event.event_results.values()).filter((result) => result.eventbus_name === 'ForwardOrderB') + const in_flight = bus_b_results.filter((result) => result.status === 'pending' || result.status === 'started') + assert.ok(in_flight.length <= 1) + order_b.push(event.order) + await sleep(1) + }) for (let i = 0; i < 5; i += 1) { - bus_a.dispatch(OrderedEvent({ order: i })); + bus_a.dispatch(OrderedEvent({ order: i })) } - await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]); + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) - const history_orders = Array.from(bus_b.event_history.values()).map((event) => (event as { order?: number }).order); - const results_sizes = Array.from(bus_b.event_history.values()).map((event) => event.event_results.size); - const bus_b_result_counts = Array.from(bus_b.event_history.values()).map((event) => - Array.from(event.event_results.values()).filter( - (result) => result.eventbus_name === "ForwardOrderB" - ).length - ); + const history_orders = Array.from(bus_b.event_history.values()).map((event) => (event as { order?: number }).order) + const results_sizes = Array.from(bus_b.event_history.values()).map((event) => event.event_results.size) + const bus_b_result_counts = Array.from(bus_b.event_history.values()).map( + (event) => Array.from(event.event_results.values()).filter((result) => result.eventbus_name === 'ForwardOrderB').length + ) const processed_flags = Array.from(bus_b.event_history.values()).map((event) => Array.from(event.event_results.values()) - .filter((result) => result.eventbus_name === "ForwardOrderB") - .every((result) => result.status === "completed" || result.status === "error") - ); + .filter((result) => result.eventbus_name === 'ForwardOrderB') + .every((result) => result.status === 'completed' || result.status === 'error') + ) const pending_counts = Array.from(bus_b.event_history.values()).map( - (event) => Array.from(event.event_results.values()).filter((result) => result.status === "pending").length - ); - assert.deepEqual(order_a, [0, 1, 2, 3, 4]); - assert.deepEqual(order_b, [0, 1, 2, 3, 4]); - assert.deepEqual(history_orders, [0, 1, 2, 3, 4]); - assert.deepEqual(results_sizes, [2, 2, 2, 2, 2]); - assert.deepEqual(bus_b_result_counts, [1, 1, 1, 1, 1]); - assert.deepEqual(processed_flags, [true, true, true, true, true]); - assert.deepEqual(pending_counts, [0, 0, 0, 0, 0]); -}); - -test("fifo: forwarded events preserve order across chained buses (bus-serial)", async () => { - const OrderedEvent = BaseEvent.extend("ForwardChainEvent", { order: z.number() }); - - const bus_a = new EventBus("ForwardChainA", { event_concurrency: "bus-serial" }); - const bus_b = new EventBus("ForwardChainB", { event_concurrency: "bus-serial" }); - const bus_c = new EventBus("ForwardChainC", { event_concurrency: "bus-serial" }); - - const order_c: number[] = []; + (event) => Array.from(event.event_results.values()).filter((result) => result.status === 'pending').length + ) + assert.deepEqual(order_a, [0, 1, 2, 3, 4]) + assert.deepEqual(order_b, [0, 1, 2, 3, 4]) + assert.deepEqual(history_orders, [0, 1, 2, 3, 4]) + assert.deepEqual(results_sizes, [2, 2, 2, 2, 2]) + assert.deepEqual(bus_b_result_counts, [1, 1, 1, 1, 1]) + assert.deepEqual(processed_flags, [true, true, true, true, true]) + assert.deepEqual(pending_counts, [0, 0, 0, 0, 0]) +}) + +test('fifo: forwarded events preserve order across chained buses (bus-serial)', async () => { + const OrderedEvent = BaseEvent.extend('ForwardChainEvent', { order: z.number() }) + + const bus_a = new EventBus('ForwardChainA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('ForwardChainB', { event_concurrency: 'bus-serial' }) + const bus_c = new EventBus('ForwardChainC', { event_concurrency: 'bus-serial' }) + + const order_c: number[] = [] bus_b.on(OrderedEvent, async () => { - await sleep(2); - }); + await sleep(2) + }) bus_c.on(OrderedEvent, async (event) => { - order_c.push(event.order); - await sleep(1); - }); + order_c.push(event.order) + await sleep(1) + }) - bus_a.on("*", bus_b.dispatch); - bus_b.on("*", bus_c.dispatch); + bus_a.on('*', bus_b.dispatch) + bus_b.on('*', bus_c.dispatch) for (let i = 0; i < 6; i += 1) { - bus_a.dispatch(OrderedEvent({ order: i })); + bus_a.dispatch(OrderedEvent({ order: i })) } - await bus_a.waitUntilIdle(); - await bus_b.waitUntilIdle(); - await bus_c.waitUntilIdle(); + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() - assert.deepEqual(order_c, [0, 1, 2, 3, 4, 5]); -}); + assert.deepEqual(order_c, [0, 1, 2, 3, 4, 5]) +}) -test("find: past returns most recent completed event (bus-scoped)", async () => { - const DebounceEvent = BaseEvent.extend("FindPastEvent", { value: z.number() }); - const bus = new EventBus("FindPastBus"); +test('find: past returns most recent completed event (bus-scoped)', async () => { + const DebounceEvent = BaseEvent.extend('FindPastEvent', { value: z.number() }) + const bus = new EventBus('FindPastBus') - bus.on(DebounceEvent, async () => {}); + bus.on(DebounceEvent, async () => {}) - bus.dispatch(DebounceEvent({ value: 1 })); - bus.dispatch(DebounceEvent({ value: 2 })); + bus.dispatch(DebounceEvent({ value: 1 })) + bus.dispatch(DebounceEvent({ value: 2 })) - await bus.waitUntilIdle(); + await bus.waitUntilIdle() - const found = await bus.find(DebounceEvent, { past: true, future: false }); - assert.ok(found); - assert.equal(found.value, 2); - assert.equal(found.event_status, "completed"); - assert.ok(found.bus); - assert.equal(found.bus.name, "FindPastBus"); - assert.equal(typeof found.bus.dispatch, "function"); -}); + const found = await bus.find(DebounceEvent, { past: true, future: false }) + assert.ok(found) + assert.equal(found.value, 2) + assert.equal(found.event_status, 'completed') + assert.ok(found.bus) + assert.equal(found.bus.name, 'FindPastBus') + assert.equal(typeof found.bus.dispatch, 'function') +}) -test("find: future returns in-flight event and done waits", async () => { - const DebounceEvent = BaseEvent.extend("FindFutureEvent", { value: z.number() }); - const bus = new EventBus("FindFutureBus"); - const { promise, resolve } = withResolvers(); +test('find: future returns in-flight event and done waits', async () => { + const DebounceEvent = BaseEvent.extend('FindFutureEvent', { value: z.number() }) + const bus = new EventBus('FindFutureBus') + const { promise, resolve } = withResolvers() bus.on(DebounceEvent, async () => { - await promise; - }); + await promise + }) - bus.dispatch(DebounceEvent({ value: 1 })); + bus.dispatch(DebounceEvent({ value: 1 })) - const found = await bus.find(DebounceEvent, { past: false, future: true }); - assert.ok(found); - assert.equal(found.value, 1); - assert.ok(found.event_status !== "completed"); - assert.ok(found.bus); - assert.equal(found.bus.name, "FindFutureBus"); + const found = await bus.find(DebounceEvent, { past: false, future: true }) + assert.ok(found) + assert.equal(found.value, 1) + assert.ok(found.event_status !== 'completed') + assert.ok(found.bus) + assert.equal(found.bus.name, 'FindFutureBus') - resolve(); - const completed = await found.done(); - assert.equal(completed.event_status, "completed"); -}); + resolve() + const completed = await found.done() + assert.equal(completed.event_status, 'completed') +}) -test("find: future waits for next event when none in-flight", async () => { - const DebounceEvent = BaseEvent.extend("FindWaitEvent", { value: z.number() }); - const bus = new EventBus("FindWaitBus"); +test('find: future waits for next event when none in-flight', async () => { + const DebounceEvent = BaseEvent.extend('FindWaitEvent', { value: z.number() }) + const bus = new EventBus('FindWaitBus') - bus.on(DebounceEvent, async () => {}); + bus.on(DebounceEvent, async () => {}) setTimeout(() => { - bus.dispatch(DebounceEvent({ value: 99 })); - }, 10); - - const found = await bus.find(DebounceEvent, { past: false, future: 0.2 }); - assert.ok(found); - assert.equal(found.value, 99); - assert.ok(found.bus); - assert.equal(found.bus.name, "FindWaitBus"); - await found.done(); -}); - -test("find: most recent wins across completed and in-flight", async () => { - const DebounceEvent = BaseEvent.extend("FindMostRecentEvent", { value: z.number() }); - const bus = new EventBus("FindMostRecentBus"); - const { promise, resolve } = withResolvers(); + bus.dispatch(DebounceEvent({ value: 99 })) + }, 10) + + const found = await bus.find(DebounceEvent, { past: false, future: 0.2 }) + assert.ok(found) + assert.equal(found.value, 99) + assert.ok(found.bus) + assert.equal(found.bus.name, 'FindWaitBus') + await found.done() +}) + +test('find: most recent wins across completed and in-flight', async () => { + const DebounceEvent = BaseEvent.extend('FindMostRecentEvent', { value: z.number() }) + const bus = new EventBus('FindMostRecentBus') + const { promise, resolve } = withResolvers() bus.on(DebounceEvent, async (event) => { if (event.value === 2) { - await promise; + await promise } - }); + }) - bus.dispatch(DebounceEvent({ value: 1 })); - await bus.waitUntilIdle(); + bus.dispatch(DebounceEvent({ value: 1 })) + await bus.waitUntilIdle() - bus.dispatch(DebounceEvent({ value: 2 })); + bus.dispatch(DebounceEvent({ value: 2 })) - const found = await bus.find(DebounceEvent, { past: true, future: true }); - assert.ok(found); - assert.equal(found.value, 2); - assert.ok(found.event_status !== "completed"); + const found = await bus.find(DebounceEvent, { past: true, future: true }) + assert.ok(found) + assert.equal(found.value, 2) + assert.ok(found.event_status !== 'completed') - resolve(); - await found.done(); -}); + resolve() + await found.done() +}) diff --git a/bubus-ts/tests/log_tree.test.ts b/bubus-ts/tests/log_tree.test.ts index 42e578f..f7c24f2 100644 --- a/bubus-ts/tests/log_tree.test.ts +++ b/bubus-ts/tests/log_tree.test.ts @@ -1,224 +1,224 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { z } from "zod"; +import { z } from 'zod' -import { BaseEvent, EventBus, EventResult } from "../src/index.js"; +import { BaseEvent, EventBus, EventResult } from '../src/index.js' -const RootEvent = BaseEvent.extend("RootEvent", { data: z.string().optional() }); -const ChildEvent = BaseEvent.extend("ChildEvent", { value: z.number().optional() }); -const GrandchildEvent = BaseEvent.extend("GrandchildEvent", { nested: z.record(z.number()).optional() }); +const RootEvent = BaseEvent.extend('RootEvent', { data: z.string().optional() }) +const ChildEvent = BaseEvent.extend('ChildEvent', { value: z.number().optional() }) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', { nested: z.record(z.number()).optional() }) class ValueError extends Error { constructor(message: string) { - super(message); - this.name = "ValueError"; + super(message) + this.name = 'ValueError' } } -test("logTree: single event", () => { - const bus = new EventBus("SingleBus"); +test('logTree: single event', () => { + const bus = new EventBus('SingleBus') - const event = RootEvent({ data: "test" }); - event.event_status = "completed"; - event.event_completed_at = event.event_created_at; + const event = RootEvent({ data: 'test' }) + event.event_status = 'completed' + event.event_completed_at = event.event_created_at - bus.event_history.set(event.event_id, event); + bus.event_history.set(event.event_id, event) - const output = bus.logTree(); + const output = bus.logTree() - assert.ok(output.includes("└── βœ… RootEvent#")); - assert.ok(output.includes("[") && output.includes("]")); -}); + assert.ok(output.includes('└── βœ… RootEvent#')) + assert.ok(output.includes('[') && output.includes(']')) +}) -test("logTree: with handler results", () => { - const bus = new EventBus("HandlerBus"); +test('logTree: with handler results', () => { + const bus = new EventBus('HandlerBus') - const event = RootEvent({ data: "test" }); - event.event_status = "completed"; - event.event_completed_at = event.event_created_at; + const event = RootEvent({ data: 'test' }) + event.event_status = 'completed' + event.event_completed_at = event.event_created_at - const handler_id = "handler-1"; + const handler_id = 'handler-1' const result = new EventResult({ event_id: event.event_id, handler_id, - handler_name: "test_handler", - eventbus_name: "HandlerBus" - }); - result.markStarted(); - result.markCompleted("status: success"); - event.event_results.set(handler_id, result); + handler_name: 'test_handler', + eventbus_name: 'HandlerBus', + }) + result.markStarted() + result.markCompleted('status: success') + event.event_results.set(handler_id, result) - bus.event_history.set(event.event_id, event); + bus.event_history.set(event.event_id, event) - const output = bus.logTree(); + const output = bus.logTree() - assert.ok(output.includes("└── βœ… RootEvent#")); - assert.ok(output.includes("HandlerBus.test_handler#")); - assert.ok(output.includes("\"status: success\"")); -}); + assert.ok(output.includes('└── βœ… RootEvent#')) + assert.ok(output.includes('HandlerBus.test_handler#')) + assert.ok(output.includes('"status: success"')) +}) -test("logTree: with handler errors", () => { - const bus = new EventBus("ErrorBus"); +test('logTree: with handler errors', () => { + const bus = new EventBus('ErrorBus') - const event = RootEvent({ data: "test" }); - event.event_status = "completed"; - event.event_completed_at = event.event_created_at; + const event = RootEvent({ data: 'test' }) + event.event_status = 'completed' + event.event_completed_at = event.event_created_at - const handler_id = "handler-2"; + const handler_id = 'handler-2' const result = new EventResult({ event_id: event.event_id, handler_id, - handler_name: "error_handler", - eventbus_name: "ErrorBus" - }); - result.markStarted(); - result.markError(new ValueError("Test error message")); - event.event_results.set(handler_id, result); + handler_name: 'error_handler', + eventbus_name: 'ErrorBus', + }) + result.markStarted() + result.markError(new ValueError('Test error message')) + event.event_results.set(handler_id, result) - bus.event_history.set(event.event_id, event); + bus.event_history.set(event.event_id, event) - const output = bus.logTree(); + const output = bus.logTree() - assert.ok(output.includes("ErrorBus.error_handler#")); - assert.ok(output.includes("ValueError: Test error message")); -}); + assert.ok(output.includes('ErrorBus.error_handler#')) + assert.ok(output.includes('ValueError: Test error message')) +}) -test("logTree: complex nested", () => { - const bus = new EventBus("ComplexBus"); +test('logTree: complex nested', () => { + const bus = new EventBus('ComplexBus') - const root = RootEvent({ data: "root_data" }); - root.event_status = "completed"; - root.event_completed_at = root.event_created_at; + const root = RootEvent({ data: 'root_data' }) + root.event_status = 'completed' + root.event_completed_at = root.event_created_at - const root_handler_id = "handler-root"; + const root_handler_id = 'handler-root' const root_result = new EventResult({ event_id: root.event_id, handler_id: root_handler_id, - handler_name: "root_handler", - eventbus_name: "ComplexBus" - }); - root_result.markStarted(); - root_result.markCompleted("Root processed"); - root.event_results.set(root_handler_id, root_result); - - const child = ChildEvent({ value: 100 }); - child.event_parent_id = root.event_id; - child.event_status = "completed"; - child.event_completed_at = child.event_created_at; - root_result.event_children.push(child); - - const child_handler_id = "handler-child"; + handler_name: 'root_handler', + eventbus_name: 'ComplexBus', + }) + root_result.markStarted() + root_result.markCompleted('Root processed') + root.event_results.set(root_handler_id, root_result) + + const child = ChildEvent({ value: 100 }) + child.event_parent_id = root.event_id + child.event_status = 'completed' + child.event_completed_at = child.event_created_at + root_result.event_children.push(child) + + const child_handler_id = 'handler-child' const child_result = new EventResult({ event_id: child.event_id, handler_id: child_handler_id, - handler_name: "child_handler", - eventbus_name: "ComplexBus" - }); - child_result.markStarted(); - child_result.markCompleted([1, 2, 3]); - child.event_results.set(child_handler_id, child_result); - - const grandchild = GrandchildEvent({}); - grandchild.event_parent_id = child.event_id; - grandchild.event_status = "completed"; - grandchild.event_completed_at = grandchild.event_created_at; - child_result.event_children.push(grandchild); - - const grandchild_handler_id = "handler-grandchild"; + handler_name: 'child_handler', + eventbus_name: 'ComplexBus', + }) + child_result.markStarted() + child_result.markCompleted([1, 2, 3]) + child.event_results.set(child_handler_id, child_result) + + const grandchild = GrandchildEvent({}) + grandchild.event_parent_id = child.event_id + grandchild.event_status = 'completed' + grandchild.event_completed_at = grandchild.event_created_at + child_result.event_children.push(grandchild) + + const grandchild_handler_id = 'handler-grandchild' const grandchild_result = new EventResult({ event_id: grandchild.event_id, handler_id: grandchild_handler_id, - handler_name: "grandchild_handler", - eventbus_name: "ComplexBus" - }); - grandchild_result.markStarted(); - grandchild_result.markCompleted(null); - grandchild.event_results.set(grandchild_handler_id, grandchild_result); - - bus.event_history.set(root.event_id, root); - bus.event_history.set(child.event_id, child); - bus.event_history.set(grandchild.event_id, grandchild); - - const output = bus.logTree(); - - assert.ok(output.includes("βœ… RootEvent#")); - assert.ok(output.includes("βœ… ComplexBus.root_handler#")); - assert.ok(output.includes("βœ… ChildEvent#")); - assert.ok(output.includes("βœ… ComplexBus.child_handler#")); - assert.ok(output.includes("βœ… GrandchildEvent#")); - assert.ok(output.includes("βœ… ComplexBus.grandchild_handler#")); - assert.ok(output.includes("\"Root processed\"")); - assert.ok(output.includes("list(3 items)")); - assert.ok(output.includes("None")); -}); - -test("logTree: multiple roots", () => { - const bus = new EventBus("MultiBus"); - - const root1 = RootEvent({ data: "first" }); - root1.event_status = "completed"; - root1.event_completed_at = root1.event_created_at; - - const root2 = RootEvent({ data: "second" }); - root2.event_status = "completed"; - root2.event_completed_at = root2.event_created_at; - - bus.event_history.set(root1.event_id, root1); - bus.event_history.set(root2.event_id, root2); - - const output = bus.logTree(); - - assert.equal(output.split("β”œβ”€β”€ βœ… RootEvent#").length - 1, 1); - assert.equal(output.split("└── βœ… RootEvent#").length - 1, 1); -}); - -test("logTree: timing info", () => { - const bus = new EventBus("TimingBus"); - - const event = RootEvent({}); - event.event_status = "completed"; - event.event_completed_at = event.event_created_at; - - const handler_id = "handler-time"; + handler_name: 'grandchild_handler', + eventbus_name: 'ComplexBus', + }) + grandchild_result.markStarted() + grandchild_result.markCompleted(null) + grandchild.event_results.set(grandchild_handler_id, grandchild_result) + + bus.event_history.set(root.event_id, root) + bus.event_history.set(child.event_id, child) + bus.event_history.set(grandchild.event_id, grandchild) + + const output = bus.logTree() + + assert.ok(output.includes('βœ… RootEvent#')) + assert.ok(output.includes('βœ… ComplexBus.root_handler#')) + assert.ok(output.includes('βœ… ChildEvent#')) + assert.ok(output.includes('βœ… ComplexBus.child_handler#')) + assert.ok(output.includes('βœ… GrandchildEvent#')) + assert.ok(output.includes('βœ… ComplexBus.grandchild_handler#')) + assert.ok(output.includes('"Root processed"')) + assert.ok(output.includes('list(3 items)')) + assert.ok(output.includes('None')) +}) + +test('logTree: multiple roots', () => { + const bus = new EventBus('MultiBus') + + const root1 = RootEvent({ data: 'first' }) + root1.event_status = 'completed' + root1.event_completed_at = root1.event_created_at + + const root2 = RootEvent({ data: 'second' }) + root2.event_status = 'completed' + root2.event_completed_at = root2.event_created_at + + bus.event_history.set(root1.event_id, root1) + bus.event_history.set(root2.event_id, root2) + + const output = bus.logTree() + + assert.equal(output.split('β”œβ”€β”€ βœ… RootEvent#').length - 1, 1) + assert.equal(output.split('└── βœ… RootEvent#').length - 1, 1) +}) + +test('logTree: timing info', () => { + const bus = new EventBus('TimingBus') + + const event = RootEvent({}) + event.event_status = 'completed' + event.event_completed_at = event.event_created_at + + const handler_id = 'handler-time' const result = new EventResult({ event_id: event.event_id, handler_id, - handler_name: "timed_handler", - eventbus_name: "TimingBus" - }); - result.markStarted(); - result.markCompleted("done"); - event.event_results.set(handler_id, result); + handler_name: 'timed_handler', + eventbus_name: 'TimingBus', + }) + result.markStarted() + result.markCompleted('done') + event.event_results.set(handler_id, result) - bus.event_history.set(event.event_id, event); + bus.event_history.set(event.event_id, event) - const output = bus.logTree(); + const output = bus.logTree() - assert.ok(output.includes("(")); - assert.ok(output.includes("s)")); -}); + assert.ok(output.includes('(')) + assert.ok(output.includes('s)')) +}) -test("logTree: running handler", () => { - const bus = new EventBus("RunningBus"); +test('logTree: running handler', () => { + const bus = new EventBus('RunningBus') - const event = RootEvent({}); - event.event_status = "started"; + const event = RootEvent({}) + event.event_status = 'started' - const handler_id = "handler-running"; + const handler_id = 'handler-running' const result = new EventResult({ event_id: event.event_id, handler_id, - handler_name: "running_handler", - eventbus_name: "RunningBus" - }); - result.markStarted(); - event.event_results.set(handler_id, result); + handler_name: 'running_handler', + eventbus_name: 'RunningBus', + }) + result.markStarted() + event.event_results.set(handler_id, result) - bus.event_history.set(event.event_id, event); + bus.event_history.set(event.event_id, event) - const output = bus.logTree(); + const output = bus.logTree() - assert.ok(output.includes("RunningBus.running_handler#")); - assert.ok(output.includes("RootEvent#")); -}); + assert.ok(output.includes('RunningBus.running_handler#')) + assert.ok(output.includes('RootEvent#')) +}) diff --git a/bubus-ts/tests/parent_child.test.ts b/bubus-ts/tests/parent_child.test.ts index 0a7c0d7..698c5b4 100644 --- a/bubus-ts/tests/parent_child.test.ts +++ b/bubus-ts/tests/parent_child.test.ts @@ -1,64 +1,64 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' -const ParentEvent = BaseEvent.extend("ParentEvent", {}); -const ChildEvent = BaseEvent.extend("ChildEvent", {}); -const GrandchildEvent = BaseEvent.extend("GrandchildEvent", {}); -const UnrelatedEvent = BaseEvent.extend("UnrelatedEvent", {}); +const ParentEvent = BaseEvent.extend('ParentEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {}) +const UnrelatedEvent = BaseEvent.extend('UnrelatedEvent', {}) -test("eventIsChildOf and eventIsParentOf work for direct children", async () => { - const bus = new EventBus("ParentChildBus"); +test('eventIsChildOf and eventIsParentOf work for direct children', async () => { + const bus = new EventBus('ParentChildBus') bus.on(ParentEvent, (event) => { - event.bus?.emit(ChildEvent({})); - }); + event.bus?.emit(ChildEvent({})) + }) - const parent_event = bus.dispatch(ParentEvent({})); - await bus.waitUntilIdle(); + const parent_event = bus.dispatch(ParentEvent({})) + await bus.waitUntilIdle() - const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === "ChildEvent"); - assert.ok(child_event); + const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === 'ChildEvent') + assert.ok(child_event) - assert.equal(child_event.event_parent_id, parent_event.event_id); - assert.equal(bus.eventIsChildOf(child_event, parent_event), true); - assert.equal(bus.eventIsParentOf(parent_event, child_event), true); -}); + assert.equal(child_event.event_parent_id, parent_event.event_id) + assert.equal(bus.eventIsChildOf(child_event, parent_event), true) + assert.equal(bus.eventIsParentOf(parent_event, child_event), true) +}) -test("eventIsChildOf works for grandchildren", async () => { - const bus = new EventBus("GrandchildBus"); +test('eventIsChildOf works for grandchildren', async () => { + const bus = new EventBus('GrandchildBus') bus.on(ParentEvent, (event) => { - event.bus?.emit(ChildEvent({})); - }); + event.bus?.emit(ChildEvent({})) + }) bus.on(ChildEvent, (event) => { - event.bus?.emit(GrandchildEvent({})); - }); + event.bus?.emit(GrandchildEvent({})) + }) - const parent_event = bus.dispatch(ParentEvent({})); - await bus.waitUntilIdle(); + const parent_event = bus.dispatch(ParentEvent({})) + await bus.waitUntilIdle() - const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === "ChildEvent"); - const grandchild_event = Array.from(bus.event_history.values()).find((event) => event.event_type === "GrandchildEvent"); + const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === 'ChildEvent') + const grandchild_event = Array.from(bus.event_history.values()).find((event) => event.event_type === 'GrandchildEvent') - assert.ok(child_event); - assert.ok(grandchild_event); + assert.ok(child_event) + assert.ok(grandchild_event) - assert.equal(bus.eventIsChildOf(child_event, parent_event), true); - assert.equal(bus.eventIsChildOf(grandchild_event, parent_event), true); - assert.equal(bus.eventIsParentOf(parent_event, grandchild_event), true); -}); + assert.equal(bus.eventIsChildOf(child_event, parent_event), true) + assert.equal(bus.eventIsChildOf(grandchild_event, parent_event), true) + assert.equal(bus.eventIsParentOf(parent_event, grandchild_event), true) +}) -test("eventIsChildOf returns false for unrelated events", async () => { - const bus = new EventBus("UnrelatedBus"); +test('eventIsChildOf returns false for unrelated events', async () => { + const bus = new EventBus('UnrelatedBus') - const parent_event = bus.dispatch(ParentEvent({})); - const unrelated_event = bus.dispatch(UnrelatedEvent({})); - await parent_event.done(); - await unrelated_event.done(); + const parent_event = bus.dispatch(ParentEvent({})) + const unrelated_event = bus.dispatch(UnrelatedEvent({})) + await parent_event.done() + await unrelated_event.done() - assert.equal(bus.eventIsChildOf(unrelated_event, parent_event), false); - assert.equal(bus.eventIsParentOf(parent_event, unrelated_event), false); -}); + assert.equal(bus.eventIsChildOf(unrelated_event, parent_event), false) + assert.equal(bus.eventIsParentOf(parent_event, unrelated_event), false) +}) diff --git a/bubus-ts/tests/performance.test.ts b/bubus-ts/tests/performance.test.ts index 043b910..ea71efa 100644 --- a/bubus-ts/tests/performance.test.ts +++ b/bubus-ts/tests/performance.test.ts @@ -1,36 +1,338 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' +import { z } from 'zod' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus, EventHandlerTimeoutError, EventHandlerCancelledError } from '../src/index.js' -const SimpleEvent = BaseEvent.extend("SimpleEvent", {}); +const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) +const mb = (bytes: number) => (bytes / 1024 / 1024).toFixed(1) + +test('processes 50k events within reasonable time', { timeout: 30_000 }, async () => { + const bus = new EventBus('PerfBus', { max_history_size: 1000 }) + + let processed_count = 0 + bus.on(SimpleEvent, () => { + processed_count += 1 + }) + + const total_events = 50_000 + + global.gc?.() + const mem_before = process.memoryUsage() + + const t0 = Date.now() + + const pending: Array> = [] + for (let i = 0; i < total_events; i += 1) { + pending.push(bus.dispatch(SimpleEvent({}))) + } + + const t_dispatch = Date.now() + const mem_dispatch = process.memoryUsage() + + await Promise.all(pending.map((event) => event.done())) + await bus.waitUntilIdle() + + const t_done = Date.now() + const mem_done = process.memoryUsage() + + global.gc?.() + const mem_gc = process.memoryUsage() + + const dispatch_ms = t_dispatch - t0 + const await_ms = t_done - t_dispatch + const total_ms = t_done - t0 + + console.log( + `\n perf: ${total_events} events in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + + `\n dispatch: ${dispatch_ms}ms | await: ${await_ms}ms` + + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ dispatch=${mb(mem_dispatch.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + ) + + assert.equal(processed_count, total_events) + assert.ok(total_ms < 30_000, `Processing took ${total_ms}ms`) + assert.ok(bus.event_history.size <= bus.max_history_size) + + bus.destroy() +}) + +// Simulates a fastify backend where each request creates its own bus with handlers, +// processes events, then tears down. Tests that bus creation/destruction at scale +// doesn't leak memory or degrade performance. +test('500 ephemeral buses with 100 events each', { timeout: 30_000 }, async () => { + const total_buses = 500 + const events_per_bus = 100 + const total_events = total_buses * events_per_bus + + let processed_count = 0 + + global.gc?.() + const mem_before = process.memoryUsage() + const t0 = Date.now() + + for (let b = 0; b < total_buses; b += 1) { + const bus = new EventBus(`ReqBus-${b}`, { max_history_size: 10 }) + + bus.on(SimpleEvent, () => { + processed_count += 1 + }) + + const pending: Array> = [] + for (let i = 0; i < events_per_bus; i += 1) { + pending.push(bus.dispatch(SimpleEvent({}))) + } + + await Promise.all(pending.map((event) => event.done())) + await bus.waitUntilIdle() + + bus.destroy() + } + + const t_done = Date.now() + const mem_done = process.memoryUsage() + + global.gc?.() + const mem_gc = process.memoryUsage() + + const total_ms = t_done - t0 + + console.log( + `\n perf: ${total_buses} buses Γ— ${events_per_bus} events = ${total_events} total in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + + `\n live bus instances: ${EventBus.instances.size}` + ) + + assert.equal(processed_count, total_events) + assert.ok(total_ms < 30_000, `Processing took ${total_ms}ms`) + // All buses should have been cleaned up from the registry + assert.equal(EventBus.instances.size, 0, 'All buses should be destroyed') +}) + +// Simulates per-request handler registration pattern: a shared bus where each +// "request" registers a handler with .on(), dispatches events, then removes the +// handler with .off(). Tests for handler map churn overhead and cleanup leaks. +test('50k events with ephemeral on/off handler registration across 2 buses', { timeout: 30_000 }, async () => { + const RequestEvent = BaseEvent.extend('RequestEvent', {}) + + const bus_a = new EventBus('SharedBusA', { max_history_size: 1000 }) + const bus_b = new EventBus('SharedBusB', { max_history_size: 1000 }) + + const total_events = 50_000 + let processed_a = 0 + let processed_b = 0 + + // Persistent handler on bus_b that forwards count + bus_b.on(RequestEvent, () => { + processed_b += 1 + }) + + global.gc?.() + const mem_before = process.memoryUsage() + const t0 = Date.now() + + for (let i = 0; i < total_events; i += 1) { + // Register ephemeral handler + const ephemeral_handler = () => { + processed_a += 1 + } + bus_a.on(RequestEvent, ephemeral_handler) + + // Dispatch on bus_a, forward to bus_b + const event = RequestEvent({}) + const ev_a = bus_a.dispatch(event) + bus_b.dispatch(event) + + await ev_a.done() + + // Tear down ephemeral handler + bus_a.off(RequestEvent, ephemeral_handler) + } + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + const t_done = Date.now() + const mem_done = process.memoryUsage() + + global.gc?.() + const mem_gc = process.memoryUsage() + + const total_ms = t_done - t0 + + console.log( + `\n perf: ${total_events} events with ephemeral on/off in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + + `\n dispatch: bus_a=${processed_a} | bus_b=${processed_b}` + + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + + `\n bus_a handlers: ${bus_a.handlers.size} | bus_b handlers: ${bus_b.handlers.size}` + ) + + assert.equal(processed_a, total_events) + assert.equal(processed_b, total_events) + assert.ok(total_ms < 30_000, `Processing took ${total_ms}ms`) + // Ephemeral handlers should all be cleaned up + assert.equal(bus_a.handlers.size, 0, 'All ephemeral handlers should be removed from bus_a') + assert.equal(bus_b.handlers.size, 1, 'bus_b should still have its persistent handler') + assert.ok(bus_a.event_history.size <= bus_a.max_history_size!) + assert.ok(bus_b.event_history.size <= bus_b.max_history_size!) + + bus_a.destroy() + bus_b.destroy() +}) + +// Worst-case memory leak stress test. Exercises every retention path simultaneously: +// multi-bus forwarding, queue-jumping (done() inside handler), timeouts that cancel +// pending handlers, nested parent-child-grandchild trees, Proxy accumulation from +// _getBusScopedEvent, ephemeral on/off handler churn, find() waiter timeouts, +// and aggressive history trimming via _gc(). If any code path leaks references, +// memory will grow unbounded across 2000 iterations. test( - "processes 20k events within reasonable time", - { timeout: 120_000 }, + 'worst-case: forwarding + queue-jump + timeouts + cancellation at scale', + { timeout: 60_000 }, async () => { - const bus = new EventBus("PerfBus", { max_history_size: 1000 }); + const ParentEvent = BaseEvent.extend('WC_Parent', { + iteration: z.number(), + }) + const ChildEvent = BaseEvent.extend('WC_Child', { + iteration: z.number(), + }) + const GrandchildEvent = BaseEvent.extend('WC_Grandchild', { + iteration: z.number(), + }) - let processed_count = 0; - bus.on(SimpleEvent, () => { - processed_count += 1; - }); + const bus_a = new EventBus('WC_A', { max_history_size: 50 }) + const bus_b = new EventBus('WC_B', { max_history_size: 50 }) + const bus_c = new EventBus('WC_C', { max_history_size: 50 }) + + const total_iterations = 2000 + let parent_handled_a = 0 + let parent_handled_b = 0 + let child_handled_c = 0 + let grandchild_handled = 0 + let timeout_count = 0 + let cancel_count = 0 + + // Persistent handler on bus_b β€” just counts + bus_b.on(ParentEvent, () => { + parent_handled_b += 1 + }) - const total_events = 20_000; - const start = Date.now(); + // Persistent handler on bus_c β€” processes child, emits grandchild + bus_c.on(ChildEvent, async (event) => { + child_handled_c += 1 + const gc = event.bus?.emit(GrandchildEvent({ iteration: (event as any).iteration }))! + bus_c.dispatch(gc) + await gc.done() + }) - const pending: Array> = []; - for (let i = 0; i < total_events; i += 1) { - pending.push(bus.dispatch(SimpleEvent({}))); + // Persistent handler on bus_c for grandchild + bus_c.on(GrandchildEvent, () => { + grandchild_handled += 1 + }) + + global.gc?.() + const mem_before = process.memoryUsage() + const t0 = Date.now() + + for (let i = 0; i < total_iterations; i += 1) { + const should_timeout = i % 5 === 0 + + // Ephemeral handler on bus_a β€” queue-jumps a child to bus_c + const ephemeral_handler = async (event: any) => { + parent_handled_a += 1 + const child_timeout = should_timeout ? 0.001 : null // 1ms timeout β†’ will fire + const child = event.bus?.emit(ChildEvent({ + iteration: i, + event_timeout: child_timeout, + }))! + bus_c.dispatch(child) + try { + await child.done() + } catch { + // Swallow β€” timeout errors are expected + } + } + bus_a.on(ParentEvent, ephemeral_handler) + + // Dispatch parent to bus_a (with handler) and bus_b (forwarding) + const parent = ParentEvent({ iteration: i }) + const ev_a = bus_a.dispatch(parent) + bus_b.dispatch(parent) + + await ev_a.done() + await bus_c.waitUntilIdle() + + // Deregister ephemeral handler + bus_a.off(ParentEvent, ephemeral_handler) + + // Periodic find() with short timeout β€” exercises find_waiter cleanup + if (i % 10 === 0) { + // Don't await β€” let it timeout in the background + bus_a.find(ParentEvent, { future: 0.001 }) + } } - await Promise.all(pending.map((event) => event.done())); - await bus.waitUntilIdle(); + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() + + // Count timeouts and cancellations from bus_c's history + for (const event of bus_c.event_history.values()) { + for (const result of event.event_results.values()) { + if (result.error instanceof EventHandlerTimeoutError) timeout_count += 1 + if (result.error instanceof EventHandlerCancelledError) cancel_count += 1 + } + } + + const t_done = Date.now() + const mem_done = process.memoryUsage() + + global.gc?.() + // Short delay to let find() timeouts and timed-out handler promises settle + await new Promise((r) => setTimeout(r, 50)) + global.gc?.() + const mem_gc = process.memoryUsage() + + const total_ms = t_done - t0 + const mem_delta_mb = (mem_gc.heapUsed - mem_before.heapUsed) / 1024 / 1024 + + console.log( + `\n worst-case: ${total_iterations} iterations in ${total_ms}ms (${Math.round(total_iterations / (total_ms / 1000))}/s)` + + `\n parent: bus_a=${parent_handled_a} bus_b=${parent_handled_b}` + + `\n child: bus_c=${child_handled_c} | grandchild=${grandchild_handled}` + + `\n timeouts=${timeout_count} cancellations=${cancel_count}` + + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB (delta=${mem_delta_mb.toFixed(1)}MB)` + + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + + `\n history: a=${bus_a.event_history.size} b=${bus_b.event_history.size} c=${bus_c.event_history.size}` + + `\n handlers: a=${bus_a.handlers.size} b=${bus_b.handlers.size} c=${bus_c.handlers.size}` + + `\n instances: ${EventBus.instances.size}` + ) + + // All iterations processed + assert.equal(parent_handled_a, total_iterations) + assert.equal(parent_handled_b, total_iterations) + + // History bounded by max_history_size + assert.ok(bus_a.event_history.size <= 50, `bus_a history ${bus_a.event_history.size} > 50`) + assert.ok(bus_b.event_history.size <= 50, `bus_b history ${bus_b.event_history.size} > 50`) + assert.ok(bus_c.event_history.size <= 50, `bus_c history ${bus_c.event_history.size} > 50`) + + // Ephemeral handlers all cleaned up + assert.equal(bus_a.handlers.size, 0, 'All ephemeral handlers removed from bus_a') + + // Memory should not grow unbounded β€” allow 50MB over baseline + assert.ok( + mem_delta_mb < 50, + `Memory grew ${mem_delta_mb.toFixed(1)}MB over baseline (limit 50MB)` + ) - const duration_ms = Date.now() - start; + bus_a.destroy() + bus_b.destroy() + bus_c.destroy() - assert.equal(processed_count, total_events); - assert.ok(duration_ms < 120_000, `Processing took ${duration_ms}ms`); - assert.ok(bus.event_history.size <= bus.max_history_size); + assert.equal(EventBus.instances.size, 0, 'All buses destroyed') } -); +) diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts index 1bfe24a..cfb272a 100644 --- a/bubus-ts/tests/timeout.test.ts +++ b/bubus-ts/tests/timeout.test.ts @@ -1,519 +1,506 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { - BaseEvent, - EventBus, - EventHandlerCancelledError, - EventHandlerTimeoutError -} from "../src/index.js"; +import { BaseEvent, EventBus, EventHandlerCancelledError, EventHandlerTimeoutError } from '../src/index.js' -const TimeoutEvent = BaseEvent.extend("TimeoutEvent", {}); +const TimeoutEvent = BaseEvent.extend('TimeoutEvent', {}) const delay = (ms: number): Promise => new Promise((resolve) => { - setTimeout(resolve, ms); - }); + setTimeout(resolve, ms) + }) -test("handler timeout marks EventResult as error", async () => { - const bus = new EventBus("TimeoutBus"); +test('handler timeout marks EventResult as error', async () => { + const bus = new EventBus('TimeoutBus') bus.on(TimeoutEvent, async () => { - await delay(50); - return "slow"; - }); + await delay(50) + return 'slow' + }) - const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); - await event.done(); + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "error"); - assert.ok(result.error instanceof EventHandlerTimeoutError); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerTimeoutError) +}) -test("handler completes within timeout", async () => { - const bus = new EventBus("TimeoutOkBus"); +test('handler completes within timeout', async () => { + const bus = new EventBus('TimeoutOkBus') bus.on(TimeoutEvent, async () => { - await delay(5); - return "fast"; - }); + await delay(5) + return 'fast' + }) - const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })); - await event.done(); + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.equal(result.result, "fast"); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'fast') +}) -test("handler timeouts fire across concurrency modes", async () => { - const modes = ["global-serial", "bus-serial", "parallel"] as const; +test('handler timeouts fire across concurrency modes', async () => { + const modes = ['global-serial', 'bus-serial', 'parallel'] as const for (const event_mode of modes) { for (const handler_mode of modes) { const bus = new EventBus(`Timeout-${event_mode}-${handler_mode}`, { event_concurrency: event_mode, - handler_concurrency: handler_mode - }); + handler_concurrency: handler_mode, + }) bus.on(TimeoutEvent, async () => { - await delay(50); - return "slow"; - }); - - const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); - await event.done(); - - const result = Array.from(event.event_results.values())[0]; - assert.equal( - result.status, - "error", - `Expected timeout error for event=${event_mode} handler=${handler_mode}` - ); + await delay(50) + return 'slow' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error', `Expected timeout error for event=${event_mode} handler=${handler_mode}`) assert.ok( result.error instanceof EventHandlerTimeoutError, `Expected EventHandlerTimeoutError for event=${event_mode} handler=${handler_mode}` - ); + ) - await bus.waitUntilIdle(); + await bus.waitUntilIdle() } } -}); +}) -test("timeout still marks event failed when other handlers finish", async () => { - const bus = new EventBus("TimeoutParallelHandlers", { - event_concurrency: "parallel", - handler_concurrency: "parallel" - }); +test('timeout still marks event failed when other handlers finish', async () => { + const bus = new EventBus('TimeoutParallelHandlers', { + event_concurrency: 'parallel', + handler_concurrency: 'parallel', + }) - const results: string[] = []; + const results: string[] = [] bus.on(TimeoutEvent, async () => { - await delay(1); - results.push("fast"); - return "fast"; - }); + await delay(1) + results.push('fast') + return 'fast' + }) bus.on(TimeoutEvent, async () => { - await delay(50); - results.push("slow"); - return "slow"; - }); - - const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); - await event.done(); - - const statuses = Array.from(event.event_results.values()).map((result) => result.status); - assert.ok(statuses.includes("completed")); - assert.ok(statuses.includes("error")); - assert.equal(event.event_status, "completed"); - assert.ok(event.event_errors.length > 0); - assert.ok(results.includes("fast")); -}); - -test("deadlock warning triggers when event exceeds timeout", async () => { - const bus = new EventBus("DeadlockWarnBus"); - const warnings: string[] = []; - const original_warn = console.warn; + await delay(50) + results.push('slow') + return 'slow' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + const statuses = Array.from(event.event_results.values()).map((result) => result.status) + assert.ok(statuses.includes('completed')) + assert.ok(statuses.includes('error')) + assert.equal(event.event_status, 'completed') + assert.ok(event.event_errors.length > 0) + assert.ok(results.includes('fast')) +}) + +test('deadlock warning triggers when event exceeds timeout', async () => { + const bus = new EventBus('DeadlockWarnBus') + const warnings: string[] = [] + const original_warn = console.warn console.warn = (message?: unknown, ...args: unknown[]) => { - warnings.push(String(message)); + warnings.push(String(message)) if (args.length > 0) { - warnings.push(args.map(String).join(" ")); + warnings.push(args.map(String).join(' ')) } - }; + } try { bus.on(TimeoutEvent, async () => { await new Promise(() => { // never resolve - }); - }); + }) + }) - const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); - await event.done(); + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() } finally { - console.warn = original_warn; + console.warn = original_warn } assert.ok( - warnings.some((message) => message.includes("Possible deadlock")), - "Expected deadlock warning" - ); -}); - -test("slow handler warning fires when handler runs long", async () => { - const bus = new EventBus("SlowHandlerWarnBus"); - const warnings: string[] = []; - const original_warn = console.warn; - const original_set_timeout = global.setTimeout; - const original_clear_timeout = global.clearTimeout; + warnings.some((message) => message.includes('Possible deadlock')), + 'Expected deadlock warning' + ) +}) + +test('slow handler warning fires when handler runs long', async () => { + const bus = new EventBus('SlowHandlerWarnBus') + const warnings: string[] = [] + const original_warn = console.warn + const original_set_timeout = global.setTimeout + const original_clear_timeout = global.clearTimeout console.warn = (message?: unknown, ...args: unknown[]) => { - warnings.push(String(message)); + warnings.push(String(message)) if (args.length > 0) { - warnings.push(args.map(String).join(" ")); + warnings.push(args.map(String).join(' ')) } - }; + } // Force the slow-handler warning timer to fire immediately global.setTimeout = ((callback: (...args: unknown[]) => void, delay?: number, ...args: unknown[]) => { if (delay === 15000) { - return original_set_timeout(callback, 0, ...args); + return original_set_timeout(callback, 0, ...args) } - return original_set_timeout(callback, delay as number, ...args); - }) as typeof setTimeout; + return original_set_timeout(callback, delay as number, ...args) + }) as typeof setTimeout global.clearTimeout = ((timeout: ReturnType) => { - return original_clear_timeout(timeout); - }) as typeof clearTimeout; + return original_clear_timeout(timeout) + }) as typeof clearTimeout try { bus.on(TimeoutEvent, async () => { - await delay(5); - return "ok"; - }); + await delay(5) + return 'ok' + }) - const event = bus.dispatch(TimeoutEvent({ event_timeout: null })); - await event.done(); + const event = bus.dispatch(TimeoutEvent({ event_timeout: null })) + await event.done() } finally { - console.warn = original_warn; - global.setTimeout = original_set_timeout; - global.clearTimeout = original_clear_timeout; + console.warn = original_warn + global.setTimeout = original_set_timeout + global.clearTimeout = original_clear_timeout } assert.ok( - warnings.some((message) => message.includes("Slow handler")), - "Expected slow handler warning" - ); -}); + warnings.some((message) => message.includes('Slow handler')), + 'Expected slow handler warning' + ) +}) -test("event-level concurrency overrides do not bypass timeouts", async () => { - const bus = new EventBus("TimeoutEventOverrideBus", { - event_concurrency: "global-serial", - handler_concurrency: "global-serial" - }); +test('event-level concurrency overrides do not bypass timeouts', async () => { + const bus = new EventBus('TimeoutEventOverrideBus', { + event_concurrency: 'global-serial', + handler_concurrency: 'global-serial', + }) bus.on(TimeoutEvent, async () => { - await delay(50); - return "slow"; - }); + await delay(50) + return 'slow' + }) const event = bus.dispatch( TimeoutEvent({ event_timeout: 0.01, - event_concurrency: "parallel", - handler_concurrency: "parallel" + event_concurrency: 'parallel', + handler_concurrency: 'parallel', }) - ); - await event.done(); + ) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "error"); - assert.ok(result.error instanceof EventHandlerTimeoutError); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerTimeoutError) +}) -test("handler-level concurrency overrides do not bypass timeouts", async () => { - const bus = new EventBus("TimeoutHandlerOverrideBus", { - event_concurrency: "parallel", - handler_concurrency: "global-serial" - }); +test('handler-level concurrency overrides do not bypass timeouts', async () => { + const bus = new EventBus('TimeoutHandlerOverrideBus', { + event_concurrency: 'parallel', + handler_concurrency: 'global-serial', + }) - const order: string[] = []; + const order: string[] = [] bus.on( TimeoutEvent, async () => { - order.push("slow_start"); - await delay(50); - order.push("slow_end"); - return "slow"; + order.push('slow_start') + await delay(50) + order.push('slow_end') + return 'slow' }, - { handler_concurrency: "bus-serial" } - ); + { handler_concurrency: 'bus-serial' } + ) bus.on( TimeoutEvent, async () => { - order.push("fast_start"); - await delay(1); - order.push("fast_end"); - return "fast"; + order.push('fast_start') + await delay(1) + order.push('fast_end') + return 'fast' }, - { handler_concurrency: "parallel" } - ); + { handler_concurrency: 'parallel' } + ) - const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })); - await event.done(); + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() - const statuses = Array.from(event.event_results.values()).map((result) => result.status); - assert.ok(statuses.includes("error")); - assert.ok(statuses.includes("completed")); - assert.ok(order.includes("fast_start")); -}); + const statuses = Array.from(event.event_results.values()).map((result) => result.status) + assert.ok(statuses.includes('error')) + assert.ok(statuses.includes('completed')) + assert.ok(order.includes('fast_start')) +}) -test("forwarded event timeouts apply across buses", async () => { - const bus_a = new EventBus("TimeoutForwardA", { event_concurrency: "bus-serial" }); - const bus_b = new EventBus("TimeoutForwardB", { event_concurrency: "bus-serial" }); +test('forwarded event timeouts apply across buses', async () => { + const bus_a = new EventBus('TimeoutForwardA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('TimeoutForwardB', { event_concurrency: 'bus-serial' }) bus_a.on(TimeoutEvent, async (event) => { - bus_b.dispatch(event); - }); + bus_b.dispatch(event) + }) bus_b.on(TimeoutEvent, async () => { - await delay(50); - return "slow"; - }); + await delay(50) + return 'slow' + }) - const event = bus_a.dispatch(TimeoutEvent({ event_timeout: 0.01 })); - await event.done(); + const event = bus_a.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() - const results = Array.from(event.event_results.values()); - const bus_b_result = results.find((result) => result.eventbus_name === "TimeoutForwardB"); - assert.ok(bus_b_result); - assert.equal(bus_b_result?.status, "error"); - assert.ok(bus_b_result?.error instanceof EventHandlerTimeoutError); -}); + const results = Array.from(event.event_results.values()) + const bus_b_result = results.find((result) => result.eventbus_name === 'TimeoutForwardB') + assert.ok(bus_b_result) + assert.equal(bus_b_result?.status, 'error') + assert.ok(bus_b_result?.error instanceof EventHandlerTimeoutError) +}) -test("queue-jump awaited child timeouts still fire across buses", async () => { - const ParentEvent = BaseEvent.extend("TimeoutParentEvent", {}); - const ChildEvent = BaseEvent.extend("TimeoutChildEvent", {}); +test('queue-jump awaited child timeouts still fire across buses', async () => { + const ParentEvent = BaseEvent.extend('TimeoutParentEvent', {}) + const ChildEvent = BaseEvent.extend('TimeoutChildEvent', {}) - const bus_a = new EventBus("TimeoutQueueJumpA", { event_concurrency: "global-serial" }); - const bus_b = new EventBus("TimeoutQueueJumpB", { event_concurrency: "global-serial" }); + const bus_a = new EventBus('TimeoutQueueJumpA', { event_concurrency: 'global-serial' }) + const bus_b = new EventBus('TimeoutQueueJumpB', { event_concurrency: 'global-serial' }) - let child_ref: InstanceType | null = null; + let child_ref: InstanceType | null = null bus_b.on(ChildEvent, async () => { - await delay(50); - return "slow"; - }); - - bus_a.on(ParentEvent, async () => { - const child = bus_b.dispatch(ChildEvent({ event_timeout: 0.01 })); - child_ref = child; - await child.done(); - }); - - const parent = bus_a.dispatch(ParentEvent({ event_timeout: 0.5 })); - await parent.done(); - - assert.ok(child_ref); - const child_results = Array.from(child_ref!.event_results.values()); - const timeout_result = child_results.find( - (result) => result.error instanceof EventHandlerTimeoutError - ); - assert.ok(timeout_result); -}); - -test("parent timeout cancels pending child handler results under serial handler limiter", async () => { - const ParentEvent = BaseEvent.extend("TimeoutCancelParentEvent", {}); - const ChildEvent = BaseEvent.extend("TimeoutCancelChildEvent", {}); - - const bus = new EventBus("TimeoutCancelBus", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); - - let child_runs = 0; + await delay(50) + return 'slow' + }) + + bus_a.on(ParentEvent, async (event) => { + // Use scoped bus emit to set parent tracking (event_parent_id, event_emitted_by_handler_id), + // then also dispatch on bus_b for cross-bus handler execution. + // Without parent tracking, _runImmediately can't detect the queue-jump context + // and falls back to waitForCompletion(), which deadlocks with global-serial. + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.01 }))! + bus_b.dispatch(child) + child_ref = child + await child.done() + }) + + const parent = bus_a.dispatch(ParentEvent({ event_timeout: 0.5 })) + await parent.done() + + assert.ok(child_ref) + const child_results = Array.from(child_ref!.event_results.values()) + const timeout_result = child_results.find((result) => result.error instanceof EventHandlerTimeoutError) + assert.ok(timeout_result) +}) + +test('parent timeout cancels pending child handler results under serial handler limiter', async () => { + const ParentEvent = BaseEvent.extend('TimeoutCancelParentEvent', {}) + const ChildEvent = BaseEvent.extend('TimeoutCancelChildEvent', {}) + + const bus = new EventBus('TimeoutCancelBus', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) + + let child_runs = 0 bus.on(ChildEvent, async () => { - child_runs += 1; - await delay(30); - return "first"; - }); + child_runs += 1 + await delay(30) + return 'first' + }) bus.on(ChildEvent, async () => { - child_runs += 1; - await delay(10); - return "second"; - }); + child_runs += 1 + await delay(10) + return 'second' + }) bus.on(ParentEvent, async (event) => { - event.bus?.emit(ChildEvent({ event_timeout: 0.2 })); - await delay(50); - }); + event.bus?.emit(ChildEvent({ event_timeout: 0.2 })) + await delay(50) + }) - const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })); - await parent.done(); - await bus.waitUntilIdle(); + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })) + await parent.done() + await bus.waitUntilIdle() - const child = parent.event_children[0]; - assert.ok(child); + const child = parent.event_children[0] + assert.ok(child) - assert.equal(child_runs, 0); + assert.equal(child_runs, 0) - const cancelled_results = Array.from(child.event_results.values()).filter( - (result) => result.error instanceof EventHandlerCancelledError - ); - assert.ok(cancelled_results.length > 0); -}); + const cancelled_results = Array.from(child.event_results.values()).filter((result) => result.error instanceof EventHandlerCancelledError) + assert.ok(cancelled_results.length > 0) +}) -test("event_timeout null falls back to bus default", async () => { - const bus = new EventBus("TimeoutDefaultBus", { event_timeout: 0.01 }); +test('event_timeout null falls back to bus default', async () => { + const bus = new EventBus('TimeoutDefaultBus', { event_timeout: 0.01 }) bus.on(TimeoutEvent, async () => { - await delay(50); - return "slow"; - }); + await delay(50) + return 'slow' + }) - const event = bus.dispatch(TimeoutEvent({ event_timeout: null })); - await event.done(); + const event = bus.dispatch(TimeoutEvent({ event_timeout: null })) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "error"); - assert.ok(result.error instanceof EventHandlerTimeoutError); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerTimeoutError) +}) -test("bus default null disables timeouts when event_timeout is null", async () => { - const bus = new EventBus("TimeoutDisabledBus", { event_timeout: null }); +test('bus default null disables timeouts when event_timeout is null', async () => { + const bus = new EventBus('TimeoutDisabledBus', { event_timeout: null }) bus.on(TimeoutEvent, async () => { - await delay(20); - return "ok"; - }); - - const event = bus.dispatch(TimeoutEvent({ event_timeout: null })); - await event.done(); - - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.equal(result.result, "ok"); -}); - -test("multi-level timeout cascade with mixed cancellations", async () => { - const TopEvent = BaseEvent.extend("TimeoutCascadeTop", {}); - const QueuedChildEvent = BaseEvent.extend("TimeoutCascadeQueuedChild", {}); - const AwaitedChildEvent = BaseEvent.extend("TimeoutCascadeAwaitedChild", {}); - const ImmediateGrandchildEvent = BaseEvent.extend("TimeoutCascadeImmediateGrandchild", {}); - const QueuedGrandchildEvent = BaseEvent.extend("TimeoutCascadeQueuedGrandchild", {}); - - const bus = new EventBus("TimeoutCascadeBus", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); - - let queued_child: InstanceType | null = null; - let awaited_child: InstanceType | null = null; - let immediate_grandchild: InstanceType | null = null; - let queued_grandchild: InstanceType | null = null; - - let queued_child_runs = 0; - let immediate_grandchild_runs = 0; - let queued_grandchild_runs = 0; + await delay(20) + return 'ok' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: null })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'ok') +}) + +test('multi-level timeout cascade with mixed cancellations', async () => { + const TopEvent = BaseEvent.extend('TimeoutCascadeTop', {}) + const QueuedChildEvent = BaseEvent.extend('TimeoutCascadeQueuedChild', {}) + const AwaitedChildEvent = BaseEvent.extend('TimeoutCascadeAwaitedChild', {}) + const ImmediateGrandchildEvent = BaseEvent.extend('TimeoutCascadeImmediateGrandchild', {}) + const QueuedGrandchildEvent = BaseEvent.extend('TimeoutCascadeQueuedGrandchild', {}) + + const bus = new EventBus('TimeoutCascadeBus', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) + + let queued_child: InstanceType | null = null + let awaited_child: InstanceType | null = null + let immediate_grandchild: InstanceType | null = null + let queued_grandchild: InstanceType | null = null + + let queued_child_runs = 0 + let immediate_grandchild_runs = 0 + let queued_grandchild_runs = 0 const queued_child_fast = async () => { - queued_child_runs += 1; - await delay(5); - return "queued_fast"; - }; + queued_child_runs += 1 + await delay(5) + return 'queued_fast' + } const queued_child_slow = async () => { - queued_child_runs += 1; - await delay(50); - return "queued_slow"; - }; + queued_child_runs += 1 + await delay(50) + return 'queued_slow' + } const awaited_child_fast = async () => { - await delay(5); - return "awaited_fast"; - }; + await delay(5) + return 'awaited_fast' + } const awaited_child_slow = async (event: BaseEvent) => { - queued_grandchild = event.bus?.emit( - QueuedGrandchildEvent({ event_timeout: 0.2 }) - )!; - immediate_grandchild = event.bus?.emit( - ImmediateGrandchildEvent({ event_timeout: 0.2 }) - )!; - await immediate_grandchild.done(); - await delay(100); - return "awaited_slow"; - }; + queued_grandchild = event.bus?.emit(QueuedGrandchildEvent({ event_timeout: 0.2 }))! + immediate_grandchild = event.bus?.emit(ImmediateGrandchildEvent({ event_timeout: 0.2 }))! + await immediate_grandchild.done() + await delay(100) + return 'awaited_slow' + } const immediate_grandchild_slow = async () => { - immediate_grandchild_runs += 1; - await delay(50); - return "immediate_grandchild_slow"; - }; + immediate_grandchild_runs += 1 + await delay(50) + return 'immediate_grandchild_slow' + } const immediate_grandchild_fast = async () => { - immediate_grandchild_runs += 1; - await delay(10); - return "immediate_grandchild_fast"; - }; + immediate_grandchild_runs += 1 + await delay(10) + return 'immediate_grandchild_fast' + } const queued_grandchild_slow = async () => { - queued_grandchild_runs += 1; - await delay(50); - return "queued_grandchild_slow"; - }; + queued_grandchild_runs += 1 + await delay(50) + return 'queued_grandchild_slow' + } const queued_grandchild_fast = async () => { - queued_grandchild_runs += 1; - await delay(10); - return "queued_grandchild_fast"; - }; - - bus.on(QueuedChildEvent, queued_child_fast); - bus.on(QueuedChildEvent, queued_child_slow); - bus.on(AwaitedChildEvent, awaited_child_fast); - bus.on(AwaitedChildEvent, awaited_child_slow); - bus.on(ImmediateGrandchildEvent, immediate_grandchild_slow); - bus.on(ImmediateGrandchildEvent, immediate_grandchild_fast); - bus.on(QueuedGrandchildEvent, queued_grandchild_slow); - bus.on(QueuedGrandchildEvent, queued_grandchild_fast); + queued_grandchild_runs += 1 + await delay(10) + return 'queued_grandchild_fast' + } + + bus.on(QueuedChildEvent, queued_child_fast) + bus.on(QueuedChildEvent, queued_child_slow) + bus.on(AwaitedChildEvent, awaited_child_fast) + bus.on(AwaitedChildEvent, awaited_child_slow) + bus.on(ImmediateGrandchildEvent, immediate_grandchild_slow) + bus.on(ImmediateGrandchildEvent, immediate_grandchild_fast) + bus.on(QueuedGrandchildEvent, queued_grandchild_slow) + bus.on(QueuedGrandchildEvent, queued_grandchild_fast) bus.on(TopEvent, async (event) => { - queued_child = event.bus?.emit(QueuedChildEvent({ event_timeout: 0.2 }))!; - awaited_child = event.bus?.emit(AwaitedChildEvent({ event_timeout: 0.03 }))!; - await awaited_child.done(); - await delay(80); - }); - - const top = bus.dispatch(TopEvent({ event_timeout: 0.04 })); - await top.done(); - await bus.waitUntilIdle(); - - const top_result = Array.from(top.event_results.values())[0]; - assert.equal(top_result.status, "error"); - assert.ok(top_result.error instanceof EventHandlerTimeoutError); - - assert.ok(queued_child); - const queued_results = Array.from(queued_child!.event_results.values()); - assert.equal(queued_child_runs, 0); - assert.ok(queued_results.length >= 2); + queued_child = event.bus?.emit(QueuedChildEvent({ event_timeout: 0.2 }))! + awaited_child = event.bus?.emit(AwaitedChildEvent({ event_timeout: 0.03 }))! + await awaited_child.done() + await delay(80) + }) + + const top = bus.dispatch(TopEvent({ event_timeout: 0.04 })) + await top.done() + await bus.waitUntilIdle() + + const top_result = Array.from(top.event_results.values())[0] + assert.equal(top_result.status, 'error') + assert.ok(top_result.error instanceof EventHandlerTimeoutError) + + assert.ok(queued_child) + const queued_results = Array.from(queued_child!.event_results.values()) + assert.equal(queued_child_runs, 0) + assert.ok(queued_results.length >= 2) for (const result of queued_results) { - assert.equal(result.status, "error"); - assert.ok(result.error instanceof EventHandlerCancelledError); - assert.ok( - (result.error as EventHandlerCancelledError).parent_error instanceof EventHandlerTimeoutError - ); - } - - assert.ok(awaited_child); - const awaited_results = Array.from(awaited_child!.event_results.values()); - const awaited_completed = awaited_results.filter((result) => result.status === "completed"); - const awaited_timeouts = awaited_results.filter( - (result) => result.error instanceof EventHandlerTimeoutError - ); - assert.equal(awaited_completed.length, 1); - assert.equal(awaited_timeouts.length, 1); - - assert.ok(immediate_grandchild); - const immediate_results = Array.from(immediate_grandchild!.event_results.values()); - assert.equal(immediate_grandchild_runs, 2); - const immediate_completed = immediate_results.filter((result) => result.status === "completed"); - assert.equal(immediate_completed.length, 2); - - assert.ok(queued_grandchild); - const queued_grandchild_results = Array.from(queued_grandchild!.event_results.values()); - assert.equal(queued_grandchild_runs, 0); - const queued_cancelled = queued_grandchild_results.filter( - (result) => result.error instanceof EventHandlerCancelledError - ); - assert.ok(queued_cancelled.length >= 2); -}); + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerCancelledError) + assert.ok((result.error as EventHandlerCancelledError).parent_error instanceof EventHandlerTimeoutError) + } + + assert.ok(awaited_child) + const awaited_results = Array.from(awaited_child!.event_results.values()) + const awaited_completed = awaited_results.filter((result) => result.status === 'completed') + const awaited_timeouts = awaited_results.filter((result) => result.error instanceof EventHandlerTimeoutError) + assert.equal(awaited_completed.length, 1) + assert.equal(awaited_timeouts.length, 1) + + assert.ok(immediate_grandchild) + const immediate_results = Array.from(immediate_grandchild!.event_results.values()) + // With bus-serial handler concurrency (no longer bypassed during queue-jump), + // only the first grandchild handler starts before the awaited child's 30ms timeout fires. + // The second handler is still pending (waiting for limiter) β†’ cancelled. + assert.equal(immediate_grandchild_runs, 1) + const immediate_completed = immediate_results.filter((result) => result.status === 'completed') + assert.equal(immediate_completed.length, 1) + const immediate_cancelled = immediate_results.filter((result) => result.error instanceof EventHandlerCancelledError) + assert.equal(immediate_cancelled.length, 1) + + assert.ok(queued_grandchild) + const queued_grandchild_results = Array.from(queued_grandchild!.event_results.values()) + assert.equal(queued_grandchild_runs, 0) + const queued_cancelled = queued_grandchild_results.filter((result) => result.error instanceof EventHandlerCancelledError) + assert.ok(queued_cancelled.length >= 2) +}) // ============================================================================= // Three-level timeout cascade (mirrors Python test_handler_timeout.py) @@ -530,14 +517,14 @@ test("multi-level timeout cascade with mixed cancellations", async () => { // // KEY MECHANIC: When a child event is awaited via event.done() inside a handler, // it triggers "queue-jumping" via _runImmediately β†’ runImmediatelyAcrossBuses. -// Queue-jumped events bypass the handler limiter (bypass_handler_limiters: true), -// so all handlers for that event run in PARALLEL, even on a bus-serial bus. +// Queue-jumped events use yield-and-reacquire: the parent handler's limiter is +// temporarily released so child handlers can acquire it normally. This means +// child handlers run SERIALLY on a bus-serial bus (respecting concurrency limits). // Non-awaited child events stay in the pending_event_queue and are blocked by // immediate_processing_stack_depth > 0 (runloop is paused during queue-jump). // // TIMEOUT BEHAVIOR: Each handler gets its OWN timeout window starting from when // that handler begins execution β€” NOT from when the event was dispatched. -// So with parallel handlers, all timeouts start at roughly the same time. // With serial handlers, each timeout starts when the handler acquires the limiter. // // CANCELLATION CASCADE: When a handler times out, cancelPendingChildProcessing() @@ -546,65 +533,64 @@ test("multi-level timeout cascade with mixed cancellations", async () => { // that already started ("started" status) continue running in the background. // ============================================================================= -test("three-level timeout cascade with per-level timeouts and cascading cancellation", async () => { - const TopEvent = BaseEvent.extend("Cascade3LTop", {}); - const ChildEvent = BaseEvent.extend("Cascade3LChild", {}); - const GrandchildEvent = BaseEvent.extend("Cascade3LGrandchild", {}); - const QueuedGrandchildEvent = BaseEvent.extend("Cascade3LQueuedGC", {}); - const SiblingEvent = BaseEvent.extend("Cascade3LSibling", {}); +test('three-level timeout cascade with per-level timeouts and cascading cancellation', async () => { + const TopEvent = BaseEvent.extend('Cascade3LTop', {}) + const ChildEvent = BaseEvent.extend('Cascade3LChild', {}) + const GrandchildEvent = BaseEvent.extend('Cascade3LGrandchild', {}) + const QueuedGrandchildEvent = BaseEvent.extend('Cascade3LQueuedGC', {}) + const SiblingEvent = BaseEvent.extend('Cascade3LSibling', {}) - const bus = new EventBus("Cascade3LevelBus", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); + const bus = new EventBus('Cascade3LevelBus', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) - const execution_log: string[] = []; - let child_ref: InstanceType | null = null; - let grandchild_ref: InstanceType | null = null; - let queued_grandchild_ref: InstanceType | null = null; - let sibling_ref: InstanceType | null = null; + const execution_log: string[] = [] + let child_ref: InstanceType | null = null + let grandchild_ref: InstanceType | null = null + let queued_grandchild_ref: InstanceType | null = null + let sibling_ref: InstanceType | null = null // ── GrandchildEvent handlers ────────────────────────────────────────── - // These run in PARALLEL because GrandchildEvent is queue-jumped - // (bypass_handler_limiters: true). Each handler gets its own 35ms timeout - // window starting from approximately the same moment. + // These run SERIALLY because queue-jumped events respect the bus-serial + // handler limiter (yield-and-reacquire). Each handler gets its own 35ms + // timeout window starting from when that handler acquires the limiter. // - // Handlers a, c, e sleep 200ms β†’ each times out individually at 35ms - // Handler b is synchronous β†’ completes immediately - // Handler d sleeps 10ms β†’ completes within its 35ms window + // Serial order: a(35ms timeout) β†’ b(sync) β†’ c(35ms timeout) β†’ d(10ms) β†’ e(35ms timeout) + // Total time for all 5: ~35+0+35+10+35 = ~115ms (within child's 150ms timeout) const gc_handler_a = async () => { - execution_log.push("gc_a_start"); - await delay(200); // will be interrupted by 35ms timeout - execution_log.push("gc_a_end"); // should never reach here - return "gc_a_done"; - }; + execution_log.push('gc_a_start') + await delay(500) // will be interrupted by 35ms timeout (500ms > total test time) + execution_log.push('gc_a_end') // should never reach here before assertions + return 'gc_a_done' + } const gc_handler_b = () => { - execution_log.push("gc_b_complete"); - return "gc_b_done"; - }; + execution_log.push('gc_b_complete') + return 'gc_b_done' + } const gc_handler_c = async () => { - execution_log.push("gc_c_start"); - await delay(200); // will be interrupted by 35ms timeout - execution_log.push("gc_c_end"); // should never reach here - return "gc_c_done"; - }; + execution_log.push('gc_c_start') + await delay(500) // will be interrupted by 35ms timeout (500ms > total test time) + execution_log.push('gc_c_end') // should never reach here before assertions + return 'gc_c_done' + } const gc_handler_d = async () => { - execution_log.push("gc_d_start"); - await delay(10); // fast enough to complete within 35ms - execution_log.push("gc_d_complete"); - return "gc_d_done"; - }; + execution_log.push('gc_d_start') + await delay(10) // fast enough to complete within 35ms + execution_log.push('gc_d_complete') + return 'gc_d_done' + } const gc_handler_e = async () => { - execution_log.push("gc_e_start"); - await delay(200); // will be interrupted by 35ms timeout - execution_log.push("gc_e_end"); // should never reach here - return "gc_e_done"; - }; + execution_log.push('gc_e_start') + await delay(500) // will be interrupted by 35ms timeout (500ms > total test time) + execution_log.push('gc_e_end') // should never reach here before assertions + return 'gc_e_done' + } // ── QueuedGrandchildEvent handler ───────────────────────────────────── // This event is emitted by child_handler but NOT awaited, so it sits in @@ -612,35 +598,35 @@ test("three-level timeout cascade with per-level timeouts and cascading cancella // cancelPendingChildProcessing walks ChildEvent.event_children and finds // this event still pending β†’ its handler results are marked as cancelled. const queued_gc_handler = () => { - execution_log.push("queued_gc_start"); // should never reach here - return "queued_gc_done"; - }; + execution_log.push('queued_gc_start') // should never reach here + return 'queued_gc_done' + } // ── ChildEvent handler ──────────────────────────────────────────────── // Emits GrandchildEvent (awaited β†’ queue-jump, ~35ms to complete) // Emits QueuedGrandchildEvent (NOT awaited β†’ stays in queue) // After grandchild completes, sleeps 300ms β†’ times out at 80ms total const child_handler = async (event: InstanceType) => { - execution_log.push("child_start"); - grandchild_ref = event.bus?.emit(GrandchildEvent({ event_timeout: 0.035 }))!; - queued_grandchild_ref = event.bus?.emit(QueuedGrandchildEvent({ event_timeout: 0.5 }))!; - // Queue-jump: processes GrandchildEvent immediately, bypassing handler limiter. - // All 5 GC handlers run in parallel. Completes in ~35ms. - await grandchild_ref.done(); - execution_log.push("child_after_grandchild"); - await delay(300); // will be interrupted: child started at ~t=0, timeout at 80ms - execution_log.push("child_end"); // should never reach here - return "child_done"; - }; + execution_log.push('child_start') + grandchild_ref = event.bus?.emit(GrandchildEvent({ event_timeout: 0.035 }))! + queued_grandchild_ref = event.bus?.emit(QueuedGrandchildEvent({ event_timeout: 0.5 }))! + // Queue-jump: processes GrandchildEvent immediately via yield-and-reacquire. + // All 5 GC handlers run serially. Completes in ~115ms (within 150ms child timeout). + await grandchild_ref.done() + execution_log.push('child_after_grandchild') + await delay(300) // will be interrupted: child started at ~t=0, timeout at 150ms + execution_log.push('child_end') // should never reach here + return 'child_done' + } // ── SiblingEvent handler ────────────────────────────────────────────── // This event is emitted by top_handler_main but NOT awaited. Stays in // pending_event_queue until top_handler_main times out at 250ms β†’ // cancelled by cancelPendingChildProcessing. const sibling_handler = () => { - execution_log.push("sibling_start"); // should never reach here - return "sibling_done"; - }; + execution_log.push('sibling_start') // should never reach here + return 'sibling_done' + } // ── TopEvent handlers ───────────────────────────────────────────────── // These run SERIALLY (via bus handler limiter) because TopEvent is @@ -648,215 +634,195 @@ test("three-level timeout cascade with per-level timeouts and cascading cancella // goes first, completes quickly, then top_handler_main starts. const top_handler_fast = async () => { - execution_log.push("top_fast_start"); - await delay(2); - execution_log.push("top_fast_complete"); - return "top_fast_done"; - }; + execution_log.push('top_fast_start') + await delay(2) + execution_log.push('top_fast_complete') + return 'top_fast_done' + } const top_handler_main = async (event: InstanceType) => { - execution_log.push("top_main_start"); - child_ref = event.bus?.emit(ChildEvent({ event_timeout: 0.08 }))!; - sibling_ref = event.bus?.emit(SiblingEvent({ event_timeout: 0.5 }))!; + execution_log.push('top_main_start') + child_ref = event.bus?.emit(ChildEvent({ event_timeout: 0.15 }))! + sibling_ref = event.bus?.emit(SiblingEvent({ event_timeout: 0.5 }))! // Queue-jump: processes ChildEvent immediately (which in turn queue-jumps // GrandchildEvent). This entire subtree resolves in ~80ms (child timeout). - await child_ref.done(); - execution_log.push("top_main_after_child"); - await delay(300); // will be interrupted: top_handler_main started at ~t=2, timeout at 250ms - execution_log.push("top_main_end"); // should never reach here - return "top_main_done"; - }; + await child_ref.done() + execution_log.push('top_main_after_child') + await delay(300) // will be interrupted: top_handler_main started at ~t=2, timeout at 250ms + execution_log.push('top_main_end') // should never reach here + return 'top_main_done' + } // Register handlers (registration order = execution order for serial) - bus.on(TopEvent, top_handler_fast); - bus.on(TopEvent, top_handler_main); - bus.on(ChildEvent, child_handler); - bus.on(GrandchildEvent, gc_handler_a); - bus.on(GrandchildEvent, gc_handler_b); - bus.on(GrandchildEvent, gc_handler_c); - bus.on(GrandchildEvent, gc_handler_d); - bus.on(GrandchildEvent, gc_handler_e); - bus.on(QueuedGrandchildEvent, queued_gc_handler); - bus.on(SiblingEvent, sibling_handler); + bus.on(TopEvent, top_handler_fast) + bus.on(TopEvent, top_handler_main) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, gc_handler_a) + bus.on(GrandchildEvent, gc_handler_b) + bus.on(GrandchildEvent, gc_handler_c) + bus.on(GrandchildEvent, gc_handler_d) + bus.on(GrandchildEvent, gc_handler_e) + bus.on(QueuedGrandchildEvent, queued_gc_handler) + bus.on(SiblingEvent, sibling_handler) // ── Dispatch and wait ───────────────────────────────────────────────── - const top = bus.dispatch(TopEvent({ event_timeout: 0.25 })); - await top.done(); - await bus.waitUntilIdle(); + const top = bus.dispatch(TopEvent({ event_timeout: 0.25 })) + await top.done() + await bus.waitUntilIdle() // ═══════════════════════════════════════════════════════════════════════ // ASSERTIONS // ═══════════════════════════════════════════════════════════════════════ // ── TopEvent: 2 handler results (1 completed, 1 timed out) ────────── - assert.equal(top.event_status, "completed"); - assert.ok(top.event_errors.length >= 1, "TopEvent should have at least 1 error"); + assert.equal(top.event_status, 'completed') + assert.ok(top.event_errors.length >= 1, 'TopEvent should have at least 1 error') - const top_results = Array.from(top.event_results.values()); - assert.equal(top_results.length, 2, "TopEvent should have 2 handler results"); + const top_results = Array.from(top.event_results.values()) + assert.equal(top_results.length, 2, 'TopEvent should have 2 handler results') - const top_fast_result = top_results.find((r) => r.handler_name === "top_handler_fast"); - assert.ok(top_fast_result, "top_handler_fast result should exist"); - assert.equal(top_fast_result!.status, "completed"); - assert.equal(top_fast_result!.result, "top_fast_done"); + const top_fast_result = top_results.find((r) => r.handler_name === 'top_handler_fast') + assert.ok(top_fast_result, 'top_handler_fast result should exist') + assert.equal(top_fast_result!.status, 'completed') + assert.equal(top_fast_result!.result, 'top_fast_done') - const top_main_result = top_results.find((r) => r.handler_name === "top_handler_main"); - assert.ok(top_main_result, "top_handler_main result should exist"); - assert.equal(top_main_result!.status, "error"); - assert.ok( - top_main_result!.error instanceof EventHandlerTimeoutError, - "top_handler_main should have timed out" - ); - - // ── ChildEvent: 1 handler result (timed out at 80ms) ──────────────── - assert.ok(child_ref, "ChildEvent should have been emitted"); - assert.equal(child_ref!.event_status, "completed"); - - const child_results = Array.from(child_ref!.event_results.values()); - assert.equal(child_results.length, 1, "ChildEvent should have 1 handler result"); - assert.equal(child_results[0].handler_name, "child_handler"); - assert.equal(child_results[0].status, "error"); - assert.ok( - child_results[0].error instanceof EventHandlerTimeoutError, - "child_handler should have timed out" - ); + const top_main_result = top_results.find((r) => r.handler_name === 'top_handler_main') + assert.ok(top_main_result, 'top_handler_main result should exist') + assert.equal(top_main_result!.status, 'error') + assert.ok(top_main_result!.error instanceof EventHandlerTimeoutError, 'top_handler_main should have timed out') + + // ── ChildEvent: 1 handler result (timed out at 150ms) ──────────────── + assert.ok(child_ref, 'ChildEvent should have been emitted') + assert.equal(child_ref!.event_status, 'completed') + + const child_results = Array.from(child_ref!.event_results.values()) + assert.equal(child_results.length, 1, 'ChildEvent should have 1 handler result') + assert.equal(child_results[0].handler_name, 'child_handler') + assert.equal(child_results[0].status, 'error') + assert.ok(child_results[0].error instanceof EventHandlerTimeoutError, 'child_handler should have timed out') // ── GrandchildEvent: 5 handler results (2 completed, 3 timed out) ── - assert.ok(grandchild_ref, "GrandchildEvent should have been emitted"); - assert.equal(grandchild_ref!.event_status, "completed"); + assert.ok(grandchild_ref, 'GrandchildEvent should have been emitted') + assert.equal(grandchild_ref!.event_status, 'completed') - const gc_results = Array.from(grandchild_ref!.event_results.values()); - assert.equal(gc_results.length, 5, "GrandchildEvent should have 5 handler results"); + const gc_results = Array.from(grandchild_ref!.event_results.values()) + assert.equal(gc_results.length, 5, 'GrandchildEvent should have 5 handler results') // Handlers a, c, e: slow β†’ individually timed out - for (const name of ["gc_handler_a", "gc_handler_c", "gc_handler_e"]) { - const result = gc_results.find((r) => r.handler_name === name); - assert.ok(result, `${name} result should exist`); - assert.equal(result!.status, "error", `${name} should have status error`); - assert.ok( - result!.error instanceof EventHandlerTimeoutError, - `${name} should be EventHandlerTimeoutError` - ); + for (const name of ['gc_handler_a', 'gc_handler_c', 'gc_handler_e']) { + const result = gc_results.find((r) => r.handler_name === name) + assert.ok(result, `${name} result should exist`) + assert.equal(result!.status, 'error', `${name} should have status error`) + assert.ok(result!.error instanceof EventHandlerTimeoutError, `${name} should be EventHandlerTimeoutError`) } // Handlers b, d: fast β†’ completed successfully - const gc_b_result = gc_results.find((r) => r.handler_name === "gc_handler_b"); - assert.ok(gc_b_result, "gc_handler_b result should exist"); - assert.equal(gc_b_result!.status, "completed"); - assert.equal(gc_b_result!.result, "gc_b_done"); + const gc_b_result = gc_results.find((r) => r.handler_name === 'gc_handler_b') + assert.ok(gc_b_result, 'gc_handler_b result should exist') + assert.equal(gc_b_result!.status, 'completed') + assert.equal(gc_b_result!.result, 'gc_b_done') - const gc_d_result = gc_results.find((r) => r.handler_name === "gc_handler_d"); - assert.ok(gc_d_result, "gc_handler_d result should exist"); - assert.equal(gc_d_result!.status, "completed"); - assert.equal(gc_d_result!.result, "gc_d_done"); + const gc_d_result = gc_results.find((r) => r.handler_name === 'gc_handler_d') + assert.ok(gc_d_result, 'gc_handler_d result should exist') + assert.equal(gc_d_result!.status, 'completed') + assert.equal(gc_d_result!.result, 'gc_d_done') // ── QueuedGrandchildEvent: CANCELLED by child_handler timeout ─────── // This event was emitted but never awaited. It sat in pending_event_queue // until child_handler timed out, which triggered cancelPendingChildProcessing // to walk ChildEvent.event_children and cancel all pending handlers. - assert.ok(queued_grandchild_ref, "QueuedGrandchildEvent should have been emitted"); - assert.equal(queued_grandchild_ref!.event_status, "completed"); + assert.ok(queued_grandchild_ref, 'QueuedGrandchildEvent should have been emitted') + assert.equal(queued_grandchild_ref!.event_status, 'completed') - const queued_gc_results = Array.from(queued_grandchild_ref!.event_results.values()); - assert.equal(queued_gc_results.length, 1, "QueuedGC should have 1 handler result"); - assert.equal(queued_gc_results[0].status, "error"); + const queued_gc_results = Array.from(queued_grandchild_ref!.event_results.values()) + assert.equal(queued_gc_results.length, 1, 'QueuedGC should have 1 handler result') + assert.equal(queued_gc_results[0].status, 'error') assert.ok( queued_gc_results[0].error instanceof EventHandlerCancelledError, - "QueuedGC handler should be EventHandlerCancelledError (not timeout β€” it never ran)" - ); + 'QueuedGC handler should be EventHandlerCancelledError (not timeout β€” it never ran)' + ) // Verify the cancellation error chain: CancelledError.parent_error β†’ TimeoutError assert.ok( - (queued_gc_results[0].error as EventHandlerCancelledError).parent_error instanceof - EventHandlerTimeoutError, + (queued_gc_results[0].error as EventHandlerCancelledError).parent_error instanceof EventHandlerTimeoutError, "QueuedGC cancellation should reference the child_handler's timeout as parent_error" - ); + ) // ── SiblingEvent: CANCELLED by top_handler_main timeout ───────────── // Same pattern: emitted but never awaited, stays in queue, cancelled when // top_handler_main times out and cancelPendingChildProcessing runs. - assert.ok(sibling_ref, "SiblingEvent should have been emitted"); - assert.equal(sibling_ref!.event_status, "completed"); + assert.ok(sibling_ref, 'SiblingEvent should have been emitted') + assert.equal(sibling_ref!.event_status, 'completed') - const sibling_results = Array.from(sibling_ref!.event_results.values()); - assert.equal(sibling_results.length, 1, "SiblingEvent should have 1 handler result"); - assert.equal(sibling_results[0].status, "error"); - assert.ok( - sibling_results[0].error instanceof EventHandlerCancelledError, - "SiblingEvent handler should be EventHandlerCancelledError" - ); + const sibling_results = Array.from(sibling_ref!.event_results.values()) + assert.equal(sibling_results.length, 1, 'SiblingEvent should have 1 handler result') + assert.equal(sibling_results[0].status, 'error') + assert.ok(sibling_results[0].error instanceof EventHandlerCancelledError, 'SiblingEvent handler should be EventHandlerCancelledError') assert.ok( - (sibling_results[0].error as EventHandlerCancelledError).parent_error instanceof - EventHandlerTimeoutError, + (sibling_results[0].error as EventHandlerCancelledError).parent_error instanceof EventHandlerTimeoutError, "SiblingEvent cancellation should reference top_handler_main's timeout as parent_error" - ); + ) // ── Execution log: verify what ran and what didn't ────────────────── // These handlers started AND completed: - assert.ok(execution_log.includes("top_fast_start"), "top_fast should have started"); - assert.ok(execution_log.includes("top_fast_complete"), "top_fast should have completed"); - assert.ok(execution_log.includes("gc_b_complete"), "gc_b (sync) should have completed"); - assert.ok(execution_log.includes("gc_d_start"), "gc_d should have started"); - assert.ok(execution_log.includes("gc_d_complete"), "gc_d should have completed"); + assert.ok(execution_log.includes('top_fast_start'), 'top_fast should have started') + assert.ok(execution_log.includes('top_fast_complete'), 'top_fast should have completed') + assert.ok(execution_log.includes('gc_b_complete'), 'gc_b (sync) should have completed') + assert.ok(execution_log.includes('gc_d_start'), 'gc_d should have started') + assert.ok(execution_log.includes('gc_d_complete'), 'gc_d should have completed') // These handlers started but were interrupted by their own timeout: - assert.ok(execution_log.includes("gc_a_start"), "gc_a should have started"); - assert.ok(!execution_log.includes("gc_a_end"), "gc_a should NOT have finished (timed out)"); - assert.ok(execution_log.includes("gc_c_start"), "gc_c should have started"); - assert.ok(!execution_log.includes("gc_c_end"), "gc_c should NOT have finished (timed out)"); - assert.ok(execution_log.includes("gc_e_start"), "gc_e should have started"); - assert.ok(!execution_log.includes("gc_e_end"), "gc_e should NOT have finished (timed out)"); + assert.ok(execution_log.includes('gc_a_start'), 'gc_a should have started') + assert.ok(!execution_log.includes('gc_a_end'), 'gc_a should NOT have finished (timed out)') + assert.ok(execution_log.includes('gc_c_start'), 'gc_c should have started') + assert.ok(!execution_log.includes('gc_c_end'), 'gc_c should NOT have finished (timed out)') + assert.ok(execution_log.includes('gc_e_start'), 'gc_e should have started') + assert.ok(!execution_log.includes('gc_e_end'), 'gc_e should NOT have finished (timed out)') // These handlers started and progressed, then parent timeout interrupted: - assert.ok(execution_log.includes("top_main_start"), "top_main should have started"); - assert.ok(execution_log.includes("child_start"), "child should have started"); - assert.ok( - execution_log.includes("child_after_grandchild"), - "child should have continued after grandchild completed" - ); - assert.ok( - execution_log.includes("top_main_after_child"), - "top_main should have continued after child completed" - ); - assert.ok(!execution_log.includes("child_end"), "child should NOT have finished (timed out)"); - assert.ok(!execution_log.includes("top_main_end"), "top_main should NOT have finished (timed out)"); + assert.ok(execution_log.includes('top_main_start'), 'top_main should have started') + assert.ok(execution_log.includes('child_start'), 'child should have started') + assert.ok(execution_log.includes('child_after_grandchild'), 'child should have continued after grandchild completed') + assert.ok(execution_log.includes('top_main_after_child'), 'top_main should have continued after child completed') + assert.ok(!execution_log.includes('child_end'), 'child should NOT have finished (timed out)') + assert.ok(!execution_log.includes('top_main_end'), 'top_main should NOT have finished (timed out)') // These handlers never ran at all (cancelled before starting): - assert.ok(!execution_log.includes("queued_gc_start"), "queued_gc should never have started"); - assert.ok(!execution_log.includes("sibling_start"), "sibling should never have started"); + assert.ok(!execution_log.includes('queued_gc_start'), 'queued_gc should never have started') + assert.ok(!execution_log.includes('sibling_start'), 'sibling should never have started') // ── Parent-child tree structure ───────────────────────────────────── assert.ok( top.event_children.some((c) => c.event_id === child_ref!.event_id), - "ChildEvent should be in TopEvent.event_children" - ); + 'ChildEvent should be in TopEvent.event_children' + ) assert.ok( top.event_children.some((c) => c.event_id === sibling_ref!.event_id), - "SiblingEvent should be in TopEvent.event_children" - ); + 'SiblingEvent should be in TopEvent.event_children' + ) assert.ok( child_ref!.event_children.some((c) => c.event_id === grandchild_ref!.event_id), - "GrandchildEvent should be in ChildEvent.event_children" - ); + 'GrandchildEvent should be in ChildEvent.event_children' + ) assert.ok( child_ref!.event_children.some((c) => c.event_id === queued_grandchild_ref!.event_id), - "QueuedGrandchildEvent should be in ChildEvent.event_children" - ); + 'QueuedGrandchildEvent should be in ChildEvent.event_children' + ) // ── Timing invariants ────────────────────────────────────────────── // All events should have completion timestamps for (const evt of [top, child_ref!, grandchild_ref!, queued_grandchild_ref!, sibling_ref!]) { - assert.ok(evt.event_completed_at, `${evt.event_type} should have event_completed_at`); + assert.ok(evt.event_completed_at, `${evt.event_type} should have event_completed_at`) } // All handler results should have started_at and completed_at for (const result of top_results) { - assert.ok(result.started_at, `${result.handler_name} should have started_at`); - assert.ok(result.completed_at, `${result.handler_name} should have completed_at`); + assert.ok(result.started_at, `${result.handler_name} should have started_at`) + assert.ok(result.completed_at, `${result.handler_name} should have completed_at`) } for (const result of gc_results) { - assert.ok(result.started_at, `${result.handler_name} should have started_at`); - assert.ok(result.completed_at, `${result.handler_name} should have completed_at`); + assert.ok(result.started_at, `${result.handler_name} should have started_at`) + assert.ok(result.completed_at, `${result.handler_name} should have completed_at`) } -}); +}) // ============================================================================= // Verify the timeoutβ†’cancellation error chain is intact at every level. @@ -866,91 +832,82 @@ test("three-level timeout cascade with per-level timeouts and cascading cancella // 2-level chain where each level's cancellation error can be inspected. // ============================================================================= -test("cancellation error chain preserves parent_error references through hierarchy", async () => { - const OuterEvent = BaseEvent.extend("ErrorChainOuter", {}); - const InnerEvent = BaseEvent.extend("ErrorChainInner", {}); - const DeepEvent = BaseEvent.extend("ErrorChainDeep", {}); +test('cancellation error chain preserves parent_error references through hierarchy', async () => { + const OuterEvent = BaseEvent.extend('ErrorChainOuter', {}) + const InnerEvent = BaseEvent.extend('ErrorChainInner', {}) + const DeepEvent = BaseEvent.extend('ErrorChainDeep', {}) - const bus = new EventBus("ErrorChainBus", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial" - }); + const bus = new EventBus('ErrorChainBus', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) - let inner_ref: InstanceType | null = null; - let deep_ref: InstanceType | null = null; + let inner_ref: InstanceType | null = null + let deep_ref: InstanceType | null = null // DeepEvent handler: sleeps long, will be still pending when inner times out // Because DeepEvent is emitted but NOT awaited, it stays in the queue. const deep_handler = async () => { - await delay(200); - return "deep_done"; - }; + await delay(200) + return 'deep_done' + } // InnerEvent handler: emits DeepEvent (not awaited), then sleeps long β†’ times out const inner_handler = async (event: InstanceType) => { - deep_ref = event.bus?.emit(DeepEvent({ event_timeout: 0.5 }))!; - await delay(200); // interrupted by inner timeout - return "inner_done"; - }; + deep_ref = event.bus?.emit(DeepEvent({ event_timeout: 0.5 }))! + await delay(200) // interrupted by inner timeout + return 'inner_done' + } // OuterEvent handler: emits InnerEvent (awaited), then sleeps long β†’ times out const outer_handler = async (event: InstanceType) => { - inner_ref = event.bus?.emit(InnerEvent({ event_timeout: 0.04 }))!; - await inner_ref.done(); - await delay(200); // interrupted by outer timeout - return "outer_done"; - }; + inner_ref = event.bus?.emit(InnerEvent({ event_timeout: 0.04 }))! + await inner_ref.done() + await delay(200) // interrupted by outer timeout + return 'outer_done' + } - bus.on(OuterEvent, outer_handler); - bus.on(InnerEvent, inner_handler); - bus.on(DeepEvent, deep_handler); + bus.on(OuterEvent, outer_handler) + bus.on(InnerEvent, inner_handler) + bus.on(DeepEvent, deep_handler) - const outer = bus.dispatch(OuterEvent({ event_timeout: 0.15 })); - await outer.done(); - await bus.waitUntilIdle(); + const outer = bus.dispatch(OuterEvent({ event_timeout: 0.15 })) + await outer.done() + await bus.waitUntilIdle() // Outer handler timed out - const outer_result = Array.from(outer.event_results.values())[0]; - assert.equal(outer_result.status, "error"); - assert.ok(outer_result.error instanceof EventHandlerTimeoutError); - const outer_timeout = outer_result.error as EventHandlerTimeoutError; - + const outer_result = Array.from(outer.event_results.values())[0] + assert.equal(outer_result.status, 'error') + assert.ok(outer_result.error instanceof EventHandlerTimeoutError) // Inner handler timed out (its own 40ms timeout, not outer's) - assert.ok(inner_ref); - const inner_result = Array.from(inner_ref!.event_results.values())[0]; - assert.equal(inner_result.status, "error"); - assert.ok(inner_result.error instanceof EventHandlerTimeoutError); - const inner_timeout = inner_result.error as EventHandlerTimeoutError; + assert.ok(inner_ref) + const inner_result = Array.from(inner_ref!.event_results.values())[0] + assert.equal(inner_result.status, 'error') + assert.ok(inner_result.error instanceof EventHandlerTimeoutError) + const inner_timeout = inner_result.error as EventHandlerTimeoutError // Inner's timeout is from InnerEvent's own event_timeout (40ms), // not inherited from outer - assert.ok( - inner_timeout.message.includes("inner_handler"), - "Inner timeout should name inner_handler" - ); + assert.ok(inner_timeout.message.includes('inner_handler'), 'Inner timeout should name inner_handler') // DeepEvent was cancelled when inner_handler timed out. // The cancellation error should reference inner_handler's timeout (not outer's). - assert.ok(deep_ref); - const deep_result = Array.from(deep_ref!.event_results.values())[0]; - assert.equal(deep_result.status, "error"); + assert.ok(deep_ref) + const deep_result = Array.from(deep_ref!.event_results.values())[0] + assert.equal(deep_result.status, 'error') assert.ok( deep_result.error instanceof EventHandlerCancelledError, - "DeepEvent handler should be cancelled, not timed out (it never started)" - ); - const deep_cancel = deep_result.error as EventHandlerCancelledError; - assert.ok( - deep_cancel.parent_error instanceof EventHandlerTimeoutError, - "Cancellation should reference parent timeout" - ); + 'DeepEvent handler should be cancelled, not timed out (it never started)' + ) + const deep_cancel = deep_result.error as EventHandlerCancelledError + assert.ok(deep_cancel.parent_error instanceof EventHandlerTimeoutError, 'Cancellation should reference parent timeout') // The parent_error should be the INNER handler's timeout, because that's // the handler whose cancelPendingChildProcessing actually cancelled DeepEvent. assert.ok( - deep_cancel.parent_error.message.includes("inner_handler") || - deep_cancel.parent_error.message.includes("child_handler"), - "parent_error should reference the handler that directly caused cancellation" - ); -}); + deep_cancel.parent_error.message.includes('inner_handler') || deep_cancel.parent_error.message.includes('child_handler'), + 'parent_error should reference the handler that directly caused cancellation' + ) +}) // ============================================================================= // When a parent has a timeout but a child has event_timeout: null (no timeout), @@ -959,56 +916,56 @@ test("cancellation error chain preserves parent_error references through hierarc // This tests that cancellation works across timeout/no-timeout boundaries. // ============================================================================= -test("parent timeout cancels children that have no timeout of their own", async () => { - const ParentEvent = BaseEvent.extend("TimeoutBoundaryParent", {}); - const NoTimeoutChild = BaseEvent.extend("TimeoutBoundaryChild", {}); +test('parent timeout cancels children that have no timeout of their own', async () => { + const ParentEvent = BaseEvent.extend('TimeoutBoundaryParent', {}) + const NoTimeoutChild = BaseEvent.extend('TimeoutBoundaryChild', {}) - const bus = new EventBus("TimeoutBoundaryBus", { - event_concurrency: "bus-serial", - handler_concurrency: "bus-serial", - event_timeout: null // no bus-level default - }); + const bus = new EventBus('TimeoutBoundaryBus', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + event_timeout: null, // no bus-level default + }) - let child_ref: InstanceType | null = null; - let child_handler_ran = false; + let child_ref: InstanceType | null = null + let child_handler_ran = false // Child handler: would run forever but should be cancelled const child_slow_handler = async () => { - child_handler_ran = true; - await delay(500); - return "child_done"; - }; + child_handler_ran = true + await delay(500) + return 'child_done' + } // Parent handler: emits child (not awaited), then sleeps β†’ parent times out const parent_handler = async (event: InstanceType) => { // event_timeout: null means the child has no timeout of its own. // It would run forever if the parent didn't cancel it. - child_ref = event.bus?.emit(NoTimeoutChild({ event_timeout: null }))!; - await delay(200); - return "parent_done"; - }; + child_ref = event.bus?.emit(NoTimeoutChild({ event_timeout: null }))! + await delay(200) + return 'parent_done' + } - bus.on(ParentEvent, parent_handler); - bus.on(NoTimeoutChild, child_slow_handler); + bus.on(ParentEvent, parent_handler) + bus.on(NoTimeoutChild, child_slow_handler) - const parent = bus.dispatch(ParentEvent({ event_timeout: 0.03 })); - await parent.done(); - await bus.waitUntilIdle(); + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.03 })) + await parent.done() + await bus.waitUntilIdle() // Parent timed out - const parent_result = Array.from(parent.event_results.values())[0]; - assert.equal(parent_result.status, "error"); - assert.ok(parent_result.error instanceof EventHandlerTimeoutError); + const parent_result = Array.from(parent.event_results.values())[0] + assert.equal(parent_result.status, 'error') + assert.ok(parent_result.error instanceof EventHandlerTimeoutError) // Child should exist and be cancelled (it was in the queue, never started) - assert.ok(child_ref, "Child event should have been emitted"); - assert.equal(child_ref!.event_status, "completed"); - assert.equal(child_handler_ran, false, "Child handler should never have started"); + assert.ok(child_ref, 'Child event should have been emitted') + assert.equal(child_ref!.event_status, 'completed') + assert.equal(child_handler_ran, false, 'Child handler should never have started') - const child_results = Array.from(child_ref!.event_results.values()); - assert.equal(child_results.length, 1); + const child_results = Array.from(child_ref!.event_results.values()) + assert.equal(child_results.length, 1) assert.ok( child_results[0].error instanceof EventHandlerCancelledError, - "Child handler should be cancelled by parent timeout, even though it has no timeout" - ); -}); + 'Child handler should be cancelled by parent timeout, even though it has no timeout' + ) +}) diff --git a/bubus-ts/tests/typed_results.test.ts b/bubus-ts/tests/typed_results.test.ts index 36b568b..f498349 100644 --- a/bubus-ts/tests/typed_results.test.ts +++ b/bubus-ts/tests/typed_results.test.ts @@ -1,142 +1,142 @@ -import assert from "node:assert/strict"; -import { test } from "node:test"; +import assert from 'node:assert/strict' +import { test } from 'node:test' -import { z } from "zod"; +import { z } from 'zod' -import { BaseEvent, EventBus } from "../src/index.js"; +import { BaseEvent, EventBus } from '../src/index.js' const typed_result_schema = z.object({ value: z.string(), - count: z.number() -}); + count: z.number(), +}) -const TypedResultEvent = BaseEvent.extend("TypedResultEvent", { +const TypedResultEvent = BaseEvent.extend('TypedResultEvent', { event_result_schema: typed_result_schema, - event_result_type: "TypedResult" -}); + event_result_type: 'TypedResult', +}) -const StringResultEvent = BaseEvent.extend("StringResultEvent", { +const StringResultEvent = BaseEvent.extend('StringResultEvent', { event_result_schema: z.string(), - event_result_type: "string" -}); + event_result_type: 'string', +}) -const NumberResultEvent = BaseEvent.extend("NumberResultEvent", { +const NumberResultEvent = BaseEvent.extend('NumberResultEvent', { event_result_schema: z.number(), - event_result_type: "number" -}); + event_result_type: 'number', +}) -const ComplexResultEvent = BaseEvent.extend("ComplexResultEvent", { +const ComplexResultEvent = BaseEvent.extend('ComplexResultEvent', { event_result_schema: z.object({ items: z.array(z.string()), - metadata: z.record(z.string(), z.number()) - }) -}); + metadata: z.record(z.string(), z.number()), + }), +}) -const NoSchemaEvent = BaseEvent.extend("NoSchemaEvent", {}); +const NoSchemaEvent = BaseEvent.extend('NoSchemaEvent', {}) -test("typed result schema validates and parses handler result", async () => { - const bus = new EventBus("TypedResultBus"); +test('typed result schema validates and parses handler result', async () => { + const bus = new EventBus('TypedResultBus') - bus.on(TypedResultEvent, () => ({ value: "hello", count: 42 })); + bus.on(TypedResultEvent, () => ({ value: 'hello', count: 42 })) - const event = bus.dispatch(TypedResultEvent({})); - await event.done(); + const event = bus.dispatch(TypedResultEvent({})) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.deepEqual(result.result, { value: "hello", count: 42 }); - assert.equal(event.event_result_type, "TypedResult"); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { value: 'hello', count: 42 }) + assert.equal(event.event_result_type, 'TypedResult') +}) -test("built-in result schemas validate handler results", async () => { - const bus = new EventBus("BuiltinResultBus"); +test('built-in result schemas validate handler results', async () => { + const bus = new EventBus('BuiltinResultBus') - bus.on(StringResultEvent, () => "42"); - bus.on(NumberResultEvent, () => 123); + bus.on(StringResultEvent, () => '42') + bus.on(NumberResultEvent, () => 123) - const string_event = bus.dispatch(StringResultEvent({})); - const number_event = bus.dispatch(NumberResultEvent({})); - await string_event.done(); - await number_event.done(); + const string_event = bus.dispatch(StringResultEvent({})) + const number_event = bus.dispatch(NumberResultEvent({})) + await string_event.done() + await number_event.done() - const string_result = Array.from(string_event.event_results.values())[0]; - const number_result = Array.from(number_event.event_results.values())[0]; + const string_result = Array.from(string_event.event_results.values())[0] + const number_result = Array.from(number_event.event_results.values())[0] - assert.equal(string_result.status, "completed"); - assert.equal(string_result.result, "42"); - assert.equal(number_result.status, "completed"); - assert.equal(number_result.result, 123); -}); + assert.equal(string_result.status, 'completed') + assert.equal(string_result.result, '42') + assert.equal(number_result.status, 'completed') + assert.equal(number_result.result, 123) +}) -test("invalid handler result marks error when schema is defined", async () => { - const bus = new EventBus("ResultValidationErrorBus"); +test('invalid handler result marks error when schema is defined', async () => { + const bus = new EventBus('ResultValidationErrorBus') - bus.on(NumberResultEvent, () => "not_a_number"); + bus.on(NumberResultEvent, () => 'not_a_number') - const event = bus.dispatch(NumberResultEvent({})); - await event.done(); + const event = bus.dispatch(NumberResultEvent({})) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "error"); - assert.ok(result.error instanceof Error); - assert.ok(event.event_errors.length > 0); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof Error) + assert.ok(event.event_errors.length > 0) +}) -test("no schema leaves raw handler result untouched", async () => { - const bus = new EventBus("NoSchemaResultBus"); +test('no schema leaves raw handler result untouched', async () => { + const bus = new EventBus('NoSchemaResultBus') - bus.on(NoSchemaEvent, () => ({ raw: true })); + bus.on(NoSchemaEvent, () => ({ raw: true })) - const event = bus.dispatch(NoSchemaEvent({})); - await event.done(); + const event = bus.dispatch(NoSchemaEvent({})) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.deepEqual(result.result, { raw: true }); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { raw: true }) +}) -test("complex result schema validates nested data", async () => { - const bus = new EventBus("ComplexResultBus"); +test('complex result schema validates nested data', async () => { + const bus = new EventBus('ComplexResultBus') bus.on(ComplexResultEvent, () => ({ - items: ["a", "b"], - metadata: { a: 1, b: 2 } - })); + items: ['a', 'b'], + metadata: { a: 1, b: 2 }, + })) - const event = bus.dispatch(ComplexResultEvent({})); - await event.done(); + const event = bus.dispatch(ComplexResultEvent({})) + await event.done() - const result = Array.from(event.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.deepEqual(result.result, { items: ["a", "b"], metadata: { a: 1, b: 2 } }); -}); + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { items: ['a', 'b'], metadata: { a: 1, b: 2 } }) +}) -test("fromJSON converts event_result_schema into zod schema", async () => { - const bus = new EventBus("FromJsonResultBus"); +test('fromJSON converts event_result_schema into zod schema', async () => { + const bus = new EventBus('FromJsonResultBus') const original = TypedResultEvent({ event_result_schema: typed_result_schema, - event_result_type: "TypedResult" - }); - const json = original.toJSON(); + event_result_type: 'TypedResult', + }) + const json = original.toJSON() - const restored = TypedResultEvent.fromJSON?.(json) ?? TypedResultEvent(json as never); + const restored = TypedResultEvent.fromJSON?.(json) ?? TypedResultEvent(json as never) - assert.ok(restored.event_result_schema); - assert.equal(typeof (restored.event_result_schema as { safeParse?: unknown }).safeParse, "function"); + assert.ok(restored.event_result_schema) + assert.equal(typeof (restored.event_result_schema as { safeParse?: unknown }).safeParse, 'function') - bus.on(TypedResultEvent, () => ({ value: "from-json", count: 7 })); + bus.on(TypedResultEvent, () => ({ value: 'from-json', count: 7 })) - const dispatched = bus.dispatch(restored); - await dispatched.done(); + const dispatched = bus.dispatch(restored) + await dispatched.done() - const result = Array.from(dispatched.event_results.values())[0]; - assert.equal(result.status, "completed"); - assert.deepEqual(result.result, { value: "from-json", count: 7 }); -}); + const result = Array.from(dispatched.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { value: 'from-json', count: 7 }) +}) -test("roundtrip preserves complex result schema types", async () => { - const bus = new EventBus("RoundtripSchemaBus"); +test('roundtrip preserves complex result schema types', async () => { + const bus = new EventBus('RoundtripSchemaBus') const complex_schema = z.object({ title: z.string(), @@ -145,51 +145,49 @@ test("roundtrip preserves complex result schema types", async () => { active: z.boolean(), meta: z.object({ tags: z.array(z.string()), - rating: z.number() - }) - }); + rating: z.number(), + }), + }) - const ComplexRoundtripEvent = BaseEvent.extend("ComplexRoundtripEvent", { + const ComplexRoundtripEvent = BaseEvent.extend('ComplexRoundtripEvent', { event_result_schema: complex_schema, - event_result_type: "ComplexRoundtrip" - }); + event_result_type: 'ComplexRoundtrip', + }) const original = ComplexRoundtripEvent({ event_result_schema: complex_schema, - event_result_type: "ComplexRoundtrip" - }); + event_result_type: 'ComplexRoundtrip', + }) - const roundtripped = - ComplexRoundtripEvent.fromJSON?.(original.toJSON()) ?? - ComplexRoundtripEvent(original.toJSON() as never); + const roundtripped = ComplexRoundtripEvent.fromJSON?.(original.toJSON()) ?? ComplexRoundtripEvent(original.toJSON() as never) const zod_any = z as unknown as { - toJSONSchema?: (schema: unknown) => unknown; - }; - if (typeof zod_any.toJSONSchema === "function") { - const original_schema_json = zod_any.toJSONSchema(complex_schema); - const roundtrip_schema_json = zod_any.toJSONSchema(roundtripped.event_result_schema); - assert.deepEqual(roundtrip_schema_json, original_schema_json); + toJSONSchema?: (schema: unknown) => unknown + } + if (typeof zod_any.toJSONSchema === 'function') { + const original_schema_json = zod_any.toJSONSchema(complex_schema) + const roundtrip_schema_json = zod_any.toJSONSchema(roundtripped.event_result_schema) + assert.deepEqual(roundtrip_schema_json, original_schema_json) } bus.on(ComplexRoundtripEvent, () => ({ - title: "ok", + title: 'ok', count: 3, flags: [true, false, true], active: false, - meta: { tags: ["a", "b"], rating: 4 } - })); + meta: { tags: ['a', 'b'], rating: 4 }, + })) - const dispatched = bus.dispatch(roundtripped); - await dispatched.done(); + const dispatched = bus.dispatch(roundtripped) + await dispatched.done() - const result = Array.from(dispatched.event_results.values())[0]; - assert.equal(result.status, "completed"); + const result = Array.from(dispatched.event_results.values())[0] + assert.equal(result.status, 'completed') assert.deepEqual(result.result, { - title: "ok", + title: 'ok', count: 3, flags: [true, false, true], active: false, - meta: { tags: ["a", "b"], rating: 4 } - }); -}); + meta: { tags: ['a', 'b'], rating: 4 }, + }) +}) diff --git a/bubus-ts/tsconfig.json b/bubus-ts/tsconfig.json index 9071125..f653c22 100644 --- a/bubus-ts/tsconfig.json +++ b/bubus-ts/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "target": "ES2022", - "lib": ["ES2022", "DOM"], + "lib": ["ES2024", "DOM"], "module": "ESNext", "moduleResolution": "Bundler", "strict": true, From 82a346e74ee61fb793136ec3e3561fa18bd82793 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 6 Feb 2026 01:53:31 -0800 Subject: [PATCH 50/79] completed lock system refactor --- bubus-ts/TODOS.txt | 296 -------------- bubus-ts/src/base_event.ts | 17 +- bubus-ts/src/event_bus.ts | 386 ++++++++---------- bubus-ts/src/event_result.ts | 55 +-- bubus-ts/src/index.ts | 4 +- bubus-ts/src/lock_manager.ts | 355 ++++++++++++++++ bubus-ts/src/semaphores.ts | 91 ----- bubus-ts/src/types.ts | 2 +- bubus-ts/tests/comprehensive_patterns.test.ts | 85 ++-- bubus-ts/tests/eventbus_basics.test.ts | 77 +++- bubus-ts/tests/locking.test.ts | 69 +++- bubus-ts/tests/performance.test.ts | 18 +- bubus-ts/tests/timeout.test.ts | 272 +++++++++++- 13 files changed, 1013 insertions(+), 714 deletions(-) delete mode 100644 bubus-ts/TODOS.txt create mode 100644 bubus-ts/src/lock_manager.ts delete mode 100644 bubus-ts/src/semaphores.ts diff --git a/bubus-ts/TODOS.txt b/bubus-ts/TODOS.txt deleted file mode 100644 index 2e166b1..0000000 --- a/bubus-ts/TODOS.txt +++ /dev/null @@ -1,296 +0,0 @@ -Coordination Refactoring Plan -============================= -(Updated after timeout/limiter fixes and data-model cleanup landed) - -Code Quality Goals ------------------- -- Minimum unique abstractions -- Minimum fields that are directly mutated -- Fewest flags, state variables, stacks, sets, counters, callbacks, and signals -- Unified interfaces for concurrency decisions at handler, event, and bus level -- All locking-related logic in semaphores.ts and gates.ts, encapsulated from main flow -- Derive everything possible from event_results, event, bus.event_history, bus.handlers - (use getters rather than adding state variables) -- Clear, descriptive naming - -Files touched -------------- -- NEW: `src/gates.ts` -- EDIT: `src/semaphores.ts` (add HandlerLimiterLease) -- EDIT: `src/event_result.ts` -- EDIT: `src/base_event.ts` -- EDIT: `src/event_bus.ts` -- EDIT: `tests/comprehensive_patterns.test.ts` -- EDIT: `tests/timeout.test.ts` (limiter leak regression tests) - -No new exports from `index.ts` (all helpers remain internal). - -================================================================================ -COMPLETED -================================================================================ - -Done: Deferred migration - - `Deferred` type + `withResolvers()` live in `semaphores.ts` - - BaseEvent uses `_done: Deferred | null` - - `ensureDonePromise()` uses `withResolvers()` - - No separate `src/deferred.ts` needed - -Done: Limiter leak mitigation (partial) - - `runHandlerEntry` manually manages limiter acquire/release (no `runWithLimiter`) - - `handler_still_owns_limiter` check prevents double-release when limiter was yielded - - `_runImmediately` guards reacquire with `effective_result.status === "started"` - - IMPORTANT: race still exists and is NOT fully fixed yet - - window: `_runImmediately` checks `status === "started"` and then awaits - `limiter.acquire()`; handler may finish during that await, causing a leaked - reacquire token - - do not treat this as solved until Step 1 gate-based lease state machine is implemented - -Done: BaseEvent data-model cleanup - - Removed `event_created_at_ms` (redundant; use `Date.parse(event_created_at)`) - - Removed mutable `event_errors: unknown[]` array and `markFailed()` method - - `event_errors` is now a getter derived from `event_results` - - `event_children` is now a getter derived from `event_results` - -Done: Handler map consolidation - - `handlers_by_key` + `handlers_by_id` β†’ single `handlers: Map` - - `collectHandlers` uses two-pass ordering (exact-match first, wildcards second) - -Done: Event history consolidation - - `event_history: BaseEvent[]` + `event_history_by_id` β†’ single `event_history: Map` - -================================================================================ -REMAINING WORK -================================================================================ - -================================================================================ -0. Define gate namespaces and ownership boundaries (required design step) -================================================================================ - -Adopt a single naming model: -- `event_result.gate.*` for handler-execution transitions -- `event.gate.*` for event lifecycle transitions -- `event_bus.gate.*` for bus-level queue/idle/limiter coordination - -Required method names (locked): -- `event_result.gate`: - - `enterHandlerRun` - - `yieldPermitForChildRun` - - `reclaimPermitIfRunning` - - `exitHandlerRun` - - `getExecutionState` - - `runQueueJump` -- `event.gate`: - - `enterEventStarted` - - `completeIfDoneHandling` - - `waitForCompletion` - - `cancelPendingDescendants` -- `event_bus.gate`: - - `requestPause` - - `waitUntilResumed` - - `isPaused` - - `waitForIdle` - - `notifyIdleListeners` - - `getLimiterForEvent` - - `getLimiterForHandler` - -Important: do not re-introduce scattered fields like -`_runloop_hold_release`, `queue_jump_hold`, `_held_handler_limiter`, -`idle_waiters`, `idle_check_pending`, `idle_check_streak`, -`immediate_processing_stack_depth`, `immediate_processing_waiters`. - -================================================================================ -1. Implement `event_result.gate` and race-safe limiter ownership (required correctness work) -================================================================================ - -The current manual tracking (`handler_still_owns_limiter` + `status === "started"`) -still has a race and can leak permits. This is mandatory to fix first. - -Implementation shape: -- Add internal lease state machine in `src/semaphores.ts` - (or in `src/gates.ts` if colocated with other gate internals): - - state: `"held" | "yielded" | "closed"` - - race-safe reacquire behavior: if state becomes closed while awaiting acquire, - immediately release to avoid leaking a permit. -- `event_result.gate.enterHandlerRun(limiter)` claims execution ownership -- `event_result.gate.yieldPermitForChildRun()` releases permit only when currently held -- `event_result.gate.reclaimPermitIfRunning()` reacquires only when still running -- `event_result.gate.exitHandlerRun()` idempotently closes and releases if held -- `event_result.gate.runQueueJump(fn)` wraps yield β†’ run β†’ reclaim as one transition API -- `event_result.gate.getExecutionState()` is read-only debug/inspection - -Storage rule: -- keep gate-private mutable state off public EventResult fields -- use private state managed by gate internals (closure/private class/WeakMap) - -================================================================================ -2. Implement flat `event_bus.gate.*` coordination methods -================================================================================ - -Create `src/gates.ts` and move bus coordination internals behind `event_bus.gate`. - -`event_bus.gate.requestPause()`: -- increments pause depth -- returns idempotent release closure - -`event_bus.gate.waitUntilResumed()`: -- fast path if pause depth is 0 -- otherwise await waiter queue - -`event_bus.gate.isPaused()`: -- true while pause depth > 0 - -`event_bus.gate.waitForIdle()` + `event_bus.gate.notifyIdleListeners()`: -- encapsulate the existing two-snapshot idle confirmation pattern -- keep idle check scheduling private to the gate - -`event_bus.gate.getLimiterForEvent(event)` and -`event_bus.gate.getLimiterForHandler(event, options)`: -- move effective limiter resolution behind gate accessor methods -- preserve current precedence behavior - -Storage rule: -- bus coordination state (pause depth/waiters, idle waiters/check flags) is private to gate - -================================================================================ -3. Wire `event_bus.gate` into EventBus call sites -================================================================================ - -`src/event_bus.ts` call-site migration: -- `_runImmediately()`: - - replace direct queue-jump flag/field mutation with `result.gate.runQueueJump(...)` - - pause via `bus.gate.requestPause()` on each participating bus -- `runImmediatelyAcrossBuses()`: - - use `requestPause()` releases in `finally` -- `runloop()`: - - `if (this.gate.isPaused()) await this.gate.waitUntilResumed()` -- `waitUntilIdle()`: - - delegate to `this.gate.waitForIdle()` -- `scheduleEventProcessing().finally` and runloop exit path: - - call `this.gate.notifyIdleListeners()` -- `resolveEventLimiter` and `resolveHandlerLimiter`: - - fold into `this.gate.getLimiterForEvent(...)` and - `this.gate.getLimiterForHandler(...)` - -After migration remove old EventBus fields/methods: -- `idle_waiters`, `idle_check_pending`, `idle_check_streak` -- `immediate_processing_stack_depth`, `immediate_processing_waiters` -- `scheduleIdleCheck`, `resolveIdleWaitersIfDone`, - `releaseImmediateProcessingWaiters`, - `resolveEventLimiter`, `resolveHandlerLimiter` - -================================================================================ -4. Add `event.gate.*` lifecycle facade -================================================================================ - -In `src/base_event.ts`: -- add `event.gate.enterEventStarted()` as lifecycle transition wrapper -- add `event.gate.completeIfDoneHandling()` wrapper for completion checks -- add `event.gate.waitForCompletion()` wrapper around done promise -- add `event.gate.cancelPendingDescendants(reason)` lifecycle entry point - -Keep these as normal top-level getters on event (not gate methods): -- `event.event_children` (derived getter) -- `event.event_errors` (derived getter) - -Migration call sites: -- EventBus `processEvent()` uses `event.gate.enterEventStarted()` and - `event.gate.completeIfDoneHandling()` -- completion and parent notification paths use gate wrappers -- timeout cancellation paths call `event.gate.cancelPendingDescendants(reason)` - -================================================================================ -5. Tests and invariants update -================================================================================ - -Update queue-jump depth tests away from direct field access: -- remove assertions tied to `immediate_processing_stack_depth` -- assert pause semantics via `bus.gate.isPaused()` at equivalent checkpoints - -Add explicit limiter-race regressions: -- timeout during awaited `child.done()` does not leak permit -- next event still runs on same bus after timeout path -- nested queue-jump under timeout/cancellation remains permit-safe - -Keep/expand coverage for: -- cross-bus queue-jump ordering -- idle wait semantics -- forwarding + `event.bus` scoped behavior - -================================================================================ -6. Verification -================================================================================ - -Focused first: -- `node --expose-gc --test --import tsx tests/locking.test.ts` -- `node --expose-gc --test --import tsx tests/comprehensive_patterns.test.ts` -- `node --expose-gc --test --import tsx tests/timeout.test.ts` -- `node --expose-gc --test --import tsx tests/event_bus_proxy.test.ts` -- `node --expose-gc --test --import tsx tests/forwarding.test.ts` - -Then full suite: -- `pnpm test` - -================================================================================ -7. Implementation sequence (execution order) -================================================================================ - -1) Add gate surfaces first (no behavior change): -- Add `gate` accessors on EventBus/EventResult/BaseEvent. -- Keep internals on current logic temporarily so call sites can migrate safely. - -2) Implement `event_result.gate` with private execution state: -- Move permit ownership to gate-private state (`held/yielded/closed`). -- Route `_runImmediately` + `runHandlerEntry` permit transitions through: - `enterHandlerRun`, `yieldPermitForChildRun`, `reclaimPermitIfRunning`, - `exitHandlerRun`, `runQueueJump`. - -3) Migrate runloop pause to `event_bus.gate`: -- Replace queue-jump pause/depth fields with `requestPause`, - `waitUntilResumed`, `isPaused`. -- Keep release callbacks gate-internal; no public flag fields on EventResult. - -4) Migrate idle waiting to `event_bus.gate`: -- Replace idle waiters/check flags + scheduling methods with: - `waitForIdle`, `notifyIdleListeners`. -- Preserve two-snapshot confirmation semantics. - -5) Move limiter resolution behind `event_bus.gate`: -- Replace direct resolver call sites with: - `getLimiterForEvent`, `getLimiterForHandler`. -- Keep existing concurrency precedence behavior unchanged. - -6) Add `event.gate` lifecycle wrappers and switch call sites: -- Use `enterEventStarted`, `completeIfDoneHandling`, `waitForCompletion`, - `cancelPendingDescendants`. -- Keep `event.event_children` + `event.event_errors` as non-gate getters. - -7) Remove old scattered fields/methods: -- Delete queue-jump/idle/permit legacy fields and helper methods only after - all call sites use gates. - -8) Update tests in two passes: -- First migrate assertions from raw internal fields to gate semantics. -- Then add explicit limiter-race regressions (timeout + queue-jump leak checks). - -9) Verify after each phase: -- Run focused suites after each migration phase. -- Run full `pnpm test` after legacy field/method removal lands. - -================================================================================ -Net effect -================================================================================ - -API shape becomes explicit and namespaced: -- `event_result.gate.*` owns handler execution/permit transitions -- `event.gate.*` owns lifecycle transitions -- `event_bus.gate.*` owns runloop pause, idle waiting, and limiter resolution - -State ownership becomes centralized: -- no scattered coordination flags on EventResult/EventBus -- private mutable coordination state lives inside gate internals - -Correctness target after Step 1: -- impossible to double-release or leak handler permits on timeout + queue-jump races - -No new public package exports required: -- gate internals remain project-internal (`src/gates.ts`, `src/semaphores.ts`) diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index cb2d2a0..cb292d3 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -3,8 +3,8 @@ import { v7 as uuidv7 } from 'uuid' import type { EventBus } from './event_bus.js' import { EventResult } from './event_result.js' -import type { ConcurrencyMode, Deferred } from './semaphores.js' -import { CONCURRENCY_MODES, withResolvers } from './semaphores.js' +import type { ConcurrencyMode, Deferred } from './lock_manager.js' +import { CONCURRENCY_MODES, withResolvers } from './lock_manager.js' export const BaseEventSchema = z .object({ @@ -78,6 +78,9 @@ export class BaseEvent { event_status!: 'pending' | 'started' | 'completed' event_started_at?: string event_completed_at?: string + _event_created_at_ts!: number + _event_started_at_ts?: number + _event_completed_at_ts?: number bus?: EventBus event_concurrency?: ConcurrencyMode handler_concurrency?: ConcurrencyMode @@ -123,6 +126,7 @@ export class BaseEvent { this.event_result_schema = event_result_schema this.event_result_type = event_result_type this.event_results = new Map() + this._event_created_at_ts = monotonicNowMs() this._done = null this._dispatch_context = undefined @@ -255,6 +259,7 @@ export class BaseEvent { } this.event_status = 'started' this.event_started_at = BaseEvent.nextIsoTimestamp() + this._event_started_at_ts = monotonicNowMs() } markCompleted(): void { @@ -263,6 +268,7 @@ export class BaseEvent { } this.event_status = 'completed' this.event_completed_at = BaseEvent.nextIsoTimestamp() + this._event_completed_at_ts = monotonicNowMs() this._dispatch_context = null this.ensureDonePromise() this._done!.resolve(this) @@ -353,3 +359,10 @@ const to_json_schema = (schema: unknown): unknown => { } return undefined } + +const monotonicNowMs = (): number => { + if (typeof performance !== 'undefined' && typeof performance.now === 'function') { + return performance.now() + } + return Date.now() +} diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index c38ff57..bfe00fa 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -2,7 +2,14 @@ import { BaseEvent } from './base_event.js' import { EventResult } from './event_result.js' import { captureAsyncContext, runWithAsyncContext } from './async_context.js' import { v5 as uuidv5 } from 'uuid' -import { AsyncLimiter, type ConcurrencyMode, limiterForMode, resolveConcurrencyMode, runWithLimiter, withResolvers } from './semaphores.js' +import { AsyncSemaphore, type ConcurrencyMode, HandlerLock, LockManager, runWithSemaphore, withResolvers } from './lock_manager.js' + +const monotonicNowMs = (): number => { + if (typeof performance !== 'undefined' && typeof performance.now === 'function') { + return performance.now() + } + return Date.now() +} export class EventHandlerTimeoutError extends Error { event_type: string @@ -32,6 +39,22 @@ export class EventHandlerCancelledError extends Error { } } +export class EventHandlerAbortedError extends Error { + event_type: string + handler_name: string + parent_error: Error + event_result: EventResult + + constructor(message: string, params: { event_type: string; handler_name: string; parent_error: Error; event_result: EventResult }) { + super(message) + this.name = 'EventHandlerAbortedError' + this.event_type = params.event_type + this.handler_name = params.handler_name + this.parent_error = params.parent_error + this.event_result = params.event_result + } +} + import type { EventHandler, EventKey, FindOptions, HandlerOptions } from './types.js' type FindWaiter = { @@ -106,8 +129,8 @@ class EventBusInstanceRegistry { export class EventBus { static instances = new EventBusInstanceRegistry() - static global_event_limiter = new AsyncLimiter(1) - static global_handler_limiter = new AsyncLimiter(1) + static global_event_semaphore = new AsyncSemaphore(1) + static global_handler_semaphore = new AsyncSemaphore(1) static findEventById(event_id: string): BaseEvent | null { for (const bus of EventBus.instances) { const event = bus.event_history.get(event_id) @@ -123,30 +146,16 @@ export class EventBus { event_concurrency_default: ConcurrencyMode handler_concurrency_default: ConcurrencyMode event_timeout_default: number | null - bus_event_limiter: AsyncLimiter - bus_handler_limiter: AsyncLimiter + bus_event_semaphore: AsyncSemaphore + bus_handler_semaphore: AsyncSemaphore handlers: Map event_history: Map pending_event_queue: BaseEvent[] in_flight_event_ids: Set runloop_running: boolean - // Resolves for callers of waitUntilIdle(); only drained when idle is confirmed twice. - idle_waiters: Array<() => void> - // True while an idle check timeout is scheduled. - idle_check_pending: boolean - // Number of consecutive idle snapshots seen; must reach 2 to resolve waiters. - idle_check_streak: number + locks: LockManager // Pending find() callers waiting for a matching future event. find_waiters: Set - // Depth counter for "immediate processing" (queue-jump) inside handlers. - // While > 0, the runloop pauses to avoid processing unrelated events. - immediate_processing_stack_depth: number - // Runloop waiters that resume once immediate_processing_stack_depth returns to 0. - immediate_processing_waiters: Array<() => void> - // Stack of EventResults for handlers currently executing on this bus. - // Enables per-bus isInsideHandler() and gives _runImmediately access to the - // calling handler's result even when called on raw (non-proxied) events. - _event_result_stack: EventResult[] constructor(name: string = 'EventBus', options: EventBusOptions = {}) { this.name = name @@ -154,20 +163,24 @@ export class EventBus { this.event_concurrency_default = options.event_concurrency ?? 'bus-serial' this.handler_concurrency_default = options.handler_concurrency ?? 'bus-serial' this.event_timeout_default = options.event_timeout === undefined ? 60 : options.event_timeout - this.bus_event_limiter = new AsyncLimiter(1) - this.bus_handler_limiter = new AsyncLimiter(1) + this.bus_event_semaphore = new AsyncSemaphore(1) + this.bus_handler_semaphore = new AsyncSemaphore(1) this.handlers = new Map() this.event_history = new Map() this.pending_event_queue = [] this.in_flight_event_ids = new Set() this.runloop_running = false - this.idle_waiters = [] - this.idle_check_pending = false - this.idle_check_streak = 0 + this.locks = new LockManager({ + get_idle_snapshot: () => + this.pending_event_queue.length === 0 && this.in_flight_event_ids.size === 0 && !this.hasPendingResults() && !this.runloop_running, + get_event_concurrency_default: () => this.event_concurrency_default, + get_handler_concurrency_default: () => this.handler_concurrency_default, + get_bus_event_semaphore: () => this.bus_event_semaphore, + get_bus_handler_semaphore: () => this.bus_handler_semaphore, + get_global_event_semaphore: () => EventBus.global_event_semaphore, + get_global_handler_semaphore: () => EventBus.global_handler_semaphore, + }) this.find_waiters = new Set() - this.immediate_processing_stack_depth = 0 - this.immediate_processing_waiters = [] - this._event_result_stack = [] EventBus.instances.add(this) @@ -185,9 +198,7 @@ export class EventBus { this.pending_event_queue.length = 0 this.in_flight_event_ids.clear() this.find_waiters.clear() - this.idle_waiters.length = 0 - this.immediate_processing_waiters.length = 0 - this._event_result_stack.length = 0 + this.locks.clear() } on(event_key: EventKey | '*', handler: EventHandler, options: HandlerOptions = {}): void { @@ -366,10 +377,10 @@ export class EventBus { // Called when a handler does `await child.done()` β€” processes the child event // immediately ("queue-jump") instead of waiting for the runloop to pick it up. // - // Yield-and-reacquire: if the calling handler holds a handler concurrency limiter, + // Yield-and-reacquire: if the calling handler holds a handler concurrency semaphore, // we temporarily release it so child handlers on the same bus can acquire it // (preventing deadlock for bus-serial/global-serial modes). We re-acquire after - // the child completes so the parent handler can continue with the limiter held. + // the child completes so the parent handler can continue with the semaphore held. async _runImmediately(event: T, handler_result?: EventResult): Promise { const original_event = event._original_event ?? event // Find the parent handler's result: prefer the proxy-provided one (only if @@ -379,7 +390,7 @@ export class EventBus { const proxy_result = handler_result?.status === 'started' ? handler_result : undefined const effective_result = proxy_result ?? - this._event_result_stack[this._event_result_stack.length - 1] ?? + this.locks.getCurrentHandlerResult() ?? this._findInFlightAncestorResult(original_event) ?? undefined if (!effective_result) { @@ -387,24 +398,15 @@ export class EventBus { await original_event.waitForCompletion() return event } - if (!effective_result.queue_jump_hold) { - effective_result.queue_jump_hold = true - this.immediate_processing_stack_depth += 1 - } + this.locks.ensureQueueJumpPauseForResult(effective_result) if (original_event.event_status === 'completed') { return event } - // Yield the parent handler's limiter so child handlers can use it. - // Null out _held_handler_limiter so concurrent calls from the same handler - // (e.g. Promise.all([child1.done(), child2.done()])) don't double-release. - const limiter_to_yield = effective_result?._held_handler_limiter ?? null - if (limiter_to_yield) { - effective_result!._held_handler_limiter = null - limiter_to_yield.release() - } - - try { + const run_queue_jump = effective_result._lock + ? (fn: () => Promise) => effective_result._lock!.runQueueJump(fn) + : (fn: () => Promise) => fn() + return await run_queue_jump(async () => { if (original_event.event_status === 'started') { await this.runImmediatelyAcrossBuses(original_event) return event @@ -417,67 +419,11 @@ export class EventBus { await this.runImmediatelyAcrossBuses(original_event) return event - } finally { - // Re-acquire the parent handler's limiter before returning control. - // Only the call that actually released it will re-acquire. - // If the handler timed out while we were processing children, - // runHandlerEntry's finally has already run and the limiter is no longer - // needed β€” skip re-acquire to avoid leaking the limiter. - if (limiter_to_yield && effective_result!.status === 'started') { - await limiter_to_yield.acquire() - effective_result!._held_handler_limiter = limiter_to_yield - } - } - } - - async waitUntilIdle(): Promise { - if (this.isIdleSnapshot()) { - return - } - return new Promise((resolve) => { - this.idle_waiters.push(resolve) - this.scheduleIdleCheck() }) } - private scheduleIdleCheck(): void { - if (this.idle_check_pending) { - return - } - this.idle_check_pending = true - setTimeout(() => { - this.idle_check_pending = false - this.resolveIdleWaitersIfDone() - }, 0) - } - - private isIdleSnapshot(): boolean { - return ( - this.pending_event_queue.length === 0 && this.in_flight_event_ids.size === 0 && !this.hasPendingResults() && !this.runloop_running - ) - } - - private resolveIdleWaitersIfDone(): void { - if (!this.isIdleSnapshot()) { - this.idle_check_streak = 0 - if (this.idle_waiters.length > 0) { - this.scheduleIdleCheck() - } - return - } - this.idle_check_streak += 1 - if (this.idle_check_streak < 2) { - if (this.idle_waiters.length > 0) { - this.scheduleIdleCheck() - } - return - } - this.idle_check_streak = 0 - const idle_waiters = this.idle_waiters - this.idle_waiters = [] - for (const resolve of idle_waiters) { - resolve() - } + async waitUntilIdle(): Promise { + await this.locks.waitForIdle() } private hasPendingResults(): boolean { @@ -585,9 +531,9 @@ export class EventBus { // Per-bus check: true only if this specific bus has a handler on its stack. // For cross-bus queue-jumping, _runImmediately uses _findInFlightAncestorResult() // to walk up the parent event tree, and the bus proxy passes handler_result - // to _runImmediately so it can yield/reacquire the correct limiter. + // to _runImmediately so it can yield/reacquire the correct semaphore. isInsideHandler(): boolean { - return this._event_result_stack.length > 0 + return this.locks.isInsideHandlerContext() } // Walk up the parent event chain to find an in-flight ancestor handler result. @@ -609,16 +555,16 @@ export class EventBus { } // Processes a queue-jumped event across all buses that have it dispatched. - // Called from _runImmediately after the parent handler's limiter has been yielded. + // Called from _runImmediately after the parent handler's semaphore has been yielded. // - // Event limiter bypass: the initiating bus (this) always bypasses its event limiter + // Event semaphore bypass: the initiating bus (this) always bypasses its event semaphore // since we're inside a handler that already holds it. Other buses only bypass if - // they resolve to the same limiter instance (i.e. global-serial mode where all - // buses share EventBus.global_event_limiter). + // they resolve to the same semaphore instance (i.e. global-serial mode where all + // buses share EventBus.global_event_semaphore). // - // Handler limiters are NOT bypassed β€” child handlers must acquire the handler - // limiter normally. This works because _runImmediately already released the - // parent's handler limiter via yield-and-reacquire. + // Handler semaphores are NOT bypassed β€” child handlers must acquire the handler + // semaphore normally. This works because _runImmediately already released the + // parent's handler semaphore via yield-and-reacquire. private async runImmediatelyAcrossBuses(event: BaseEvent): Promise { const buses = this.getBusesForImmediateRun(event) if (buses.length === 0) { @@ -626,13 +572,11 @@ export class EventBus { return } - for (const bus of buses) { - bus.immediate_processing_stack_depth += 1 - } + const pause_releases = buses.map((bus) => bus.locks.requestPause()) - // Determine which event limiter the initiating bus resolves to, so we can + // Determine which event semaphore the initiating bus resolves to, so we can // detect when other buses share the same instance (global-serial). - const initiating_event_limiter = this.resolveEventLimiter(event) + const initiating_event_semaphore = this.locks.getSemaphoreForEvent(event) try { for (const bus of buses) { @@ -648,15 +592,15 @@ export class EventBus { } bus.in_flight_event_ids.add(event.event_id) - // Bypass event limiter on the initiating bus (we're already inside a handler + // Bypass event semaphore on the initiating bus (we're already inside a handler // that acquired it). For other buses, only bypass if they resolve to the same - // limiter instance (global-serial shares one limiter across all buses). - const bus_event_limiter = bus.resolveEventLimiter(event) - const should_bypass_event_limiter = - bus === this || (initiating_event_limiter !== null && bus_event_limiter === initiating_event_limiter) + // semaphore instance (global-serial shares one semaphore across all buses). + const bus_event_semaphore = bus.locks.getSemaphoreForEvent(event) + const should_bypass_event_semaphore = + bus === this || (initiating_event_semaphore !== null && bus_event_semaphore === initiating_event_semaphore) await bus.scheduleEventProcessing(event, { - bypass_event_limiters: should_bypass_event_limiter, + bypass_event_semaphores: should_bypass_event_semaphore, }) } @@ -664,9 +608,8 @@ export class EventBus { await event.waitForCompletion() } } finally { - for (const bus of buses) { - bus.immediate_processing_stack_depth = Math.max(0, bus.immediate_processing_stack_depth - 1) - bus.releaseImmediateProcessingWaiters() + for (const release of pause_releases) { + release() } } } @@ -701,24 +644,6 @@ export class EventBus { return ordered } - private releaseImmediateProcessingWaiters(): void { - if (this.immediate_processing_stack_depth !== 0 || this.immediate_processing_waiters.length === 0) { - return - } - const waiters = this.immediate_processing_waiters - this.immediate_processing_waiters = [] - for (const resolve of waiters) { - try { - // Each waiter is a Promise resolver created by runloop() while it was paused. - // Resolving it resumes that runloop tick so it can continue draining the queue. - resolve() - } catch (error) { - // Should never happen: these are internal Promise resolve callbacks. - console.error('[bubus] immediate processing waiter threw', error) - } - } - } - private startRunloop(): void { if (this.runloop_running) { return @@ -732,26 +657,26 @@ export class EventBus { private async scheduleEventProcessing( event: BaseEvent, options: { - bypass_event_limiters?: boolean - pre_acquired_limiter?: AsyncLimiter | null + bypass_event_semaphores?: boolean + pre_acquired_semaphore?: AsyncSemaphore | null } = {} ): Promise { try { - const limiter = options.bypass_event_limiters ? null : this.resolveEventLimiter(event) - const pre_acquired_limiter = options.pre_acquired_limiter ?? null - if (pre_acquired_limiter) { + const semaphore = options.bypass_event_semaphores ? null : this.locks.getSemaphoreForEvent(event) + const pre_acquired_semaphore = options.pre_acquired_semaphore ?? null + if (pre_acquired_semaphore) { await this.processEvent(event) } else { - await runWithLimiter(limiter, async () => { + await runWithSemaphore(semaphore, async () => { await this.processEvent(event) }) } } finally { - if (options.pre_acquired_limiter) { - options.pre_acquired_limiter.release() + if (options.pre_acquired_semaphore) { + options.pre_acquired_semaphore.release() } this.in_flight_event_ids.delete(event.event_id) - this.resolveIdleWaitersIfDone() + this.locks.notifyIdleListeners() } } @@ -759,10 +684,8 @@ export class EventBus { for (;;) { while (this.pending_event_queue.length > 0) { await Promise.resolve() - if (this.immediate_processing_stack_depth > 0) { - await new Promise((resolve) => { - this.immediate_processing_waiters.push(resolve) - }) + if (this.locks.isPaused()) { + await this.locks.waitUntilResumed() continue } const next_event = this.pending_event_queue[0] @@ -774,23 +697,23 @@ export class EventBus { this.pending_event_queue.shift() continue } - let pre_acquired_limiter: AsyncLimiter | null = null - const event_limiter = this.resolveEventLimiter(original_event) - if (event_limiter) { - await event_limiter.acquire() - pre_acquired_limiter = event_limiter + let pre_acquired_semaphore: AsyncSemaphore | null = null + const event_semaphore = this.locks.getSemaphoreForEvent(original_event) + if (event_semaphore) { + await event_semaphore.acquire() + pre_acquired_semaphore = event_semaphore } this.pending_event_queue.shift() if (this.in_flight_event_ids.has(original_event.event_id)) { - if (pre_acquired_limiter) { - pre_acquired_limiter.release() + if (pre_acquired_semaphore) { + pre_acquired_semaphore.release() } continue } this.in_flight_event_ids.add(original_event.event_id) void this.scheduleEventProcessing(original_event, { - bypass_event_limiters: true, - pre_acquired_limiter, + bypass_event_semaphores: true, + pre_acquired_semaphore, }) await Promise.resolve() } @@ -799,7 +722,7 @@ export class EventBus { this.startRunloop() return } - this.resolveIdleWaitersIfDone() + this.locks.notifyIdleListeners() return } } @@ -818,8 +741,8 @@ export class EventBus { if (event.event_status === 'completed') { return } - const started_at = event.event_started_at ?? event.event_created_at - const elapsed_ms = Date.now() - Date.parse(started_at) + const started_at_ts = event._event_started_at_ts ?? event._event_created_at_ts ?? monotonicNowMs() + const elapsed_ms = Math.max(0, monotonicNowMs() - started_at_ts) const elapsed_seconds = (elapsed_ms / 1000).toFixed(1) console.warn( `[bubus] Possible deadlock: ${event.event_type}#${event.event_id} still ${event.event_status} on ${this.name} after ${elapsed_seconds}s (timeout ${event.event_timeout}s)` @@ -844,49 +767,35 @@ export class EventBus { } } - private resolveEventLimiter(event: BaseEvent): AsyncLimiter | null { - const resolved = resolveConcurrencyMode(event.event_concurrency, this.event_concurrency_default) - return limiterForMode(resolved, EventBus.global_event_limiter, this.bus_event_limiter) - } - - private resolveHandlerLimiter(event: BaseEvent, options?: HandlerOptions): AsyncLimiter | null { - const event_override = event.handler_concurrency && event.handler_concurrency !== 'auto' ? event.handler_concurrency : undefined - const handler_override = - options?.handler_concurrency && options.handler_concurrency !== 'auto' ? options.handler_concurrency : undefined - const fallback = this.handler_concurrency_default - const resolved = resolveConcurrencyMode(event_override ?? handler_override ?? fallback, fallback) - return limiterForMode(resolved, EventBus.global_handler_limiter, this.bus_handler_limiter) - } - - // Manually manages the handler concurrency limiter instead of using runWithLimiter, - // because _runImmediately may temporarily yield it during queue-jumping. If the handler - // times out while the limiter is yielded, runWithLimiter's unconditional release() would - // double-release (and _runImmediately's later re-acquire would leak). By tracking - // _held_handler_limiter, we only release if we still own the limiter. + // Manually manages the handler concurrency semaphore instead of using runWithSemaphore, + // because _runImmediately may temporarily yield it during queue-jumping. private async runHandlerEntry(event: BaseEvent, handler: EventHandler, result: EventResult, options?: HandlerOptions): Promise { if (result.status === 'error' && result.error instanceof EventHandlerCancelledError) { return } const handler_event = this._getBusScopedEvent(event, result) - const limiter = this.resolveHandlerLimiter(event, options) + const semaphore = this.locks.getSemaphoreForHandler(event, options) - if (limiter) { - await limiter.acquire() + if (semaphore) { + await semaphore.acquire() } if (result.status === 'error' && result.error instanceof EventHandlerCancelledError) { - if (limiter) limiter.release() + if (semaphore) semaphore.release() return } - // Track which limiter this handler holds so _runImmediately can yield it - // (release before child processing, re-acquire after) to prevent deadlock. - result._held_handler_limiter = limiter - this._event_result_stack.push(result) + if (result._lock) result._lock.exitHandlerRun() + result._lock = new HandlerLock(semaphore) + this.locks.enterHandlerContext(result) try { result.markStarted() - const handler_result = await this.runHandlerWithTimeout(event, handler, handler_event) + const abort_promise = result.ensureAbortSignal() + const handler_result = await Promise.race([ + this.runHandlerWithTimeout(event, handler, handler_event), + abort_promise, + ]) if (event.event_result_schema) { const parsed = event.event_result_schema.safeParse(handler_result) if (parsed.success) { @@ -906,27 +815,15 @@ export class EventBus { handler_name: result.handler_name, parent_error: error, }) - this.cancelPendingChildProcessing(event, cancelled_error) + this.cancelPendingDescendants(event, cancelled_error) } else { result.markError(error) } } finally { - // If _runImmediately yielded our limiter (_held_handler_limiter is null), it was - // already released. Only release if we still own it (normal completion or no yield). - const handler_still_owns_limiter = result._held_handler_limiter !== null - result._held_handler_limiter = null - const stack_idx = this._event_result_stack.indexOf(result) - if (stack_idx >= 0) { - this._event_result_stack.splice(stack_idx, 1) - } - if (result.queue_jump_hold) { - result.queue_jump_hold = false - this.immediate_processing_stack_depth = Math.max(0, this.immediate_processing_stack_depth - 1) - this.releaseImmediateProcessingWaiters() - } - if (limiter && handler_still_owns_limiter) { - limiter.release() - } + result._abort = null + result._lock?.exitHandlerRun() + this.locks.exitHandlerContext(result) + this.locks.releaseQueueJumpPauseForResult(result) } } @@ -1072,7 +969,8 @@ export class EventBus { return scoped as T } - private cancelPendingChildProcessing(event: BaseEvent, error: EventHandlerCancelledError): void { + cancelPendingDescendants(event: BaseEvent, reason: unknown): void { + const cancellation_error = this.normalizeCancellationError(event, reason) const visited = new Set() const cancel_child = (child: BaseEvent): void => { const original_child = child._original_event ?? child @@ -1081,17 +979,27 @@ export class EventBus { } visited.add(original_child.event_id) + // Depth-first: cancel grandchildren before parent so + // eventAreAllChildrenComplete() returns true when we get back up. + for (const grandchild of original_child.event_children) { + cancel_child(grandchild) + } + const path = Array.isArray(original_child.event_path) ? original_child.event_path : [] const buses_to_cancel = new Set(path) for (const bus of EventBus.instances) { if (!buses_to_cancel.has(bus.name)) { continue } - bus.cancelEventOnBus(original_child, error) + bus.cancelEventOnBus(original_child, cancellation_error) } - for (const grandchild of original_child.event_children) { - cancel_child(grandchild) + // Force-complete the child event. In JS we can't stop running async + // handlers, but markCompleted() resolves the done() promise so callers + // aren't blocked waiting for background work to finish. The background + // handler's eventual markCompleted/markError is a no-op (terminal guard). + if (original_child.event_status !== 'completed') { + original_child.markCompleted() } } @@ -1100,6 +1008,19 @@ export class EventBus { } } + private normalizeCancellationError(event: BaseEvent, reason: unknown): EventHandlerCancelledError { + if (reason instanceof EventHandlerCancelledError) { + return reason + } + + const parent_error = reason instanceof Error ? reason : new Error(String(reason)) + return new EventHandlerCancelledError(`Cancelled pending handler due to ancestor cancellation: ${parent_error.message}`, { + event_type: event.event_type, + handler_name: 'unknown', + parent_error, + }) + } + private cancelEventOnBus(event: BaseEvent, error: EventHandlerCancelledError): void { const original_event = event._original_event ?? event const handler_entries = this.createPendingHandlerResults(original_event) @@ -1108,6 +1029,29 @@ export class EventBus { if (entry.result.status === 'pending') { entry.result.markError(error) updated = true + } else if (entry.result.status === 'started') { + // Abort running handlers. In JS we can't actually stop a running async + // function, but marking it as error means the event system treats it as + // done. The background handler will finish silently (its markCompleted/ + // markError call is a no-op once in terminal state). + // + // Exit handler-run ownership immediately so any held lock is released. + // If reacquire is currently pending, exit closes ownership and the + // reacquire path auto-releases when it wakes. + entry.result._lock?.exitHandlerRun() + + const aborted_error = new EventHandlerAbortedError( + `Aborted running handler due to parent timeout: ${error.message}`, + { + event_type: original_event.event_type, + handler_name: entry.result.handler_name, + parent_error: error.parent_error, + event_result: entry.result, + } + ) + entry.result.markError(aborted_error) + entry.result.signalAbort(aborted_error) + updated = true } } diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts index d669a22..6a47d68 100644 --- a/bubus-ts/src/event_result.ts +++ b/bubus-ts/src/event_result.ts @@ -1,7 +1,8 @@ import { v7 as uuidv7 } from 'uuid' import type { BaseEvent } from './base_event.js' -import type { AsyncLimiter } from './semaphores.js' +import { HandlerLock, withResolvers } from './lock_manager.js' +import type { Deferred } from './lock_manager.js' export type EventResultStatus = 'pending' | 'started' | 'completed' | 'error' @@ -18,28 +19,13 @@ export class EventResult { result?: unknown error?: unknown event_children: BaseEvent[] - // Tracks whether this handler's execution has triggered a queue-jump via done(). - // - // Lifecycle: - // 1. Starts as `false` when the EventResult is created. - // 2. Set to `true` in _runImmediately() when the handler (or its raw event's - // done()) triggers immediate processing. At the same time, - // immediate_processing_stack_depth is incremented by 1 on the bus. - // The guard (!queue_jump_hold) prevents double-incrementing if the - // handler calls done() on multiple children. - // 3. Checked in runHandlerEntry()'s finally block: if true, decrements - // immediate_processing_stack_depth and releases runloop waiters. - // This keeps the runloop paused between when runImmediatelyAcrossBuses() - // returns (its own try/finally decrements) and when the handler itself - // finishes β€” without this hold, the runloop would resume prematurely - // while the handler is still executing after `await child.done()`. - // 4. Reset to `false` in the same finally block after decrementing. - queue_jump_hold: boolean - // The handler concurrency limiter currently held by this handler execution. - // Set by runHandlerEntry so that _runImmediately can temporarily release it - // (yield-and-reacquire) to let child event handlers use the same limiter - // without deadlocking. - _held_handler_limiter: AsyncLimiter | null + // Abort signal: created when handler starts, rejected by signalAbort() to + // interrupt runHandlerEntry's await via Promise.race. + _abort: Deferred | null + // Handler lock: tracks ownership of the handler concurrency semaphore + // during handler execution. Set by EventBus.runHandlerEntry, used by + // _runImmediately for yield-and-reacquire during queue-jumps. + _lock: HandlerLock | null constructor(params: { event_id: string; handler_id: string; handler_name: string; handler_file_path?: string; eventbus_name: string }) { this.id = uuidv7() @@ -50,8 +36,25 @@ export class EventResult { this.handler_file_path = params.handler_file_path this.eventbus_name = params.eventbus_name this.event_children = [] - this.queue_jump_hold = false - this._held_handler_limiter = null + this._abort = null + this._lock = null + } + + // Create the abort deferred so runHandlerEntry can race against it. + ensureAbortSignal(): Promise { + if (!this._abort) { + this._abort = withResolvers() + } + return this._abort.promise + } + + // Reject the abort promise, causing runHandlerEntry's Promise.race to + // throw immediately β€” even if the handler has no timeout. + signalAbort(error: Error): void { + if (this._abort) { + this._abort.reject(error) + this._abort = null + } } markStarted(): void { @@ -60,12 +63,14 @@ export class EventResult { } markCompleted(result: unknown): void { + if (this.status === 'completed' || this.status === 'error') return this.status = 'completed' this.result = result this.completed_at = new Date().toISOString() } markError(error: unknown): void { + if (this.status === 'completed' || this.status === 'error') return this.status = 'error' this.error = error this.completed_at = new Date().toISOString() diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts index ea0071d..f57b2ea 100644 --- a/bubus-ts/src/index.ts +++ b/bubus-ts/src/index.ts @@ -1,5 +1,5 @@ export { BaseEvent, BaseEventSchema } from './base_event.js' export { EventResult } from './event_result.js' -export { EventBus, EventHandlerTimeoutError, EventHandlerCancelledError } from './event_bus.js' -export type { ConcurrencyMode } from './semaphores.js' +export { EventBus, EventHandlerTimeoutError, EventHandlerCancelledError, EventHandlerAbortedError } from './event_bus.js' +export type { ConcurrencyMode } from './lock_manager.js' export type { EventClass, EventHandler, EventKey, HandlerOptions, EventStatus, FindOptions, FindWindow } from './types.js' diff --git a/bubus-ts/src/lock_manager.ts b/bubus-ts/src/lock_manager.ts new file mode 100644 index 0000000..3d0f278 --- /dev/null +++ b/bubus-ts/src/lock_manager.ts @@ -0,0 +1,355 @@ +import type { BaseEvent } from './base_event.js' +import type { EventResult } from './event_result.js' +import type { HandlerOptions } from './types.js' + +// ─── Deferred / withResolvers ──────────────────────────────────────────────── + +export type Deferred = { + promise: Promise + resolve: (value: T | PromiseLike) => void + reject: (reason?: unknown) => void +} + +export const withResolvers = (): Deferred => { + if (typeof Promise.withResolvers === 'function') { + return Promise.withResolvers() + } + let resolve!: (value: T | PromiseLike) => void + let reject!: (reason?: unknown) => void + const promise = new Promise((resolve_fn, reject_fn) => { + resolve = resolve_fn + reject = reject_fn + }) + return { promise, resolve, reject } +} + +// ─── Concurrency modes ────────────────────────────────────────────────────── + +export const CONCURRENCY_MODES = ['global-serial', 'bus-serial', 'parallel', 'auto'] as const +export type ConcurrencyMode = (typeof CONCURRENCY_MODES)[number] + +export const resolveConcurrencyMode = (mode: ConcurrencyMode | undefined, fallback: ConcurrencyMode): ConcurrencyMode => { + const normalized_fallback = fallback === 'auto' ? 'bus-serial' : fallback + if (!mode || mode === 'auto') { + return normalized_fallback + } + return mode +} + +// ─── AsyncSemaphore ────────────────────────────────────────────────────────── + +export class AsyncSemaphore { + size: number + in_use: number + waiters: Array<() => void> + + constructor(size: number) { + this.size = size + this.in_use = 0 + this.waiters = [] + } + + async acquire(): Promise { + if (this.size === Infinity) { + return + } + if (this.in_use < this.size) { + this.in_use += 1 + return + } + await new Promise((resolve) => { + this.waiters.push(resolve) + }) + this.in_use += 1 + } + + release(): void { + if (this.size === Infinity) { + return + } + this.in_use = Math.max(0, this.in_use - 1) + const next = this.waiters.shift() + if (next) { + next() + } + } +} + +export const semaphoreForMode = (mode: ConcurrencyMode, global_semaphore: AsyncSemaphore, bus_semaphore: AsyncSemaphore): AsyncSemaphore | null => { + if (mode === 'parallel') { + return null + } + if (mode === 'global-serial') { + return global_semaphore + } + if (mode === 'bus-serial') { + return bus_semaphore + } + return bus_semaphore +} + +export const runWithSemaphore = async (semaphore: AsyncSemaphore | null, fn: () => Promise): Promise => { + if (!semaphore) { + return await fn() + } + await semaphore.acquire() + try { + return await fn() + } finally { + semaphore.release() + } +} + +// ─── HandlerLock ───────────────────────────────────────────────────────────── + +export type HandlerExecutionState = 'held' | 'yielded' | 'closed' + +// Tracks a single handler execution's ownership of a semaphore lock. +// Reacquire is race-safe: if the handler exits while waiting to reclaim, +// the reclaimed lock is immediately released to avoid leaks. +export class HandlerLock { + private semaphore: AsyncSemaphore | null + private state: HandlerExecutionState + + constructor(semaphore: AsyncSemaphore | null) { + this.semaphore = semaphore + this.state = 'held' + } + + getExecutionState(): HandlerExecutionState { + return this.state + } + + yieldHandlerLockForChildRun(): boolean { + if (!this.semaphore || this.state !== 'held') { + return false + } + this.state = 'yielded' + this.semaphore.release() + return true + } + + async reclaimHandlerLockIfRunning(): Promise { + if (!this.semaphore || this.state !== 'yielded') { + return false + } + await this.semaphore.acquire() + if (this.state !== 'yielded') { + // Handler exited while this reacquire was pending. + this.semaphore.release() + return false + } + this.state = 'held' + return true + } + + exitHandlerRun(): void { + if (this.state === 'closed') { + return + } + const should_release = !!this.semaphore && this.state === 'held' + this.state = 'closed' + if (should_release) { + this.semaphore!.release() + } + } + + async runQueueJump(fn: () => Promise): Promise { + const yielded = this.yieldHandlerLockForChildRun() + try { + return await fn() + } finally { + if (yielded) { + await this.reclaimHandlerLockIfRunning() + } + } + } +} + +// ─── LockManager ───────────────────────────────────────────────────────────── + +type LockManagerOptions = { + get_idle_snapshot: () => boolean + get_event_concurrency_default: () => ConcurrencyMode + get_handler_concurrency_default: () => ConcurrencyMode + get_bus_event_semaphore: () => AsyncSemaphore + get_bus_handler_semaphore: () => AsyncSemaphore + get_global_event_semaphore: () => AsyncSemaphore + get_global_handler_semaphore: () => AsyncSemaphore +} + +export class LockManager { + private get_idle_snapshot: () => boolean + private get_event_concurrency_default: () => ConcurrencyMode + private get_handler_concurrency_default: () => ConcurrencyMode + private get_bus_event_semaphore: () => AsyncSemaphore + private get_bus_handler_semaphore: () => AsyncSemaphore + private get_global_event_semaphore: () => AsyncSemaphore + private get_global_handler_semaphore: () => AsyncSemaphore + + private pause_depth: number + private pause_waiters: Array<() => void> + private queue_jump_pause_releases: WeakMap void> + private active_handler_results: EventResult[] + + private idle_waiters: Array<() => void> + private idle_check_pending: boolean + private idle_check_streak: number + + constructor(options: LockManagerOptions) { + this.get_idle_snapshot = options.get_idle_snapshot + this.get_event_concurrency_default = options.get_event_concurrency_default + this.get_handler_concurrency_default = options.get_handler_concurrency_default + this.get_bus_event_semaphore = options.get_bus_event_semaphore + this.get_bus_handler_semaphore = options.get_bus_handler_semaphore + this.get_global_event_semaphore = options.get_global_event_semaphore + this.get_global_handler_semaphore = options.get_global_handler_semaphore + + this.pause_depth = 0 + this.pause_waiters = [] + this.queue_jump_pause_releases = new WeakMap() + this.active_handler_results = [] + + this.idle_waiters = [] + this.idle_check_pending = false + this.idle_check_streak = 0 + } + + requestPause(): () => void { + this.pause_depth += 1 + let released = false + return () => { + if (released) { + return + } + released = true + this.pause_depth = Math.max(0, this.pause_depth - 1) + if (this.pause_depth !== 0) { + return + } + const waiters = this.pause_waiters + this.pause_waiters = [] + for (const resolve of waiters) { + resolve() + } + } + } + + waitUntilResumed(): Promise { + if (this.pause_depth === 0) { + return Promise.resolve() + } + return new Promise((resolve) => { + this.pause_waiters.push(resolve) + }) + } + + isPaused(): boolean { + return this.pause_depth > 0 + } + + enterHandlerContext(result: EventResult): void { + this.active_handler_results.push(result) + } + + exitHandlerContext(result: EventResult): void { + const idx = this.active_handler_results.indexOf(result) + if (idx >= 0) { + this.active_handler_results.splice(idx, 1) + } + } + + getCurrentHandlerResult(): EventResult | undefined { + return this.active_handler_results[this.active_handler_results.length - 1] + } + + isInsideHandlerContext(): boolean { + return this.active_handler_results.length > 0 + } + + ensureQueueJumpPauseForResult(result: EventResult): void { + if (this.queue_jump_pause_releases.has(result)) { + return + } + this.queue_jump_pause_releases.set(result, this.requestPause()) + } + + releaseQueueJumpPauseForResult(result: EventResult): void { + const release_pause = this.queue_jump_pause_releases.get(result) + if (!release_pause) { + return + } + this.queue_jump_pause_releases.delete(result) + release_pause() + } + + waitForIdle(): Promise { + if (this.get_idle_snapshot()) { + return Promise.resolve() + } + return new Promise((resolve) => { + this.idle_waiters.push(resolve) + this.scheduleIdleCheck() + }) + } + + notifyIdleListeners(): void { + if (!this.get_idle_snapshot()) { + this.idle_check_streak = 0 + if (this.idle_waiters.length > 0) { + this.scheduleIdleCheck() + } + return + } + + this.idle_check_streak += 1 + if (this.idle_check_streak < 2) { + if (this.idle_waiters.length > 0) { + this.scheduleIdleCheck() + } + return + } + + this.idle_check_streak = 0 + const waiters = this.idle_waiters + this.idle_waiters = [] + for (const resolve of waiters) { + resolve() + } + } + + getSemaphoreForEvent(event: BaseEvent): AsyncSemaphore | null { + const resolved = resolveConcurrencyMode(event.event_concurrency, this.get_event_concurrency_default()) + return semaphoreForMode(resolved, this.get_global_event_semaphore(), this.get_bus_event_semaphore()) + } + + getSemaphoreForHandler(event: BaseEvent, options?: HandlerOptions): AsyncSemaphore | null { + const event_override = event.handler_concurrency && event.handler_concurrency !== 'auto' ? event.handler_concurrency : undefined + const handler_override = + options?.handler_concurrency && options.handler_concurrency !== 'auto' ? options.handler_concurrency : undefined + const fallback = this.get_handler_concurrency_default() + const resolved = resolveConcurrencyMode(event_override ?? handler_override ?? fallback, fallback) + return semaphoreForMode(resolved, this.get_global_handler_semaphore(), this.get_bus_handler_semaphore()) + } + + clear(): void { + this.pause_depth = 0 + this.pause_waiters = [] + this.queue_jump_pause_releases = new WeakMap() + this.active_handler_results = [] + this.idle_waiters = [] + this.idle_check_pending = false + this.idle_check_streak = 0 + } + + private scheduleIdleCheck(): void { + if (this.idle_check_pending) { + return + } + this.idle_check_pending = true + setTimeout(() => { + this.idle_check_pending = false + this.notifyIdleListeners() + }, 0) + } +} diff --git a/bubus-ts/src/semaphores.ts b/bubus-ts/src/semaphores.ts deleted file mode 100644 index 3693389..0000000 --- a/bubus-ts/src/semaphores.ts +++ /dev/null @@ -1,91 +0,0 @@ -export type Deferred = { - promise: Promise - resolve: (value: T | PromiseLike) => void - reject: (reason?: unknown) => void -} - -export const withResolvers = (): Deferred => { - if (typeof Promise.withResolvers === 'function') { - return Promise.withResolvers() - } - let resolve!: (value: T | PromiseLike) => void - let reject!: (reason?: unknown) => void - const promise = new Promise((resolve_fn, reject_fn) => { - resolve = resolve_fn - reject = reject_fn - }) - return { promise, resolve, reject } -} - -export const CONCURRENCY_MODES = ['global-serial', 'bus-serial', 'parallel', 'auto'] as const -export type ConcurrencyMode = (typeof CONCURRENCY_MODES)[number] - -export class AsyncLimiter { - size: number - in_use: number - waiters: Array<() => void> - - constructor(size: number) { - this.size = size - this.in_use = 0 - this.waiters = [] - } - - async acquire(): Promise { - if (this.size === Infinity) { - return - } - if (this.in_use < this.size) { - this.in_use += 1 - return - } - await new Promise((resolve) => { - this.waiters.push(resolve) - }) - this.in_use += 1 - } - - release(): void { - if (this.size === Infinity) { - return - } - this.in_use = Math.max(0, this.in_use - 1) - const next = this.waiters.shift() - if (next) { - next() - } - } -} - -export const resolveConcurrencyMode = (mode: ConcurrencyMode | undefined, fallback: ConcurrencyMode): ConcurrencyMode => { - const normalized_fallback = fallback === 'auto' ? 'bus-serial' : fallback - if (!mode || mode === 'auto') { - return normalized_fallback - } - return mode -} - -export const limiterForMode = (mode: ConcurrencyMode, global_limiter: AsyncLimiter, bus_limiter: AsyncLimiter): AsyncLimiter | null => { - if (mode === 'parallel') { - return null - } - if (mode === 'global-serial') { - return global_limiter - } - if (mode === 'bus-serial') { - return bus_limiter - } - return bus_limiter -} - -export const runWithLimiter = async (limiter: AsyncLimiter | null, fn: () => Promise): Promise => { - if (!limiter) { - return await fn() - } - await limiter.acquire() - try { - return await fn() - } finally { - limiter.release() - } -} diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts index 5f44cdf..c78e16f 100644 --- a/bubus-ts/src/types.ts +++ b/bubus-ts/src/types.ts @@ -1,5 +1,5 @@ import type { BaseEvent } from './base_event.js' -import type { ConcurrencyMode } from './semaphores.js' +import type { ConcurrencyMode } from './lock_manager.js' export type EventStatus = 'pending' | 'started' | 'completed' diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index 5c5f2d2..1358f52 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -244,12 +244,12 @@ test('awaited child jumps queue without overshoot', async () => { assert.ok(child_event!.event_started_at! < event3_from_history!.event_started_at!) }) -test('done() on non-proxied event still holds immediate_processing_stack_depth', async () => { +test('done() on non-proxied event keeps bus paused during queue-jump', async () => { const bus = new EventBus('RawDoneBus', { max_history_size: 100 }) const Event1 = BaseEvent.extend('Event1', {}) const ChildEvent = BaseEvent.extend('RawChild', {}) - let depth_after_done = -1 + let paused_after_done = false bus.on(ChildEvent, () => {}) @@ -260,29 +260,26 @@ test('done() on non-proxied event still holds immediate_processing_stack_depth', const raw_child = child._original_event ?? child // done() on raw event bypasses handler_result injection from proxy await raw_child.done() - // After done() returns, depth should still be > 0 because + // After done() returns, bus should still be paused because // we're still inside a handler doing queue-jump processing - depth_after_done = bus.immediate_processing_stack_depth + paused_after_done = bus.locks.isPaused() }) bus.dispatch(Event1({})) await bus.waitUntilIdle() - assert.ok( - depth_after_done > 0, - `immediate_processing_stack_depth should be > 0 after raw done() ` + `but before handler returns, got ${depth_after_done}` - ) + assert.equal(paused_after_done, true, 'bus should be paused after raw done() but before handler returns') }) -test('immediate_processing_stack_depth returns to 0 after queue-jump completes', async () => { +test('bus pause state clears after queue-jump completes', async () => { const bus = new EventBus('DepthBalanceBus', { max_history_size: 100 }) const Event1 = BaseEvent.extend('DepthEvent1', {}) const ChildA = BaseEvent.extend('DepthChildA', {}) const ChildB = BaseEvent.extend('DepthChildB', {}) - let depth_during_handler = -1 - let depth_between_dones = -1 - let depth_after_second_done = -1 + let paused_during_handler = false + let paused_between_dones = false + let paused_after_second_done = false bus.on(ChildA, () => {}) bus.on(ChildB, () => {}) @@ -291,33 +288,29 @@ test('immediate_processing_stack_depth returns to 0 after queue-jump completes', // First queue-jump const child_a = event.bus?.emit(ChildA({}))! await child_a.done() - depth_during_handler = bus.immediate_processing_stack_depth + paused_during_handler = bus.locks.isPaused() - // Second queue-jump β€” should NOT double-increment (queue_jump_hold guard) + // Second queue-jump β€” bus should remain paused across both awaits. const child_b = event.bus?.emit(ChildB({}))! - depth_between_dones = bus.immediate_processing_stack_depth + paused_between_dones = bus.locks.isPaused() await child_b.done() - depth_after_second_done = bus.immediate_processing_stack_depth + paused_after_second_done = bus.locks.isPaused() }) bus.dispatch(Event1({})) await bus.waitUntilIdle() - // During handler, depth should be > 0 (held by queue_jump_hold) - assert.ok(depth_during_handler > 0, `depth should be > 0 after first done(), got ${depth_during_handler}`) + // During handler, pause should still be held. + assert.equal(paused_during_handler, true, 'bus should remain paused after first done()') - // Between done() calls, depth should still be held - assert.ok(depth_between_dones > 0, `depth should be > 0 between done() calls, got ${depth_between_dones}`) + // Between done() calls, pause should still be held. + assert.equal(paused_between_dones, true, 'bus should remain paused between done() calls') - // After second done(), still held until handler returns - assert.ok(depth_after_second_done > 0, `depth should be > 0 after second done(), got ${depth_after_second_done}`) + // After second done(), pause is still held until handler returns. + assert.equal(paused_after_second_done, true, 'bus should remain paused after second done()') - // After handler finishes and bus is idle, depth must be exactly 0 - assert.equal( - bus.immediate_processing_stack_depth, - 0, - `depth should return to 0 after handler completes, got ${bus.immediate_processing_stack_depth}` - ) + // After handler finishes and bus is idle, pause must be released. + assert.equal(bus.locks.isPaused(), false, 'bus should no longer be paused after handler completes') }) test('isInsideHandler() is per-bus, not global', async () => { @@ -729,23 +722,23 @@ test('deeply nested awaited children', async () => { // ============================================================================= // Queue-Jump Concurrency Tests (Two-Bus) // -// BUG: runImmediatelyAcrossBuses passes { bypass_handler_limiters: true, -// bypass_event_limiters: true } for ALL buses. This causes: +// BUG: runImmediatelyAcrossBuses passes { bypass_handler_semaphores: true, +// bypass_event_semaphores: true } for ALL buses. This causes: // 1. Handlers to run in parallel regardless of configured concurrency -// 2. Event limiters on remote buses to be skipped +// 2. Event semaphores on remote buses to be skipped // // The fix requires "yield-and-reacquire": -// - Before processing the child, temporarily RELEASE the limiter the parent +// - Before processing the child, temporarily RELEASE the semaphore the parent // handler holds (the parent is suspended in `await child.done()` and isn't // using it). // - Process the child event NORMALLY β€” handlers acquire/release the real -// limiter, serializing among themselves as configured. -// - After the child completes, RE-ACQUIRE the limiter for the parent handler +// semaphore, serializing among themselves as configured. +// - After the child completes, RE-ACQUIRE the semaphore for the parent handler // before it resumes. // -// For event limiters, only bypass on the initiating bus (where the parent holds -// the limiter). On other buses, respect their event concurrency β€” bypass only -// if they resolve to the SAME limiter instance (i.e. global-serial). +// For event semaphores, only bypass on the initiating bus (where the parent holds +// the semaphore). On other buses, respect their event concurrency β€” bypass only +// if they resolve to the SAME semaphore instance (i.e. global-serial). // // All tests use two buses. The pattern is: // bus_a: origin bus where TriggerEvent handler dispatches a child @@ -893,7 +886,7 @@ test('BUG: queue-jump two-bus global-serial handlers should serialize across bot // Check: bus_a handlers all finish before bus_b handlers start // (because runImmediatelyAcrossBuses processes sequentially and - // all share the global handler limiter) + // all share the global handler semaphore) const a2_end = log.indexOf('a2_end') const b1_start = log.indexOf('b1_start') assert.ok(a2_end < b1_start, `global-serial: bus_a should finish before bus_b starts. Got: [${log.join(', ')}]`) @@ -1029,12 +1022,12 @@ test('BUG: queue-jump two-bus mixed: bus_a parallel, bus_b bus-serial', async () // // When the forward bus (bus_b) has bus-serial event concurrency and is already // processing an event, a queue-jumped child should WAIT for bus_b's in-flight -// event to finish. The current code bypasses event limiters for ALL buses, +// event to finish. The current code bypasses event semaphores for ALL buses, // causing the child to cut in front of the in-flight event. // -// The fix should only bypass event limiters on the INITIATING bus (where the -// parent event holds the limiter). On other buses, bypass only if they resolve -// to the SAME limiter instance (global-serial shares one global limiter). +// The fix should only bypass event semaphores on the INITIATING bus (where the +// parent event holds the semaphore). On other buses, bypass only if they resolve +// to the SAME semaphore instance (global-serial shares one global semaphore). // ============================================================================= test('BUG: queue-jump should respect bus-serial event concurrency on forward bus', async () => { @@ -1053,7 +1046,7 @@ test('BUG: queue-jump should respect bus-serial event concurrency on forward bus const log: string[] = [] - // SlowEvent handler: occupies bus_b's event limiter for 40ms + // SlowEvent handler: occupies bus_b's event semaphore for 40ms bus_b.on(SlowEvent, async () => { log.push('slow_start') await delay(40) @@ -1102,7 +1095,7 @@ test('BUG: queue-jump should respect bus-serial event concurrency on forward bus `bus_b (bus-serial events): child should wait for slow event to finish. ` + `Got: [${log.join(', ')}]` ) - // The child on bus_a should have processed (queue-jumped, bypasses bus_a's event limiter) + // The child on bus_a should have processed (queue-jumped, bypasses bus_a's event semaphore) assert.ok(log.includes('child_a_start'), 'child on bus_a should have run') assert.ok(log.includes('child_a_end'), 'child on bus_a should have completed') }) @@ -1160,8 +1153,8 @@ test('queue-jump with fully-parallel forward bus starts immediately', async () = test('queue-jump with parallel events but bus-serial handlers on forward bus serializes handlers', async () => { // When bus_b has parallel event concurrency but bus-serial handler concurrency, - // the child event can start processing immediately (event limiter is parallel), - // but its handler must wait for the slow handler to release the handler limiter. + // the child event can start processing immediately (event semaphore is parallel), + // but its handler must wait for the slow handler to release the handler semaphore. const TriggerEvent = BaseEvent.extend('QJEvtParHSer_Trigger', {}) const ChildEvent = BaseEvent.extend('QJEvtParHSer_Child', {}) diff --git a/bubus-ts/tests/eventbus_basics.test.ts b/bubus-ts/tests/eventbus_basics.test.ts index 060d7a5..72a36ab 100644 --- a/bubus-ts/tests/eventbus_basics.test.ts +++ b/bubus-ts/tests/eventbus_basics.test.ts @@ -11,7 +11,7 @@ const delay = (ms: number): Promise => // ─── Constructor defaults ──────────────────────────────────────────────────── -test('EventBus initializes with correct defaults', () => { +test('EventBus initializes with correct defaults', async () => { const bus = new EventBus('DefaultsBus') assert.equal(bus.name, 'DefaultsBus') @@ -20,9 +20,8 @@ test('EventBus initializes with correct defaults', () => { assert.equal(bus.handler_concurrency_default, 'bus-serial') assert.equal(bus.event_timeout_default, 60) assert.equal(bus.event_history.size, 0) - assert.equal(bus.pending_event_queue.length, 0) - assert.equal(bus.in_flight_event_ids.size, 0) assert.ok(EventBus.instances.has(bus)) + await bus.waitUntilIdle() }) test('EventBus applies custom options', () => { @@ -54,6 +53,78 @@ test('EventBus auto-generates name when not provided', () => { assert.equal(bus.name, 'EventBus') }) +test('EventBus exposes locks API surface', () => { + const bus = new EventBus('GateSurfaceBus') + const locks = bus.locks as unknown as Record + + assert.equal(typeof locks.requestPause, 'function') + assert.equal(typeof locks.waitUntilResumed, 'function') + assert.equal(typeof locks.isPaused, 'function') + assert.equal(typeof locks.waitForIdle, 'function') + assert.equal(typeof locks.notifyIdleListeners, 'function') + assert.equal(typeof locks.getSemaphoreForEvent, 'function') + assert.equal(typeof locks.getSemaphoreForHandler, 'function') +}) + +test('EventBus locks methods are callable and preserve semaphore resolution behavior', async () => { + const bus = new EventBus('GateInvocationBus', { + event_concurrency: 'bus-serial', + handler_concurrency: 'bus-serial', + }) + const GateEvent = BaseEvent.extend('GateInvocationEvent', {}) + + const release_pause = bus.locks.requestPause() + assert.equal(bus.locks.isPaused(), true) + + let resumed = false + const resumed_promise = bus.locks.waitUntilResumed().then(() => { + resumed = true + }) + await Promise.resolve() + assert.equal(resumed, false) + + release_pause() + await resumed_promise + assert.equal(bus.locks.isPaused(), false) + + const event_with_global = GateEvent({ + event_concurrency: 'global-serial', + handler_concurrency: 'global-serial', + }) + assert.equal(bus.locks.getSemaphoreForEvent(event_with_global), EventBus.global_event_semaphore) + assert.equal(bus.locks.getSemaphoreForHandler(event_with_global), EventBus.global_handler_semaphore) + + const event_with_parallel = GateEvent({ + event_concurrency: 'parallel', + handler_concurrency: 'parallel', + }) + assert.equal(bus.locks.getSemaphoreForEvent(event_with_parallel), null) + assert.equal(bus.locks.getSemaphoreForHandler(event_with_parallel), null) + + const event_using_handler_options = GateEvent({}) + assert.equal(bus.locks.getSemaphoreForHandler(event_using_handler_options, { handler_concurrency: 'parallel' }), null) + + bus.dispatch(GateEvent({})) + bus.locks.notifyIdleListeners() + await bus.locks.waitForIdle() +}) + +test('BaseEvent lifecycle methods are callable and preserve lifecycle behavior', async () => { + const LifecycleEvent = BaseEvent.extend('LifecycleMethodInvocationEvent', {}) + + const standalone = LifecycleEvent({}) + standalone.markStarted() + assert.equal(standalone.event_status, 'started') + standalone.tryFinalizeCompletion() + assert.equal(standalone.event_status, 'completed') + await standalone.waitForCompletion() + + const bus = new EventBus('LifecycleMethodInvocationBus') + const dispatched = bus.dispatch(LifecycleEvent({})) + await dispatched.waitForCompletion() + assert.equal(dispatched.event_status, 'completed') +}) + // ─── Event dispatch and status lifecycle ───────────────────────────────────── test('dispatch returns pending event with correct initial state', async () => { diff --git a/bubus-ts/tests/locking.test.ts b/bubus-ts/tests/locking.test.ts index f9bd0d8..9244819 100644 --- a/bubus-ts/tests/locking.test.ts +++ b/bubus-ts/tests/locking.test.ts @@ -11,13 +11,13 @@ Potential failure modes A) Event concurrency modes - global-serial not enforcing strict FIFO across multiple buses (events interleave). - bus-serial allows cross-bus interleaving but still must be FIFO within a bus; breaks under forwarding. -- parallel accidentally serializes (e.g., limiter still used) or breaks queue-jump semantics. +- parallel accidentally serializes (e.g., semaphore still used) or breaks queue-jump semantics. - auto not resolving correctly to bus defaults. B) Handler concurrency modes - global-serial not enforcing strict handler order across buses. - bus-serial leaks parallelism between handlers on the same bus. -- parallel accidentally serializes or fails to gate per-handler ordering. +- parallel accidentally serializes or fails to enforce per-handler ordering. - auto not resolving correctly to handler options or bus defaults. C) Precedence resolution @@ -27,7 +27,7 @@ C) Precedence resolution D) Queue-jump / awaited events - event.done() inside handler doesn’t jump the queue across buses. -- Queue-jump bypasses limiters incorrectly in contexts where it shouldn’t. +- Queue-jump bypasses semaphores incorrectly in contexts where it shouldn’t. - Queue-jump fails when event already in-flight. E) FIFO correctness @@ -68,7 +68,7 @@ K) Idle / completion L) Reentrancy / nested awaits - Nested awaited child events starve sibling handlers. -- Awaited child events skip limiter incorrectly (deadlocks or ordering regressions). +- Awaited child events skip semaphore incorrectly (deadlocks or ordering regressions). M) Edge-cases - Multiple handlers for same event type with different options collide. @@ -180,7 +180,7 @@ test('global-serial: awaited child jumps ahead of queued events across buses', a assert.ok(child_end_idx < queued_start_idx) }) -test('global-serial: handler limiter serializes handlers across buses', async () => { +test('global-serial: handler semaphore serializes handlers across buses', async () => { const HandlerEvent = BaseEvent.extend('HandlerEvent', { order: z.number(), source: z.string(), @@ -415,7 +415,7 @@ test('parallel: handlers overlap for same event when handler_concurrency is para assert.ok(max_in_flight >= 2) }) -test('parallel: global-serial handler limiter still serializes across buses', async () => { +test('parallel: global-serial handler semaphore still serializes across buses', async () => { const ParallelEvent = BaseEvent.extend('ParallelEventGlobalHandler', { source: z.string(), }) @@ -837,6 +837,63 @@ test('queue-jump: awaited child preempts queued sibling on same bus', async () = assert.ok(child_end_idx < sibling_start_idx) }) +test('queue-jump: same event handlers on separate buses stay isolated without forwarding', async () => { + const ParentEvent = BaseEvent.extend('QueueJumpIsolatedParent', {}) + const SharedEvent = BaseEvent.extend('QueueJumpIsolatedShared', {}) + const SiblingEvent = BaseEvent.extend('QueueJumpIsolatedSibling', {}) + + const bus_a = new EventBus('QueueJumpIsolatedA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('QueueJumpIsolatedB', { event_concurrency: 'bus-serial' }) + + const order: string[] = [] + let bus_a_shared_runs = 0 + let bus_b_shared_runs = 0 + + bus_a.on(SharedEvent, async () => { + bus_a_shared_runs += 1 + order.push('bus_a_shared_start') + await sleep(2) + order.push('bus_a_shared_end') + }) + + bus_b.on(SharedEvent, async () => { + bus_b_shared_runs += 1 + order.push('bus_b_shared_start') + await sleep(2) + order.push('bus_b_shared_end') + }) + + bus_a.on(SiblingEvent, async () => { + order.push('bus_a_sibling_start') + await sleep(1) + order.push('bus_a_sibling_end') + }) + + bus_a.on(ParentEvent, async (event) => { + order.push('parent_start') + bus_a.emit(SiblingEvent({})) + const shared = event.bus?.emit(SharedEvent({}))! + order.push('shared_dispatched') + await shared.done() + order.push('shared_awaited') + order.push('parent_end') + }) + + const parent = bus_a.dispatch(ParentEvent({})) + await parent.done() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + assert.equal(bus_a_shared_runs, 1) + assert.equal(bus_b_shared_runs, 0) + assert.equal(order.includes('bus_b_shared_start'), false) + + const bus_a_shared_end_idx = order.indexOf('bus_a_shared_end') + const bus_a_sibling_start_idx = order.indexOf('bus_a_sibling_start') + assert.ok(bus_a_shared_end_idx !== -1) + assert.ok(bus_a_sibling_start_idx !== -1) + assert.ok(bus_a_shared_end_idx < bus_a_sibling_start_idx) +}) + test('queue-jump: awaiting in-flight event does not double-run handlers', async () => { const InFlightEvent = BaseEvent.extend('InFlightEvent', {}) const bus = new EventBus('InFlightBus', { diff --git a/bubus-ts/tests/performance.test.ts b/bubus-ts/tests/performance.test.ts index ea71efa..171bbfa 100644 --- a/bubus-ts/tests/performance.test.ts +++ b/bubus-ts/tests/performance.test.ts @@ -228,9 +228,17 @@ test( await gc.done() }) - // Persistent handler on bus_c for grandchild - bus_c.on(GrandchildEvent, () => { + // Persistent handler on bus_c for grandchild β€” slow on timeout iterations + // so the child's 5ms timeout fires while this is still sleeping. + // This creates EventHandlerTimeoutError β†’ EventHandlerCancelledError chains. + // Sleep is 50ms but child timeout is 5ms β€” with cancellation of started handlers, + // the child completes immediately when timeout fires. Background sleep continues + // silently (JS can't cancel async functions, but the event system moves on). + bus_c.on(GrandchildEvent, async (event) => { grandchild_handled += 1 + if ((event as any).iteration % 5 === 0) { + await new Promise((r) => setTimeout(r, 50)) + } }) global.gc?.() @@ -243,7 +251,7 @@ test( // Ephemeral handler on bus_a β€” queue-jumps a child to bus_c const ephemeral_handler = async (event: any) => { parent_handled_a += 1 - const child_timeout = should_timeout ? 0.001 : null // 1ms timeout β†’ will fire + const child_timeout = should_timeout ? 0.005 : null // 5ms timeout β†’ fires while grandchild sleeps 50ms const child = event.bus?.emit(ChildEvent({ iteration: i, event_timeout: child_timeout, @@ -263,7 +271,9 @@ test( bus_b.dispatch(parent) await ev_a.done() - await bus_c.waitUntilIdle() + // Don't waitUntilIdle on bus_c here β€” timed-out grandchild handlers are + // still sleeping in the background (JS can't cancel async functions). + // Let them pile up; the final waitUntilIdle() outside the loop will drain. // Deregister ephemeral handler bus_a.off(ParentEvent, ephemeral_handler) diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts index cfb272a..7cc7b9f 100644 --- a/bubus-ts/tests/timeout.test.ts +++ b/bubus-ts/tests/timeout.test.ts @@ -1,7 +1,7 @@ import assert from 'node:assert/strict' import { test } from 'node:test' -import { BaseEvent, EventBus, EventHandlerCancelledError, EventHandlerTimeoutError } from '../src/index.js' +import { BaseEvent, EventBus, EventHandlerCancelledError, EventHandlerAbortedError, EventHandlerTimeoutError } from '../src/index.js' const TimeoutEvent = BaseEvent.extend('TimeoutEvent', {}) @@ -300,7 +300,244 @@ test('queue-jump awaited child timeouts still fire across buses', async () => { assert.ok(timeout_result) }) -test('parent timeout cancels pending child handler results under serial handler limiter', async () => { +const STEP1_HANDLER_MODES = ['bus-serial', 'global-serial'] as const +type Step1HandlerMode = (typeof STEP1_HANDLER_MODES)[number] + +const getHandlerSemaphore = (bus: EventBus, mode: Step1HandlerMode) => + mode === 'global-serial' ? EventBus.global_handler_semaphore : bus.bus_handler_semaphore + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: timeout during awaited child.done() does not leak handler semaphore lock [${handler_mode}]`, async () => { + const ParentEvent = BaseEvent.extend(`TimeoutLeakParent-${handler_mode}`, {}) + const ChildEvent = BaseEvent.extend(`TimeoutLeakChild-${handler_mode}`, {}) + + const bus = new EventBus(`TimeoutLeakBus-${handler_mode}`, { + event_concurrency: 'bus-serial', + handler_concurrency: handler_mode, + }) + const semaphore = getHandlerSemaphore(bus, handler_mode) + const baseline_in_use = semaphore.in_use + const original_acquire = semaphore.acquire.bind(semaphore) + let acquire_count = 0 + + semaphore.acquire = async () => { + acquire_count += 1 + // Third acquire is the parent reclaim in _runImmediately finally. + // Delay it so the parent handler timeout can fire in the middle. + if (acquire_count === 3) { + await delay(30) + } + await original_acquire() + } + + try { + bus.on(ChildEvent, async () => { + await delay(1) + return 'child_done' + }) + + bus.on(ParentEvent, async (event) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.2 }))! + await child.done() + return 'parent_done' + }) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })) + await parent.done() + await bus.waitUntilIdle() + + const parent_result = Array.from(parent.event_results.values())[0] + assert.equal(parent_result.status, 'error') + assert.ok(parent_result.error instanceof EventHandlerTimeoutError) + assert.equal( + semaphore.in_use, + baseline_in_use, + `handler semaphore leaked lock (mode=${handler_mode}, in_use=${semaphore.in_use}, baseline=${baseline_in_use}, acquires=${acquire_count})` + ) + } finally { + semaphore.acquire = original_acquire + while (semaphore.in_use > baseline_in_use) { + semaphore.release() + } + } + }) +} + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: parent timeout while reacquire waits behind third serial handler is lock-safe [${handler_mode}]`, async () => { + const ParentEvent = BaseEvent.extend(`TimeoutContentionParent-${handler_mode}`, {}) + const ChildEvent = BaseEvent.extend(`TimeoutContentionChild-${handler_mode}`, {}) + + const bus = new EventBus(`TimeoutContentionBus-${handler_mode}`, { + event_concurrency: 'bus-serial', + handler_concurrency: handler_mode, + }) + const semaphore = getHandlerSemaphore(bus, handler_mode) + const baseline_in_use = semaphore.in_use + + bus.on(ChildEvent, async () => { + await delay(2) + return 'child_done' + }) + + bus.on(ParentEvent, async (event) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.2, handler_concurrency: 'parallel' }))! + await child.done() + return 'parent_main' + }) + + // This handler queues behind parent_main, then holds the serial semaphore + // while parent_main is trying to reclaim after child.done() completes. + bus.on(ParentEvent, async () => { + await delay(40) + return 'parent_blocker' + }) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })) + await parent.done() + await bus.waitUntilIdle() + + const parent_results = Array.from(parent.event_results.values()) + const timeout_results = parent_results.filter((result) => result.error instanceof EventHandlerTimeoutError) + assert.ok(timeout_results.length >= 1, `expected at least one timeout result in ${handler_mode}`) + assert.equal(semaphore.in_use, baseline_in_use) + }) +} + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: next event still runs on same bus after timeout queue-jump path [${handler_mode}]`, async () => { + const ParentEvent = BaseEvent.extend(`TimeoutFollowupParent-${handler_mode}`, {}) + const ChildEvent = BaseEvent.extend(`TimeoutFollowupChild-${handler_mode}`, {}) + const FollowupEvent = BaseEvent.extend(`TimeoutFollowupTail-${handler_mode}`, {}) + + const bus = new EventBus(`TimeoutFollowupBus-${handler_mode}`, { + event_concurrency: 'bus-serial', + handler_concurrency: handler_mode, + }) + const semaphore = getHandlerSemaphore(bus, handler_mode) + const baseline_in_use = semaphore.in_use + const original_acquire = semaphore.acquire.bind(semaphore) + let acquire_count = 0 + semaphore.acquire = async () => { + acquire_count += 1 + if (acquire_count === 3) { + await delay(30) + } + await original_acquire() + } + + let followup_runs = 0 + + try { + bus.on(ChildEvent, async () => { + await delay(1) + }) + + bus.on(ParentEvent, async (event) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.2 }))! + await child.done() + }) + + bus.on(FollowupEvent, async () => { + followup_runs += 1 + return 'followup_done' + }) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.01 })) + await parent.done() + await bus.waitUntilIdle() + + const followup = bus.dispatch(FollowupEvent({ event_timeout: 0.05 })) + const followup_completed = await Promise.race([followup.done().then(() => true), delay(100).then(() => false)]) + + assert.equal( + followup_completed, + true, + `follow-up event stalled after timeout queue-jump path (mode=${handler_mode}, in_use=${semaphore.in_use}, acquires=${acquire_count})` + ) + assert.equal(followup_runs, 1) + assert.equal(semaphore.in_use, baseline_in_use) + } finally { + semaphore.acquire = original_acquire + while (semaphore.in_use > baseline_in_use) { + semaphore.release() + } + } + }) +} + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: nested queue-jump with timeout cancellation remains lock-safe [${handler_mode}]`, async () => { + const ParentEvent = BaseEvent.extend(`NestedPermitParent-${handler_mode}`, {}) + const ChildEvent = BaseEvent.extend(`NestedPermitChild-${handler_mode}`, {}) + const GrandchildEvent = BaseEvent.extend(`NestedPermitGrandchild-${handler_mode}`, {}) + const QueuedSiblingEvent = BaseEvent.extend(`NestedPermitQueuedSibling-${handler_mode}`, {}) + const TailEvent = BaseEvent.extend(`NestedPermitTail-${handler_mode}`, {}) + + const bus = new EventBus(`NestedPermitBus-${handler_mode}`, { + event_concurrency: 'bus-serial', + handler_concurrency: handler_mode, + }) + const semaphore = getHandlerSemaphore(bus, handler_mode) + const baseline_in_use = semaphore.in_use + + let queued_sibling_runs = 0 + let tail_runs = 0 + let queued_sibling_ref: InstanceType | null = null + + bus.on(GrandchildEvent, async () => { + await delay(1) + return 'grandchild_done' + }) + + bus.on(ChildEvent, async (event) => { + const grandchild = event.bus?.emit(GrandchildEvent({ event_timeout: 0.2 }))! + await grandchild.done() + await delay(40) + return 'child_done' + }) + + bus.on(QueuedSiblingEvent, async () => { + queued_sibling_runs += 1 + return 'queued_sibling_done' + }) + + bus.on(ParentEvent, async (event) => { + queued_sibling_ref = event.bus?.emit(QueuedSiblingEvent({ event_timeout: 0.2 }))! + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.02 }))! + await child.done() + await delay(40) + }) + + bus.on(TailEvent, async () => { + tail_runs += 1 + return 'tail_done' + }) + + const parent = bus.dispatch(ParentEvent({ event_timeout: 0.03 })) + await parent.done() + await bus.waitUntilIdle() + + const parent_result = Array.from(parent.event_results.values())[0] + assert.equal(parent_result.status, 'error') + assert.ok(parent_result.error instanceof EventHandlerTimeoutError) + + assert.ok(queued_sibling_ref) + assert.equal(queued_sibling_runs, 0) + const queued_sibling_results = Array.from(queued_sibling_ref!.event_results.values()) + assert.ok(queued_sibling_results.some((result) => result.error instanceof EventHandlerCancelledError)) + + assert.equal(semaphore.in_use, baseline_in_use) + + const tail = bus.dispatch(TailEvent({ event_timeout: 0.05 })) + const tail_completed = await Promise.race([tail.done().then(() => true), delay(100).then(() => false)]) + assert.equal(tail_completed, true) + assert.equal(tail_runs, 1) + assert.equal(semaphore.in_use, baseline_in_use) + }) +} + +test('parent timeout cancels pending child handler results under serial handler semaphore', async () => { const ParentEvent = BaseEvent.extend('TimeoutCancelParentEvent', {}) const ChildEvent = BaseEvent.extend('TimeoutCancelChildEvent', {}) @@ -488,10 +725,11 @@ test('multi-level timeout cascade with mixed cancellations', async () => { const immediate_results = Array.from(immediate_grandchild!.event_results.values()) // With bus-serial handler concurrency (no longer bypassed during queue-jump), // only the first grandchild handler starts before the awaited child's 30ms timeout fires. - // The second handler is still pending (waiting for limiter) β†’ cancelled. + // The second handler is still pending (waiting for semaphore) β†’ cancelled. + // The first handler was already started β†’ aborted (EventHandlerAbortedError). assert.equal(immediate_grandchild_runs, 1) - const immediate_completed = immediate_results.filter((result) => result.status === 'completed') - assert.equal(immediate_completed.length, 1) + const immediate_aborted = immediate_results.filter((result) => result.error instanceof EventHandlerAbortedError) + assert.equal(immediate_aborted.length, 1) const immediate_cancelled = immediate_results.filter((result) => result.error instanceof EventHandlerCancelledError) assert.equal(immediate_cancelled.length, 1) @@ -517,7 +755,7 @@ test('multi-level timeout cascade with mixed cancellations', async () => { // // KEY MECHANIC: When a child event is awaited via event.done() inside a handler, // it triggers "queue-jumping" via _runImmediately β†’ runImmediatelyAcrossBuses. -// Queue-jumped events use yield-and-reacquire: the parent handler's limiter is +// Queue-jumped events use yield-and-reacquire: the parent handler's semaphore is // temporarily released so child handlers can acquire it normally. This means // child handlers run SERIALLY on a bus-serial bus (respecting concurrency limits). // Non-awaited child events stay in the pending_event_queue and are blocked by @@ -525,9 +763,9 @@ test('multi-level timeout cascade with mixed cancellations', async () => { // // TIMEOUT BEHAVIOR: Each handler gets its OWN timeout window starting from when // that handler begins execution β€” NOT from when the event was dispatched. -// With serial handlers, each timeout starts when the handler acquires the limiter. +// With serial handlers, each timeout starts when the handler acquires the semaphore. // -// CANCELLATION CASCADE: When a handler times out, cancelPendingChildProcessing() +// CANCELLATION CASCADE: When a handler times out, bus.cancelPendingDescendants() // walks the event's children tree and marks any "pending" handler results as // EventHandlerCancelledError. Only "pending" results are cancelled β€” handlers // that already started ("started" status) continue running in the background. @@ -553,8 +791,8 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella // ── GrandchildEvent handlers ────────────────────────────────────────── // These run SERIALLY because queue-jumped events respect the bus-serial - // handler limiter (yield-and-reacquire). Each handler gets its own 35ms - // timeout window starting from when that handler acquires the limiter. + // handler semaphore (yield-and-reacquire). Each handler gets its own 35ms + // timeout window starting from when that handler acquires the semaphore. // // Serial order: a(35ms timeout) β†’ b(sync) β†’ c(35ms timeout) β†’ d(10ms) β†’ e(35ms timeout) // Total time for all 5: ~35+0+35+10+35 = ~115ms (within child's 150ms timeout) @@ -595,7 +833,7 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella // ── QueuedGrandchildEvent handler ───────────────────────────────────── // This event is emitted by child_handler but NOT awaited, so it sits in // pending_event_queue. When child_handler times out at 80ms, - // cancelPendingChildProcessing walks ChildEvent.event_children and finds + // bus.cancelPendingDescendants walks ChildEvent.event_children and finds // this event still pending β†’ its handler results are marked as cancelled. const queued_gc_handler = () => { execution_log.push('queued_gc_start') // should never reach here @@ -622,14 +860,14 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella // ── SiblingEvent handler ────────────────────────────────────────────── // This event is emitted by top_handler_main but NOT awaited. Stays in // pending_event_queue until top_handler_main times out at 250ms β†’ - // cancelled by cancelPendingChildProcessing. + // cancelled by bus.cancelPendingDescendants. const sibling_handler = () => { execution_log.push('sibling_start') // should never reach here return 'sibling_done' } // ── TopEvent handlers ───────────────────────────────────────────────── - // These run SERIALLY (via bus handler limiter) because TopEvent is + // These run SERIALLY (via bus handler semaphore) because TopEvent is // processed by the normal runloop (not queue-jumped). top_handler_fast // goes first, completes quickly, then top_handler_main starts. @@ -729,7 +967,7 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella // ── QueuedGrandchildEvent: CANCELLED by child_handler timeout ─────── // This event was emitted but never awaited. It sat in pending_event_queue - // until child_handler timed out, which triggered cancelPendingChildProcessing + // until child_handler timed out, which triggered bus.cancelPendingDescendants // to walk ChildEvent.event_children and cancel all pending handlers. assert.ok(queued_grandchild_ref, 'QueuedGrandchildEvent should have been emitted') assert.equal(queued_grandchild_ref!.event_status, 'completed') @@ -749,7 +987,7 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella // ── SiblingEvent: CANCELLED by top_handler_main timeout ───────────── // Same pattern: emitted but never awaited, stays in queue, cancelled when - // top_handler_main times out and cancelPendingChildProcessing runs. + // top_handler_main times out and bus.cancelPendingDescendants runs. assert.ok(sibling_ref, 'SiblingEvent should have been emitted') assert.equal(sibling_ref!.event_status, 'completed') @@ -902,7 +1140,7 @@ test('cancellation error chain preserves parent_error references through hierarc const deep_cancel = deep_result.error as EventHandlerCancelledError assert.ok(deep_cancel.parent_error instanceof EventHandlerTimeoutError, 'Cancellation should reference parent timeout') // The parent_error should be the INNER handler's timeout, because that's - // the handler whose cancelPendingChildProcessing actually cancelled DeepEvent. + // the handler whose bus.cancelPendingDescendants actually cancelled DeepEvent. assert.ok( deep_cancel.parent_error.message.includes('inner_handler') || deep_cancel.parent_error.message.includes('child_handler'), 'parent_error should reference the handler that directly caused cancellation' @@ -912,7 +1150,7 @@ test('cancellation error chain preserves parent_error references through hierarc // ============================================================================= // When a parent has a timeout but a child has event_timeout: null (no timeout), // the child's handlers run indefinitely on their own β€” but if the PARENT times -// out, cancelPendingChildProcessing still cancels any pending child handlers. +// out, bus.cancelPendingDescendants still cancels any pending child handlers. // This tests that cancellation works across timeout/no-timeout boundaries. // ============================================================================= From 1be5da2dc696fda7400acb24725628a2f8bc94fd Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 6 Feb 2026 01:55:57 -0800 Subject: [PATCH 51/79] readme updates --- bubus-ts/README.md | 49 +++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index 09ed50b..8b2cf82 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -16,7 +16,7 @@ gotchas we uncovered while matching behavior. It intentionally does **not** re-d - Python uses a global re-entrant lock to let awaited events process immediately on every bus where they appear. - TS optionally uses `AsyncLocalStorage` on Node.js (auto-detected) to capture dispatch context, but falls back gracefully in browsers. -- `EventBus.instances` + `immediate_processing_stack_depth` pauses each runloop and processes the same event immediately across buses. +- `EventBus.instances` + the `LockManager` pause mechanism pauses each runloop and processes the same event immediately across buses. ### 3) `event.bus` is a BusScopedEvent view @@ -104,14 +104,14 @@ bus.on(SomeEvent, handler, { If an event sets `handler_concurrency: "parallel"`, that wins even if a handler is ordered. -## Limiters (how concurrency is enforced) +## Semaphores (how concurrency is enforced) -We use four limiters: +We use four semaphores: -- `EventBus.global_event_limiter` -- `EventBus.global_handler_limiter` -- `bus.bus_event_limiter` -- `bus.bus_handler_limiter` +- `EventBus.global_event_semaphore` +- `EventBus.global_handler_semaphore` +- `bus.bus_event_semaphore` +- `bus.bus_handler_semaphore` They are applied centrally when scheduling events and handlers, so concurrency is controlled without scattering mutex checks throughout the code. @@ -139,19 +139,19 @@ under different `event_concurrency` / `handler_concurrency` configurations. 1. `runloop()` drains `pending_event_queue`. 2. Adds event id to `in_flight_event_ids`. 3. Calls `scheduleEventProcessing()` (async). -4. `scheduleEventProcessing()` selects the event limiter and runs `processEvent()`. +4. `scheduleEventProcessing()` selects the event semaphore and runs `processEvent()`. 5. `processEvent()`: - `event.markStarted()` - `notifyFinders(event)` - creates handler results (`event_results`) - - runs handlers (respecting handler limiter) + - runs handlers (respecting handler semaphore) - decrements `event_pending_buses` and calls `event.tryFinalizeCompletion()` ### 2) Event concurrency modes (`event_concurrency`) -- **`global-serial`**: events are serialized across _all_ buses using the global event limiter. +- **`global-serial`**: events are serialized across _all_ buses using the global event semaphore. - **`bus-serial`**: events are serialized per bus; different buses can overlap. -- **`parallel`**: no event limiter; events can run concurrently on the same bus. +- **`parallel`**: no event semaphore; events can run concurrently on the same bus. - **`auto`**: resolves to the bus default. **Mixed buses:** each bus enforces its own event mode. Forwarding to another bus does not inherit the source bus’s mode. @@ -160,7 +160,7 @@ under different `event_concurrency` / `handler_concurrency` configurations. `handler_concurrency` controls how handlers run **for a single event**: -- **`global-serial`**: only one handler at a time across all buses using the global handler limiter. +- **`global-serial`**: only one handler at a time across all buses using the global handler semaphore. - **`bus-serial`**: handlers serialize per bus. - **`parallel`**: handlers run concurrently for the event. - **`auto`**: resolves to the bus default. @@ -175,23 +175,22 @@ When a handler on Bus A calls `bus_b.dispatch(event)` without awaiting: - Bus A continues running its handler. - Bus B queues and processes the event according to **Bus B’s** concurrency settings. -- No coupling unless both buses use the global limiters. +- No coupling unless both buses use the global semaphores. ### 5) Queue-jump (`await event.done()` inside handlers) When `event.done()` is awaited inside a handler, **queue-jump** happens: 1. `BaseEvent.done()` detects it's inside a handler and calls `_runImmediately()`. -2. `_runImmediately()` **yields** the parent handler's concurrency limiter (if held) so child handlers can acquire it. +2. `_runImmediately()` **yields** the parent handler's concurrency semaphore (if held) so child handlers can acquire it. 3. `_runImmediately()` removes the event from the pending queue (if present). 4. `runImmediatelyAcrossBuses()` processes the event immediately on all buses where it is queued. -5. While immediate processing is active, each affected bus increments `immediate_processing_stack_depth`, - and its `runloop()` pauses to prevent unrelated events from running. -6. Once immediate processing completes, `_runImmediately()` **re-acquires** the parent handler's limiter +5. While immediate processing is active, each affected bus's runloop is paused to prevent unrelated events from running. +6. Once immediate processing completes, `_runImmediately()` **re-acquires** the parent handler's semaphore (unless the parent timed out while the child was processing). -7. `immediate_processing_waiters` resume the paused runloops. +7. Paused runloops resume. -**Important:** queue-jump bypasses event limiters but **respects** handler limiters via yield-and-reacquire. +**Important:** queue-jump bypasses event semaphores but **respects** handler semaphores via yield-and-reacquire. This means queue-jumped handlers run serially on a `bus-serial` bus, not in parallel. ### 6) Precedence recap @@ -217,13 +216,13 @@ We need to know **which handler emitted a child** to correctly assign: In TS we do this by injecting a **BusScopedEvent** into handlers, which captures the active handler id and propagates it via `event_emitted_by_handler_id`. This keeps parentage deterministic even with nested awaits. -### B) Why `immediate_processing_stack_depth` exists +### B) Why runloop pausing exists When an event is awaited inside a handler, the event must **jump the queue**. If the runloop continues normally, it could process unrelated events ("overshoot"), breaking FIFO guarantees. -`immediate_processing_stack_depth` pauses the runloop while we run the awaited event immediately. Once the queue-jump completes, -the runloop resumes in FIFO order. This matches the Python behavior. +The `LockManager` pause mechanism (`requestPause`/`waitUntilResumed`) pauses the runloop while we run the awaited +event immediately. Once the queue-jump completes, the runloop resumes in FIFO order. This matches the Python behavior. ### C) BusScopedEvent: why it exists and how it works @@ -262,6 +261,6 @@ The core contract is preserved: - forwarding - await-inside-handler queue jump -But the **implementation details are different** because JS needs browser compatibility and lacks Python’s -contextvars + asyncio primitives. The stack, runloop pause, and BusScopedEvent proxy are the key differences -that make the behavior match in practice. +But the **implementation details are different** because JS needs browser compatibility and lacks Python's +contextvars + asyncio primitives. The `LockManager` (runloop pause + semaphore coordination), `HandlerLock` +(yield-and-reacquire), and `BusScopedEvent` proxy are the key differences that make the behavior match in practice. From 4d038a625fae3f6f30980a189834b6b126aaf65d Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 6 Feb 2026 15:22:25 -0800 Subject: [PATCH 52/79] LockManager consolidation --- bubus-ts/README.md | 20 +- bubus-ts/src/base_event.ts | 126 ++++--- bubus-ts/src/event_bus.ts | 337 +++++++++++------- bubus-ts/src/event_result.ts | 27 +- bubus-ts/src/index.ts | 2 +- bubus-ts/src/lock_manager.ts | 85 ++--- bubus-ts/tests/comprehensive_patterns.test.ts | 12 +- bubus-ts/tests/eventbus_basics.test.ts | 25 +- bubus-ts/tests/forwarding.test.ts | 4 +- bubus-ts/tests/locking.test.ts | 4 +- bubus-ts/tests/performance.test.ts | 8 +- bubus-ts/tests/timeout.test.ts | 114 +++++- 12 files changed, 480 insertions(+), 284 deletions(-) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index 8b2cf82..f487dec 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -16,7 +16,7 @@ gotchas we uncovered while matching behavior. It intentionally does **not** re-d - Python uses a global re-entrant lock to let awaited events process immediately on every bus where they appear. - TS optionally uses `AsyncLocalStorage` on Node.js (auto-detected) to capture dispatch context, but falls back gracefully in browsers. -- `EventBus.instances` + the `LockManager` pause mechanism pauses each runloop and processes the same event immediately across buses. +- `EventBus._all_instances` + the `LockManager` pause mechanism pauses each runloop and processes the same event immediately across buses. ### 3) `event.bus` is a BusScopedEvent view @@ -108,10 +108,10 @@ If an event sets `handler_concurrency: "parallel"`, that wins even if a handler We use four semaphores: -- `EventBus.global_event_semaphore` -- `EventBus.global_handler_semaphore` -- `bus.bus_event_semaphore` -- `bus.bus_handler_semaphore` +- `LockManager.global_event_semaphore` +- `LockManager.global_handler_semaphore` +- `bus.locks.bus_event_semaphore` +- `bus.locks.bus_handler_semaphore` They are applied centrally when scheduling events and handlers, so concurrency is controlled without scattering mutex checks throughout the code. @@ -131,7 +131,7 @@ under different `event_concurrency` / `handler_concurrency` configurations. 4. If this bus is already in `event_path` (or `eventHasVisited()`), return a BusScopedEvent without queueing. 5. Append bus name to `event_path`, record child relationship (if `event_parent_id` is set). 6. Add to `event_history` (a `Map` keyed by event id). -7. Increment `event_pending_buses`. +7. Increment `event_pending_bus_count`. 8. Push to `pending_event_queue` and `startRunloop()`. **Runloop + processing:** @@ -145,11 +145,11 @@ under different `event_concurrency` / `handler_concurrency` configurations. - `notifyFinders(event)` - creates handler results (`event_results`) - runs handlers (respecting handler semaphore) - - decrements `event_pending_buses` and calls `event.tryFinalizeCompletion()` + - decrements `event_pending_bus_count` and calls `event.tryFinalizeCompletion()` ### 2) Event concurrency modes (`event_concurrency`) -- **`global-serial`**: events are serialized across _all_ buses using the global event semaphore. +- **`global-serial`**: events are serialized across _all_ buses using `LockManager.global_event_semaphore`. - **`bus-serial`**: events are serialized per bus; different buses can overlap. - **`parallel`**: no event semaphore; events can run concurrently on the same bus. - **`auto`**: resolves to the bus default. @@ -160,7 +160,7 @@ under different `event_concurrency` / `handler_concurrency` configurations. `handler_concurrency` controls how handlers run **for a single event**: -- **`global-serial`**: only one handler at a time across all buses using the global handler semaphore. +- **`global-serial`**: only one handler at a time across all buses using `LockManager.global_handler_semaphore`. - **`bus-serial`**: handlers serialize per bus. - **`parallel`**: handlers run concurrently for the event. - **`auto`**: resolves to the bus default. @@ -240,7 +240,7 @@ To prevent that: When you `await event.done()` inside a handler: -- the system finds all buses that have this event queued (using `EventBus.instances` + `event_path`) +- the system finds all buses that have this event queued (using `EventBus._all_instances` + `event_path`) - pauses their runloops - processes the event immediately on each bus - then resumes the runloops diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index cb292d3..4e5efac 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -63,29 +63,29 @@ type ZodShapeFrom> = { } export class BaseEvent { - static _last_timestamp_ms = 0 - event_id!: string - event_created_at!: string - event_type!: string - event_timeout!: number | null - event_parent_id?: string - event_path!: string[] - event_result_schema?: z.ZodTypeAny - event_result_type?: string + event_id!: string // unique uuidv7 identifier for the event + event_created_at!: string // ISO datetime string version of event_created_ts + event_created_ts!: number // nanosecond monotonic version of event_created_at + event_type!: string // should match the class name of the event, e.g. BaseEvent.extend("MyEvent").event_type === "MyEvent" + event_timeout!: number | null // maximum time in seconds that each handler for the event is allowed to run before it is aborted + event_parent_id?: string // id of the parent event that triggered this event, if this event was emitted during handling of another event + event_path!: string[] // list of bus names that the event has been dispatched to, including the current bus + event_result_schema?: z.ZodTypeAny // optional zod schema to enforce the shape of return values from handlers + event_result_type?: string // optional string identifier of the type of the return values from handlers, to make it easier to reference common shapes across networkboundaries e.g. ScreenshotEventResultType event_results!: Map - event_emitted_by_handler_id?: string - event_pending_buses!: number + event_emitted_by_handler_id?: string // if event was emitted inside a handler while it was running, this will be set to the enclosing handler's handler id + event_pending_bus_count!: number // Number of buses that have accepted this event and not yet finished processing or removed it from their queues. event_status!: 'pending' | 'started' | 'completed' event_started_at?: string + event_started_ts?: number event_completed_at?: string - _event_created_at_ts!: number - _event_started_at_ts?: number - _event_completed_at_ts?: number - bus?: EventBus + event_completed_ts?: number event_concurrency?: ConcurrencyMode handler_concurrency?: ConcurrencyMode - _original_event?: BaseEvent - _dispatch_context?: unknown | null + + bus?: EventBus // shortcut to the bus that dispatched this event, for event.bus.dispatch(event) auto-child tracking via proxy wrapping + _original_event?: BaseEvent // underlying event object that was dispatched, if this is a bus-scoped proxy wrapping it + _dispatch_context?: unknown | null // captured AsyncLocalStorage context at dispatch site, used to restore that context when running handlers static schema = BaseEventSchema static event_type?: string @@ -101,7 +101,8 @@ export class BaseEvent { const event_result_schema = (data.event_result_schema ?? ctor.event_result_schema) as z.ZodTypeAny | undefined const event_result_type = data.event_result_type ?? ctor.event_result_type const event_id = data.event_id ?? uuidv7() - const event_created_at = data.event_created_at ?? new Date().toISOString() + const { isostring: default_event_created_at, ts: event_created_ts } = BaseEvent.nextTimestamp() + const event_created_at = data.event_created_at ?? default_event_created_at const event_timeout = data.event_timeout ?? null const base_data = { @@ -121,36 +122,33 @@ export class BaseEvent { const parsed_path = (parsed as { event_path?: string[] }).event_path this.event_path = Array.isArray(parsed_path) ? [...parsed_path] : [] - this.event_pending_buses = 0 + this.event_pending_bus_count = 0 this.event_status = 'pending' this.event_result_schema = event_result_schema this.event_result_type = event_result_type this.event_results = new Map() - this._event_created_at_ts = monotonicNowMs() + this.event_created_ts = event_created_ts this._done = null this._dispatch_context = undefined } - static nextIsoTimestamp(): string { - const now_ms = Date.now() - const next_ms = Math.max(now_ms, BaseEvent._last_timestamp_ms + 1) - BaseEvent._last_timestamp_ms = next_ms - return new Date(next_ms).toISOString() + static nextTimestamp(): { date: Date; isostring: string; ts: number } { + const ts = performance.now() + const date = new Date(performance.timeOrigin + ts) + return { date, isostring: date.toISOString(), ts } } - static extend(shape: TShape): EventFactory - static extend>(shape: TShape): EventFactory> - static extend>(event_type: string, shape: TShape): EventFactory> - static extend>(arg1: string | TShape, arg2?: TShape): EventFactory> { - const event_type = typeof arg1 === 'string' ? arg1 : undefined - const raw_shape = (typeof arg1 === 'string' ? (arg2 ?? {}) : arg1) as Record + static extend(event_type: string, shape?: TShape): EventFactory + static extend>(event_type: string, shape?: TShape): EventFactory> + static extend>(event_type: string, shape: TShape = {} as TShape): EventFactory> { + const raw_shape = shape as Record const event_result_schema = is_zod_schema(raw_shape.event_result_schema) ? (raw_shape.event_result_schema as z.ZodTypeAny) : undefined const event_result_type = typeof raw_shape.event_result_type === 'string' ? raw_shape.event_result_type : undefined - const shape = extract_zod_shape(raw_shape) - const full_schema = BaseEventSchema.extend(shape) + const zod_shape = extract_zod_shape(raw_shape) + const full_schema = BaseEventSchema.extend(zod_shape) class ExtendedEvent extends BaseEvent { static schema = full_schema as unknown as typeof BaseEvent.schema @@ -215,6 +213,7 @@ export class BaseEvent { } } + // get all direct children of this event get event_children(): BaseEvent[] { const children: BaseEvent[] = [] const seen = new Set() @@ -229,6 +228,35 @@ export class BaseEvent { return children } + // get all children grandchildren etc. recursively + get event_descendants(): BaseEvent[] { + const descendants: BaseEvent[] = []; + const visited = new Set(); + const root_id = this.event_id; + const stack = [...this.event_children]; + + while (stack.length > 0) { + const child = stack.pop(); + if (!child) { + continue; + } + const child_id = child.event_id; + if (child_id === root_id) { + continue; + } + if (visited.has(child_id)) { + continue; + } + visited.add(child_id); + descendants.push(child); + if (child.event_children.length > 0) { + stack.push(...child.event_children); + } + } + + return descendants; + } + done(): Promise { if (!this.bus) { return Promise.reject(new Error('event has no bus attached')) @@ -258,8 +286,9 @@ export class BaseEvent { return } this.event_status = 'started' - this.event_started_at = BaseEvent.nextIsoTimestamp() - this._event_started_at_ts = monotonicNowMs() + const { isostring: event_started_at, ts: event_started_ts } = BaseEvent.nextTimestamp() + this.event_started_at = event_started_at + this.event_started_ts = event_started_ts } markCompleted(): void { @@ -267,8 +296,9 @@ export class BaseEvent { return } this.event_status = 'completed' - this.event_completed_at = BaseEvent.nextIsoTimestamp() - this._event_completed_at_ts = monotonicNowMs() + const { isostring: event_completed_at, ts: event_completed_ts } = BaseEvent.nextTimestamp() + this.event_completed_at = event_completed_at + this.event_completed_ts = event_completed_ts this._dispatch_context = null this.ensureDonePromise() this._done!.resolve(this) @@ -285,16 +315,9 @@ export class BaseEvent { return errors } - eventAreAllChildrenComplete(visited: Set = new Set()): boolean { - if (visited.has(this.event_id)) { - return true - } - visited.add(this.event_id) - for (const child of this.event_children) { - if (child.event_status !== 'completed') { - return false - } - if (!child.eventAreAllChildrenComplete(visited)) { + eventAreAllChildrenComplete(): boolean { + for (const descendant of this.event_descendants) { + if (descendant.event_status !== 'completed') { return false } } @@ -302,7 +325,7 @@ export class BaseEvent { } tryFinalizeCompletion(): void { - if (this.event_pending_buses > 0) { + if (this.event_pending_bus_count > 0) { return } if (!this.eventAreAllChildrenComplete()) { @@ -359,10 +382,3 @@ const to_json_schema = (schema: unknown): unknown => { } return undefined } - -const monotonicNowMs = (): number => { - if (typeof performance !== 'undefined' && typeof performance.now === 'function') { - return performance.now() - } - return Date.now() -} diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index bfe00fa..d81a6cf 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -4,74 +4,141 @@ import { captureAsyncContext, runWithAsyncContext } from './async_context.js' import { v5 as uuidv5 } from 'uuid' import { AsyncSemaphore, type ConcurrencyMode, HandlerLock, LockManager, runWithSemaphore, withResolvers } from './lock_manager.js' -const monotonicNowMs = (): number => { - if (typeof performance !== 'undefined' && typeof performance.now === 'function') { - return performance.now() +export class TimeoutError extends Error { + constructor(message: string) { + super(message) + this.name = 'TimeoutError' } - return Date.now() } -export class EventHandlerTimeoutError extends Error { - event_type: string - handler_name: string - timeout_seconds: number +export class EventHandlerError extends Error { + event_result: EventResult + timeout_seconds: number | null + cause: Error - constructor(message: string, params: { event_type: string; handler_name: string; timeout_seconds: number }) { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { super(message) - this.name = 'EventHandlerTimeoutError' - this.event_type = params.event_type - this.handler_name = params.handler_name - this.timeout_seconds = params.timeout_seconds + this.name = 'EventHandlerError' + this.event_result = params.event_result + this.cause = params.cause + this.timeout_seconds = params.timeout_seconds ?? this.event_result.event?.event_timeout ?? null + } + + get event(): BaseEvent { + return this.event_result.event! + } + + get event_type(): string { + return this.event.event_type + } + + get handler_name(): string { + return this.event_result.handler_name + } + + get handler_id(): string { + return this.event_result.handler_id + } + + get event_timeout(): number | null { + return this.event.event_timeout } } -export class EventHandlerCancelledError extends Error { - event_type: string - handler_name: string - parent_error: Error +// EventHandlerTimeoutError: when the handler itself timed out while executing (due to event.event_timeout being exceeded) +export class EventHandlerTimeoutError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause?: Error }) { + super(message, { + event_result: params.event_result, + timeout_seconds: params.timeout_seconds, + cause: params.cause ?? new TimeoutError(message), + }) + this.name = 'EventHandlerTimeoutError' + } +} - constructor(message: string, params: { event_type: string; handler_name: string; parent_error: Error }) { - super(message) +// EventHandlerCancelledError: when a pending handler was cancelled and never run due to an error (e.g. timeout) in a parent scope +export class EventHandlerCancelledError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message, params) this.name = 'EventHandlerCancelledError' - this.event_type = params.event_type - this.handler_name = params.handler_name - this.parent_error = params.parent_error } } -export class EventHandlerAbortedError extends Error { - event_type: string - handler_name: string - parent_error: Error - event_result: EventResult - - constructor(message: string, params: { event_type: string; handler_name: string; parent_error: Error; event_result: EventResult }) { - super(message) +// EventHandlerAbortedError: when a handler that was already running was aborted due to an error in the parent scope, not due to an error in its own logic / exceeding its own timeout +export class EventHandlerAbortedError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message, params) this.name = 'EventHandlerAbortedError' - this.event_type = params.event_type - this.handler_name = params.handler_name - this.parent_error = params.parent_error - this.event_result = params.event_result } } import type { EventHandler, EventKey, FindOptions, HandlerOptions } from './types.js' type FindWaiter = { + // similar to a handler, except its for .find() calls + // needs to be different because it's resolved on dispatch not event processing time + // also is ephemeral, gets unregistered the moment it resolves and + // doesnt show up in event processing tree, doesn't block runloop, etc. event_key: EventKey matches: (event: BaseEvent) => boolean resolve: (event: BaseEvent) => void timeout_id?: ReturnType } -type HandlerEntry = { - id: string +class HandlerEntry { + // an entry in the list of handlers that are registered on a bus + id: string // unique uuidv5 based on hash of bus name, handler name, handler file path:lineno, registered at timestamp, and event key handler: EventHandler handler_name: string handler_file_path?: string handler_registered_at: string + handler_registered_ts: number options?: HandlerOptions event_key: string | '*' + + constructor(params: { + id: string + handler: EventHandler + handler_name: string + handler_file_path?: string + handler_registered_at: string + handler_registered_ts: number + options?: HandlerOptions + event_key: string | '*' + }) { + this.id = params.id + this.handler = params.handler + this.handler_name = params.handler_name + this.handler_file_path = params.handler_file_path + this.handler_registered_at = params.handler_registered_at + this.handler_registered_ts = params.handler_registered_ts + this.options = params.options + this.event_key = params.event_key + } + + static computeHandlerId(params: { + bus_name: string + handler_name: string + handler_file_path?: string + handler_registered_at: string + event_key: string | '*' + }): string { + const file_path = HandlerEntry.normalizeHandlerFilePath(params.handler_file_path) + const seed = `${params.bus_name}|${params.handler_name}|${file_path}|${params.handler_registered_at}|${params.event_key}` + return uuidv5(seed, HANDLER_ID_NAMESPACE) + } + + private static normalizeHandlerFilePath(file_path?: string): string { + if (!file_path) { + return 'unknown' + } + const match = file_path.match(/^(.*?):(\d+)(?::\d+)?$/) + if (match) { + return `${match[1]}:${match[2]}` + } + return file_path + } } const HANDLER_ID_NAMESPACE = uuidv5('bubus-handler', uuidv5.DNS) @@ -83,7 +150,7 @@ type EventBusOptions = { event_timeout?: number | null } -class EventBusInstanceRegistry { +class GlobalEventBusInstanceRegistry { private _refs = new Set>() private _lookup = new WeakMap>() private _gc = @@ -128,11 +195,9 @@ class EventBusInstanceRegistry { } export class EventBus { - static instances = new EventBusInstanceRegistry() - static global_event_semaphore = new AsyncSemaphore(1) - static global_handler_semaphore = new AsyncSemaphore(1) + static _all_instances = new GlobalEventBusInstanceRegistry() static findEventById(event_id: string): BaseEvent | null { - for (const bus of EventBus.instances) { + for (const bus of EventBus._all_instances) { const event = bus.event_history.get(event_id) if (event) { return event @@ -146,8 +211,6 @@ export class EventBus { event_concurrency_default: ConcurrencyMode handler_concurrency_default: ConcurrencyMode event_timeout_default: number | null - bus_event_semaphore: AsyncSemaphore - bus_handler_semaphore: AsyncSemaphore handlers: Map event_history: Map pending_event_queue: BaseEvent[] @@ -163,33 +226,22 @@ export class EventBus { this.event_concurrency_default = options.event_concurrency ?? 'bus-serial' this.handler_concurrency_default = options.handler_concurrency ?? 'bus-serial' this.event_timeout_default = options.event_timeout === undefined ? 60 : options.event_timeout - this.bus_event_semaphore = new AsyncSemaphore(1) - this.bus_handler_semaphore = new AsyncSemaphore(1) this.handlers = new Map() this.event_history = new Map() this.pending_event_queue = [] this.in_flight_event_ids = new Set() this.runloop_running = false - this.locks = new LockManager({ - get_idle_snapshot: () => - this.pending_event_queue.length === 0 && this.in_flight_event_ids.size === 0 && !this.hasPendingResults() && !this.runloop_running, - get_event_concurrency_default: () => this.event_concurrency_default, - get_handler_concurrency_default: () => this.handler_concurrency_default, - get_bus_event_semaphore: () => this.bus_event_semaphore, - get_bus_handler_semaphore: () => this.bus_handler_semaphore, - get_global_event_semaphore: () => EventBus.global_event_semaphore, - get_global_handler_semaphore: () => EventBus.global_handler_semaphore, - }) + this.locks = new LockManager(this) this.find_waiters = new Set() - EventBus.instances.add(this) + EventBus._all_instances.add(this) this.dispatch = this.dispatch.bind(this) this.emit = this.emit.bind(this) } destroy(): void { - EventBus.instances.delete(this) + EventBus._all_instances.delete(this) this.handlers.clear() for (const event of this.event_history.values()) { event._gc() @@ -205,18 +257,28 @@ export class EventBus { const normalized_key = this.normalizeEventKey(event_key) const handler_name = handler.name || 'anonymous' const handler_file_path = this.inferHandlerFilePath() ?? undefined - const handler_registered_at = BaseEvent.nextIsoTimestamp() - const handler_id = this.computeHandlerId(normalized_key, handler_name, handler_file_path, handler_registered_at) - - this.handlers.set(handler_id, { - id: handler_id, - handler: handler as EventHandler, + const { isostring: handler_registered_at, ts: handler_registered_ts } = BaseEvent.nextTimestamp() + const handler_id = HandlerEntry.computeHandlerId({ + bus_name: this.name, handler_name, handler_file_path, handler_registered_at, - options: Object.keys(options).length > 0 ? options : undefined, event_key: normalized_key, }) + + this.handlers.set( + handler_id, + new HandlerEntry({ + id: handler_id, + handler: handler as EventHandler, + handler_name, + handler_file_path, + handler_registered_at, + handler_registered_ts, + options: Object.keys(options).length > 0 ? options : undefined, + event_key: normalized_key, + }) + ) } off(event_key: EventKey | '*', handler?: EventHandler | string): void { @@ -232,26 +294,19 @@ export class EventBus { } } - private computeHandlerId( - event_key: string | '*', - handler_name: string, - handler_file_path: string | undefined, - handler_registered_at: string - ): string { - const file_path = handler_file_path ?? 'unknown' - const seed = `${this.name}|${event_key}|${handler_name}|${file_path}|${handler_registered_at}` - return uuidv5(seed, HANDLER_ID_NAMESPACE) - } - dispatch(event: T, _event_key?: EventKey): T { - const original_event = event._original_event ?? event + const original_event = event._original_event ?? event // if event is a bus-scoped proxy already, get the original underlying event object if (!original_event.bus) { + // if we are the first bus to dispatch this event, set the bus property on the original event object original_event.bus = this } if (!Array.isArray(original_event.event_path)) { original_event.event_path = [] } if (original_event._dispatch_context === undefined) { + // when used in fastify/nextjs/other contexts with tracing based on AsyncLocalStorage in node + // we want to capture the context at the dispatch site and use it when running handlers + // because events may be handled async in a separate context than the dispatch site original_event._dispatch_context = captureAsyncContext() } if (original_event.event_timeout === null) { @@ -276,7 +331,7 @@ export class EventBus { this.event_history.set(original_event.event_id, original_event) this.trimHistory() - original_event.event_pending_buses += 1 + original_event.event_pending_bus_count += 1 this.pending_event_queue.push(original_event) this.startRunloop() @@ -327,7 +382,7 @@ export class EventBus { } if (past !== false || future !== false) { - const now_ms = Date.now() + const now_ms = performance.timeOrigin + performance.now() const cutoff_ms = past === true ? null : now_ms - Math.max(0, Number(past)) * 1000 const history_values = Array.from(this.event_history.values()) @@ -426,7 +481,7 @@ export class EventBus { await this.locks.waitForIdle() } - private hasPendingResults(): boolean { + hasPendingResults(): boolean { for (const event of this.event_history.values()) { for (const result of event.event_results.values()) { if (result.eventbus_name !== this.name) { @@ -480,22 +535,14 @@ export class EventBus { } logTree(): string { - const parent_to_children = new Map() + const parent_to_children = new Map() - const add_child = (parent_id: string | null, child: BaseEvent): void => { + const add_child = (parent_id: string, child: BaseEvent): void => { const existing = parent_to_children.get(parent_id) ?? [] existing.push(child) parent_to_children.set(parent_id, existing) } - for (const event of this.event_history.values()) { - add_child(event.event_parent_id ?? null, event) - } - - for (const children of parent_to_children.values()) { - children.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) - } - const root_events: BaseEvent[] = [] const seen = new Set() @@ -513,6 +560,31 @@ export class EventBus { return '(No events in history)' } + const nodes_by_id = new Map() + for (const root of root_events) { + nodes_by_id.set(root.event_id, root) + for (const descendant of root.event_descendants) { + nodes_by_id.set(descendant.event_id, descendant) + } + } + + for (const node of nodes_by_id.values()) { + const parent_id = node.event_parent_id + if (!parent_id || parent_id === node.event_id) { + continue + } + if (!nodes_by_id.has(parent_id)) { + continue + } + add_child(parent_id, node) + } + + for (const children of parent_to_children.values()) { + children.sort((a, b) => + a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0 + ) + } + const lines: string[] = [] lines.push(`πŸ“Š Event History Tree for ${this.name}`) lines.push('='.repeat(80)) @@ -560,7 +632,7 @@ export class EventBus { // Event semaphore bypass: the initiating bus (this) always bypasses its event semaphore // since we're inside a handler that already holds it. Other buses only bypass if // they resolve to the same semaphore instance (i.e. global-serial mode where all - // buses share EventBus.global_event_semaphore). + // buses share LockManager.global_event_semaphore). // // Handler semaphores are NOT bypassed β€” child handlers must acquire the handler // semaphore normally. This works because _runImmediately already released the @@ -594,7 +666,7 @@ export class EventBus { // Bypass event semaphore on the initiating bus (we're already inside a handler // that acquired it). For other buses, only bypass if they resolve to the same - // semaphore instance (global-serial shares one semaphore across all buses). + // semaphore instance (global-serial shares one semaphore across all buses). const bus_event_semaphore = bus.locks.getSemaphoreForEvent(event) const should_bypass_event_semaphore = bus === this || (initiating_event_semaphore !== null && bus_event_semaphore === initiating_event_semaphore) @@ -620,7 +692,7 @@ export class EventBus { const event_path = Array.isArray(event.event_path) ? event.event_path : [] for (const name of event_path) { - for (const bus of EventBus.instances) { + for (const bus of EventBus._all_instances) { if (bus.name !== name) { continue } @@ -741,8 +813,8 @@ export class EventBus { if (event.event_status === 'completed') { return } - const started_at_ts = event._event_started_at_ts ?? event._event_created_at_ts ?? monotonicNowMs() - const elapsed_ms = Math.max(0, monotonicNowMs() - started_at_ts) + const started_ts = event.event_started_ts ?? event.event_created_ts ?? performance.now() + const elapsed_ms = Math.max(0, performance.now() - started_ts) const elapsed_seconds = (elapsed_ms / 1000).toFixed(1) console.warn( `[bubus] Possible deadlock: ${event.event_type}#${event.event_id} still ${event.event_status} on ${this.name} after ${elapsed_seconds}s (timeout ${event.event_timeout}s)` @@ -755,7 +827,7 @@ export class EventBus { const handler_promises = handler_entries.map((entry) => this.runHandlerEntry(event, entry.handler, entry.result, entry.options)) await Promise.all(handler_promises) - event.event_pending_buses = Math.max(0, event.event_pending_buses - 1) + event.event_pending_bus_count = Math.max(0, event.event_pending_bus_count - 1) event.tryFinalizeCompletion() if (event.event_status === 'completed') { this.notifyParentsFor(event) @@ -793,7 +865,7 @@ export class EventBus { result.markStarted() const abort_promise = result.ensureAbortSignal() const handler_result = await Promise.race([ - this.runHandlerWithTimeout(event, handler, handler_event), + this.runHandlerWithTimeout(event, handler, handler_event, result), abort_promise, ]) if (event.event_result_schema) { @@ -810,12 +882,7 @@ export class EventBus { } catch (error) { if (error instanceof EventHandlerTimeoutError) { result.markError(error) - const cancelled_error = new EventHandlerCancelledError(`Cancelled pending handler due to parent timeout: ${error.message}`, { - event_type: event.event_type, - handler_name: result.handler_name, - parent_error: error, - }) - this.cancelPendingDescendants(event, cancelled_error) + this.cancelPendingDescendants(event, error) } else { result.markError(error) } @@ -827,14 +894,19 @@ export class EventBus { } } - private async runHandlerWithTimeout(event: BaseEvent, handler: EventHandler, handler_event: BaseEvent = event): Promise { + private async runHandlerWithTimeout( + event: BaseEvent, + handler: EventHandler, + handler_event: BaseEvent = event, + result: EventResult + ): Promise { const handler_name = handler.name || 'anonymous' const warn_ms = 15000 - const started_at_ms = Date.now() + const started_at_ms = performance.now() const should_warn = event.event_timeout === null || event.event_timeout * 1000 > warn_ms const warn_timer = should_warn ? setTimeout(() => { - const elapsed_ms = Date.now() - started_at_ms + const elapsed_ms = performance.now() - started_at_ms const elapsed_seconds = (elapsed_ms / 1000).toFixed(1) console.warn(`[bubus] Slow handler: ${event.event_type}.${handler_name} running ${elapsed_seconds}s on ${this.name}`) }, warn_ms) @@ -872,8 +944,7 @@ export class EventBus { const timer = setTimeout(() => { finalize(reject)( new EventHandlerTimeoutError(`handler ${handler_name} timed out after ${timeout_seconds}s`, { - event_type: event.event_type, - handler_name, + event_result: result, timeout_seconds, }) ) @@ -970,7 +1041,7 @@ export class EventBus { } cancelPendingDescendants(event: BaseEvent, reason: unknown): void { - const cancellation_error = this.normalizeCancellationError(event, reason) + const cancellation_cause = this.normalizeCancellationCause(reason) const visited = new Set() const cancel_child = (child: BaseEvent): void => { const original_child = child._original_event ?? child @@ -987,11 +1058,11 @@ export class EventBus { const path = Array.isArray(original_child.event_path) ? original_child.event_path : [] const buses_to_cancel = new Set(path) - for (const bus of EventBus.instances) { + for (const bus of EventBus._all_instances) { if (!buses_to_cancel.has(bus.name)) { continue } - bus.cancelEventOnBus(original_child, cancellation_error) + bus.cancelEventOnBus(original_child, cancellation_cause) } // Force-complete the child event. In JS we can't stop running async @@ -1008,26 +1079,27 @@ export class EventBus { } } - private normalizeCancellationError(event: BaseEvent, reason: unknown): EventHandlerCancelledError { - if (reason instanceof EventHandlerCancelledError) { + private normalizeCancellationCause(reason: unknown): Error { + if (reason instanceof EventHandlerCancelledError || reason instanceof EventHandlerAbortedError) { + return reason.cause instanceof Error ? reason.cause : reason + } + if (reason instanceof EventHandlerTimeoutError) { return reason } - - const parent_error = reason instanceof Error ? reason : new Error(String(reason)) - return new EventHandlerCancelledError(`Cancelled pending handler due to ancestor cancellation: ${parent_error.message}`, { - event_type: event.event_type, - handler_name: 'unknown', - parent_error, - }) + return reason instanceof Error ? reason : new Error(String(reason)) } - private cancelEventOnBus(event: BaseEvent, error: EventHandlerCancelledError): void { + private cancelEventOnBus(event: BaseEvent, cause: Error): void { const original_event = event._original_event ?? event const handler_entries = this.createPendingHandlerResults(original_event) let updated = false for (const entry of handler_entries) { if (entry.result.status === 'pending') { - entry.result.markError(error) + const cancelled_error = new EventHandlerCancelledError(`Cancelled pending handler due to parent error: ${cause.message}`, { + event_result: entry.result, + cause, + }) + entry.result.markError(cancelled_error) updated = true } else if (entry.result.status === 'started') { // Abort running handlers. In JS we can't actually stop a running async @@ -1040,15 +1112,10 @@ export class EventBus { // reacquire path auto-releases when it wakes. entry.result._lock?.exitHandlerRun() - const aborted_error = new EventHandlerAbortedError( - `Aborted running handler due to parent timeout: ${error.message}`, - { - event_type: original_event.event_type, - handler_name: entry.result.handler_name, - parent_error: error.parent_error, - event_result: entry.result, - } - ) + const aborted_error = new EventHandlerAbortedError(`Aborted running handler due to parent error: ${cause.message}`, { + event_result: entry.result, + cause, + }) entry.result.markError(aborted_error) entry.result.signalAbort(aborted_error) updated = true @@ -1065,7 +1132,7 @@ export class EventBus { } if (removed > 0 && !this.in_flight_event_ids.has(original_event.event_id)) { - original_event.event_pending_buses = Math.max(0, original_event.event_pending_buses - 1) + original_event.event_pending_bus_count = Math.max(0, original_event.event_pending_bus_count - 1) } if (updated || removed > 0) { @@ -1080,7 +1147,7 @@ export class EventBus { event: BaseEvent, indent: string, is_last: boolean, - parent_to_children: Map, + parent_to_children: Map, visited: Set ): string { const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' @@ -1152,7 +1219,7 @@ export class EventBus { result: EventResult, indent: string, is_last: boolean, - parent_to_children: Map, + parent_to_children: Map, visited: Set ): string { const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' @@ -1301,6 +1368,9 @@ export class EventBus { const handlers = this.collectHandlers(event) return handlers.map(({ handler_id, handler, handler_name, handler_file_path, options }) => { const existing = event.event_results.get(handler_id) + if (existing && !existing.event) { + existing.event = event + } const result = existing ?? new EventResult({ @@ -1309,6 +1379,7 @@ export class EventBus { handler_name, handler_file_path, eventbus_name: this.name, + event, }) if (!existing) { event.event_results.set(handler_id, result) diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts index 6a47d68..364af01 100644 --- a/bubus-ts/src/event_result.ts +++ b/bubus-ts/src/event_result.ts @@ -1,6 +1,6 @@ import { v7 as uuidv7 } from 'uuid' -import type { BaseEvent } from './base_event.js' +import { BaseEvent } from './base_event.js' import { HandlerLock, withResolvers } from './lock_manager.js' import type { Deferred } from './lock_manager.js' @@ -9,13 +9,16 @@ export type EventResultStatus = 'pending' | 'started' | 'completed' | 'error' export class EventResult { id: string status: EventResultStatus + event?: BaseEvent event_id: string handler_id: string handler_name: string handler_file_path?: string eventbus_name: string started_at?: string + started_ts?: number completed_at?: string + completed_ts?: number result?: unknown error?: unknown event_children: BaseEvent[] @@ -27,9 +30,17 @@ export class EventResult { // _runImmediately for yield-and-reacquire during queue-jumps. _lock: HandlerLock | null - constructor(params: { event_id: string; handler_id: string; handler_name: string; handler_file_path?: string; eventbus_name: string }) { + constructor(params: { + event_id: string + handler_id: string + handler_name: string + handler_file_path?: string + eventbus_name: string + event?: BaseEvent + }) { this.id = uuidv7() this.status = 'pending' + this.event = params.event this.event_id = params.event_id this.handler_id = params.handler_id this.handler_name = params.handler_name @@ -59,20 +70,26 @@ export class EventResult { markStarted(): void { this.status = 'started' - this.started_at = new Date().toISOString() + const { isostring: started_at, ts: started_ts } = BaseEvent.nextTimestamp() + this.started_at = started_at + this.started_ts = started_ts } markCompleted(result: unknown): void { if (this.status === 'completed' || this.status === 'error') return this.status = 'completed' this.result = result - this.completed_at = new Date().toISOString() + const { isostring: completed_at, ts: completed_ts } = BaseEvent.nextTimestamp() + this.completed_at = completed_at + this.completed_ts = completed_ts } markError(error: unknown): void { if (this.status === 'completed' || this.status === 'error') return this.status = 'error' this.error = error - this.completed_at = new Date().toISOString() + const { isostring: completed_at, ts: completed_ts } = BaseEvent.nextTimestamp() + this.completed_at = completed_at + this.completed_ts = completed_ts } } diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts index f57b2ea..b2f9a5d 100644 --- a/bubus-ts/src/index.ts +++ b/bubus-ts/src/index.ts @@ -1,5 +1,5 @@ export { BaseEvent, BaseEventSchema } from './base_event.js' export { EventResult } from './event_result.js' export { EventBus, EventHandlerTimeoutError, EventHandlerCancelledError, EventHandlerAbortedError } from './event_bus.js' -export type { ConcurrencyMode } from './lock_manager.js' +export type { ConcurrencyMode, EventBusInterfaceForLockManager } from './lock_manager.js' export type { EventClass, EventHandler, EventKey, HandlerOptions, EventStatus, FindOptions, FindWindow } from './types.js' diff --git a/bubus-ts/src/lock_manager.ts b/bubus-ts/src/lock_manager.ts index 3d0f278..58c288f 100644 --- a/bubus-ts/src/lock_manager.ts +++ b/bubus-ts/src/lock_manager.ts @@ -168,42 +168,36 @@ export class HandlerLock { // ─── LockManager ───────────────────────────────────────────────────────────── -type LockManagerOptions = { - get_idle_snapshot: () => boolean - get_event_concurrency_default: () => ConcurrencyMode - get_handler_concurrency_default: () => ConcurrencyMode - get_bus_event_semaphore: () => AsyncSemaphore - get_bus_handler_semaphore: () => AsyncSemaphore - get_global_event_semaphore: () => AsyncSemaphore - get_global_handler_semaphore: () => AsyncSemaphore +export type EventBusInterfaceForLockManager = { + pending_event_queue: BaseEvent[] + in_flight_event_ids: Set + runloop_running: boolean + hasPendingResults: () => boolean + event_concurrency_default: ConcurrencyMode + handler_concurrency_default: ConcurrencyMode } export class LockManager { - private get_idle_snapshot: () => boolean - private get_event_concurrency_default: () => ConcurrencyMode - private get_handler_concurrency_default: () => ConcurrencyMode - private get_bus_event_semaphore: () => AsyncSemaphore - private get_bus_handler_semaphore: () => AsyncSemaphore - private get_global_event_semaphore: () => AsyncSemaphore - private get_global_handler_semaphore: () => AsyncSemaphore - - private pause_depth: number - private pause_waiters: Array<() => void> - private queue_jump_pause_releases: WeakMap void> - private active_handler_results: EventResult[] - - private idle_waiters: Array<() => void> - private idle_check_pending: boolean - private idle_check_streak: number - - constructor(options: LockManagerOptions) { - this.get_idle_snapshot = options.get_idle_snapshot - this.get_event_concurrency_default = options.get_event_concurrency_default - this.get_handler_concurrency_default = options.get_handler_concurrency_default - this.get_bus_event_semaphore = options.get_bus_event_semaphore - this.get_bus_handler_semaphore = options.get_bus_handler_semaphore - this.get_global_event_semaphore = options.get_global_event_semaphore - this.get_global_handler_semaphore = options.get_global_handler_semaphore + static global_event_semaphore = new AsyncSemaphore(1) + static global_handler_semaphore = new AsyncSemaphore(1) + + private bus: EventBusInterfaceForLockManager // Live bus reference; used to read defaults and idle state. + readonly bus_event_semaphore: AsyncSemaphore // Per-bus event semaphore; created with LockManager and never swapped. + readonly bus_handler_semaphore: AsyncSemaphore // Per-bus handler semaphore; created with LockManager and never swapped. + + private pause_depth: number // Re-entrant pause counter; increments on requestPause, decrements on release. + private pause_waiters: Array<() => void> // Resolvers for waitUntilResumed; drained when pause_depth hits 0. + private queue_jump_pause_releases: WeakMap void> // Per-handler pause release for queue-jump; cleared on handler exit. + private active_handler_results: EventResult[] // Stack of active handler results for "inside handler" detection. + + private idle_waiters: Array<() => void> // Resolvers waiting for stable idle; cleared when idle confirmed. + private idle_check_pending: boolean // Debounce flag to avoid scheduling redundant idle checks. + private idle_check_streak: number // Counts consecutive idle checks; used to require two ticks of idle. + + constructor(bus: EventBusInterfaceForLockManager) { + this.bus = bus + this.bus_event_semaphore = new AsyncSemaphore(1) + this.bus_handler_semaphore = new AsyncSemaphore(1) this.pause_depth = 0 this.pause_waiters = [] @@ -284,7 +278,7 @@ export class LockManager { } waitForIdle(): Promise { - if (this.get_idle_snapshot()) { + if (this.getIdleSnapshot()) { return Promise.resolve() } return new Promise((resolve) => { @@ -294,7 +288,7 @@ export class LockManager { } notifyIdleListeners(): void { - if (!this.get_idle_snapshot()) { + if (!this.getIdleSnapshot()) { this.idle_check_streak = 0 if (this.idle_waiters.length > 0) { this.scheduleIdleCheck() @@ -319,17 +313,18 @@ export class LockManager { } getSemaphoreForEvent(event: BaseEvent): AsyncSemaphore | null { - const resolved = resolveConcurrencyMode(event.event_concurrency, this.get_event_concurrency_default()) - return semaphoreForMode(resolved, this.get_global_event_semaphore(), this.get_bus_event_semaphore()) + const resolved = resolveConcurrencyMode(event.event_concurrency, this.bus.event_concurrency_default) + return semaphoreForMode(resolved, LockManager.global_event_semaphore, this.bus_event_semaphore) } getSemaphoreForHandler(event: BaseEvent, options?: HandlerOptions): AsyncSemaphore | null { - const event_override = event.handler_concurrency && event.handler_concurrency !== 'auto' ? event.handler_concurrency : undefined + const event_override = + event.handler_concurrency && event.handler_concurrency !== 'auto' ? event.handler_concurrency : undefined const handler_override = options?.handler_concurrency && options.handler_concurrency !== 'auto' ? options.handler_concurrency : undefined - const fallback = this.get_handler_concurrency_default() + const fallback = this.bus.handler_concurrency_default const resolved = resolveConcurrencyMode(event_override ?? handler_override ?? fallback, fallback) - return semaphoreForMode(resolved, this.get_global_handler_semaphore(), this.get_bus_handler_semaphore()) + return semaphoreForMode(resolved, LockManager.global_handler_semaphore, this.bus_handler_semaphore) } clear(): void { @@ -352,4 +347,14 @@ export class LockManager { this.notifyIdleListeners() }, 0) } + + // Compute instantaneous idle snapshot from live bus state; used to gate waiters. + private getIdleSnapshot(): boolean { + return ( + this.bus.pending_event_queue.length === 0 && + this.bus.in_flight_event_ids.size === 0 && + !this.bus.hasPendingResults() && + !this.bus.runloop_running + ) + } } diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index 1358f52..ef5dec9 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -236,12 +236,12 @@ test('awaited child jumps queue without overshoot', async () => { const event2_from_history = history_list.find((event) => event.event_type === 'Event2') const event3_from_history = history_list.find((event) => event.event_type === 'Event3') - assert.ok(child_event?.event_started_at) - assert.ok(event2_from_history?.event_started_at) - assert.ok(event3_from_history?.event_started_at) + assert.ok(child_event?.event_started_ts !== undefined) + assert.ok(event2_from_history?.event_started_ts !== undefined) + assert.ok(event3_from_history?.event_started_ts !== undefined) - assert.ok(child_event!.event_started_at! < event2_from_history!.event_started_at!) - assert.ok(child_event!.event_started_at! < event3_from_history!.event_started_at!) + assert.ok(child_event!.event_started_ts! <= event2_from_history!.event_started_ts!) + assert.ok(child_event!.event_started_ts! <= event3_from_history!.event_started_ts!) }) test('done() on non-proxied event keeps bus paused during queue-jump', async () => { @@ -886,7 +886,7 @@ test('BUG: queue-jump two-bus global-serial handlers should serialize across bot // Check: bus_a handlers all finish before bus_b handlers start // (because runImmediatelyAcrossBuses processes sequentially and - // all share the global handler semaphore) + // all share LockManager.global_handler_semaphore) const a2_end = log.indexOf('a2_end') const b1_start = log.indexOf('b1_start') assert.ok(a2_end < b1_start, `global-serial: bus_a should finish before bus_b starts. Got: [${log.join(', ')}]`) diff --git a/bubus-ts/tests/eventbus_basics.test.ts b/bubus-ts/tests/eventbus_basics.test.ts index 72a36ab..82d9b69 100644 --- a/bubus-ts/tests/eventbus_basics.test.ts +++ b/bubus-ts/tests/eventbus_basics.test.ts @@ -2,6 +2,7 @@ import assert from 'node:assert/strict' import { test } from 'node:test' import { BaseEvent, EventBus } from '../src/index.js' +import { LockManager } from '../src/lock_manager.js' import { z } from 'zod' const delay = (ms: number): Promise => @@ -20,7 +21,7 @@ test('EventBus initializes with correct defaults', async () => { assert.equal(bus.handler_concurrency_default, 'bus-serial') assert.equal(bus.event_timeout_default, 60) assert.equal(bus.event_history.size, 0) - assert.ok(EventBus.instances.has(bus)) + assert.ok(EventBus._all_instances.has(bus)) await bus.waitUntilIdle() }) @@ -91,8 +92,8 @@ test('EventBus locks methods are callable and preserve semaphore resolution beha event_concurrency: 'global-serial', handler_concurrency: 'global-serial', }) - assert.equal(bus.locks.getSemaphoreForEvent(event_with_global), EventBus.global_event_semaphore) - assert.equal(bus.locks.getSemaphoreForHandler(event_with_global), EventBus.global_handler_semaphore) + assert.equal(bus.locks.getSemaphoreForEvent(event_with_global), LockManager.global_event_semaphore) + assert.equal(bus.locks.getSemaphoreForHandler(event_with_global), LockManager.global_handler_semaphore) const event_with_parallel = GateEvent({ event_concurrency: 'parallel', @@ -417,16 +418,16 @@ test('event with explicit timeout is not overridden by bus default', async () => await bus.waitUntilIdle() }) -// ─── EventBus.instances tracking ───────────────────────────────────────────── +// ─── EventBus._all_instances tracking ───────────────────────────────────────────── -test('EventBus.instances tracks all created buses', () => { - const initial_count = EventBus.instances.size +test('EventBus._all_instances tracks all created buses', () => { + const initial_count = EventBus._all_instances.size const bus_a = new EventBus('TrackA') const bus_b = new EventBus('TrackB') - assert.ok(EventBus.instances.has(bus_a)) - assert.ok(EventBus.instances.has(bus_b)) - assert.equal(EventBus.instances.size, initial_count + 2) + assert.ok(EventBus._all_instances.has(bus_a)) + assert.ok(EventBus._all_instances.has(bus_b)) + assert.equal(EventBus._all_instances.size, initial_count + 2) }) // ─── Circular forwarding prevention ────────────────────────────────────────── @@ -478,7 +479,7 @@ test('circular forwarding does not cause infinite loop', async () => { // ─── EventBus GC / memory leak ─────────────────────────────────────────────── -test('unreferenced EventBus can be garbage collected (not retained by instances)', async () => { +test('unreferenced EventBus can be garbage collected (not retained by _all_instances)', async () => { // This test requires --expose-gc to force garbage collection const gc = globalThis.gc as (() => void) | undefined if (typeof gc !== 'function') { @@ -499,14 +500,14 @@ test('unreferenced EventBus can be garbage collected (not retained by instances) await delay(50) gc() - // If EventBus.instances holds a strong reference (Set), + // If EventBus._all_instances holds a strong reference (Set), // the bus will NOT be collected β€” proving the memory leak. // After the fix (WeakRef-based storage), the bus should be collected. assert.equal( weak_ref!.deref(), undefined, 'bus should be garbage collected when no external references remain β€” ' + - 'EventBus.instances is holding a strong reference (memory leak)' + 'EventBus._all_instances is holding a strong reference (memory leak)' ) }) diff --git a/bubus-ts/tests/forwarding.test.ts b/bubus-ts/tests/forwarding.test.ts index 27c8d92..cb69616 100644 --- a/bubus-ts/tests/forwarding.test.ts +++ b/bubus-ts/tests/forwarding.test.ts @@ -83,7 +83,7 @@ test('await event.done waits for handlers on forwarded buses', async () => { await event.done() assert.deepEqual(completion_log.sort(), ['A', 'B', 'C']) - assert.equal(event.event_pending_buses, 0) + assert.equal(event.event_pending_bus_count, 0) }) test('circular forwarding A->B->C->A does not loop', async () => { @@ -181,6 +181,6 @@ test('await event.done waits when forwarding handler is async-delayed', async () assert.equal(bus_a_done, true) assert.equal(bus_b_done, true) - assert.equal(event.event_pending_buses, 0) + assert.equal(event.event_pending_bus_count, 0) assert.deepEqual(event.event_path, ['BusA', 'BusB']) }) diff --git a/bubus-ts/tests/locking.test.ts b/bubus-ts/tests/locking.test.ts index 9244819..f7ac09b 100644 --- a/bubus-ts/tests/locking.test.ts +++ b/bubus-ts/tests/locking.test.ts @@ -44,7 +44,7 @@ F) Forwarding & bus context G) Parent/child tracking - Child events not correctly linked to the parent handler when emitted via event.bus. - event_children missing under concurrency due to async timing. -- event_pending_buses not decremented properly, leaving events stuck. +- event_pending_bus_count not decremented properly, leaving events stuck. H) Find semantics under concurrency - find(past) returns event not yet completed. @@ -944,7 +944,7 @@ test('edge-case: event with no handlers completes immediately', async () => { await bus.waitUntilIdle() assert.equal(event.event_status, 'completed') - assert.equal(event.event_pending_buses, 0) + assert.equal(event.event_pending_bus_count, 0) }) test('fifo: forwarded events preserve order on target bus (bus-serial)', async () => { diff --git a/bubus-ts/tests/performance.test.ts b/bubus-ts/tests/performance.test.ts index 171bbfa..f9bc9ea 100644 --- a/bubus-ts/tests/performance.test.ts +++ b/bubus-ts/tests/performance.test.ts @@ -102,13 +102,13 @@ test('500 ephemeral buses with 100 events each', { timeout: 30_000 }, async () = `\n perf: ${total_buses} buses Γ— ${events_per_bus} events = ${total_events} total in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + - `\n live bus instances: ${EventBus.instances.size}` + `\n live bus instances: ${EventBus._all_instances.size}` ) assert.equal(processed_count, total_events) assert.ok(total_ms < 30_000, `Processing took ${total_ms}ms`) // All buses should have been cleaned up from the registry - assert.equal(EventBus.instances.size, 0, 'All buses should be destroyed') + assert.equal(EventBus._all_instances.size, 0, 'All buses should be destroyed') }) // Simulates per-request handler registration pattern: a shared bus where each @@ -318,7 +318,7 @@ test( `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + `\n history: a=${bus_a.event_history.size} b=${bus_b.event_history.size} c=${bus_c.event_history.size}` + `\n handlers: a=${bus_a.handlers.size} b=${bus_b.handlers.size} c=${bus_c.handlers.size}` + - `\n instances: ${EventBus.instances.size}` + `\n instances: ${EventBus._all_instances.size}` ) // All iterations processed @@ -343,6 +343,6 @@ test( bus_b.destroy() bus_c.destroy() - assert.equal(EventBus.instances.size, 0, 'All buses destroyed') + assert.equal(EventBus._all_instances.size, 0, 'All buses destroyed') } ) diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts index 7cc7b9f..ec21042 100644 --- a/bubus-ts/tests/timeout.test.ts +++ b/bubus-ts/tests/timeout.test.ts @@ -2,6 +2,7 @@ import assert from 'node:assert/strict' import { test } from 'node:test' import { BaseEvent, EventBus, EventHandlerCancelledError, EventHandlerAbortedError, EventHandlerTimeoutError } from '../src/index.js' +import { LockManager } from '../src/lock_manager.js' const TimeoutEvent = BaseEvent.extend('TimeoutEvent', {}) @@ -42,6 +43,91 @@ test('handler completes within timeout', async () => { assert.equal(result.result, 'fast') }) +test('event handler errors expose event_result, cause, and timeout metadata', async () => { + const bus = new EventBus('ErrorMetadataBus') + + const ParentCancelEvent = BaseEvent.extend('ParentCancelEvent', {}) + const PendingChildEvent = BaseEvent.extend('PendingChildEvent', {}) + const ParentAbortEvent = BaseEvent.extend('ParentAbortEvent', {}) + const AbortChildEvent = BaseEvent.extend('AbortChildEvent', {}) + + bus.on(TimeoutEvent, async () => { + await delay(40) + return 'slow' + }) + + bus.on(PendingChildEvent, async () => { + await delay(5) + return 'pending_child' + }) + + let pending_child: BaseEvent | null = null + bus.on(ParentCancelEvent, async (event) => { + pending_child = event.bus?.emit(PendingChildEvent({ event_timeout: 0.5 })) ?? null + await delay(80) + }) + + bus.on(AbortChildEvent, async () => { + await delay(120) + return 'abort_child' + }) + + let aborted_child: BaseEvent | null = null + bus.on(ParentAbortEvent, async (event) => { + aborted_child = event.bus?.emit(AbortChildEvent({ event_timeout: 0.5 })) ?? null + await aborted_child?.done() + }) + + const timeout_event = bus.dispatch(TimeoutEvent({ event_timeout: 0.02 })) + await timeout_event.done() + + const timeout_result = Array.from(timeout_event.event_results.values())[0] + const timeout_error = timeout_result.error as EventHandlerTimeoutError + assert.ok(timeout_error.cause instanceof Error) + assert.equal(timeout_error.cause.name, 'TimeoutError') + assert.equal(timeout_error.event_result, timeout_result) + assert.equal(timeout_error.timeout_seconds, timeout_event.event_timeout) + assert.equal(timeout_error.event.event_id, timeout_event.event_id) + assert.equal(timeout_error.event_type, timeout_event.event_type) + assert.equal(timeout_error.handler_name, timeout_result.handler_name) + assert.equal(timeout_error.handler_id, timeout_result.handler_id) + assert.equal(timeout_error.event_timeout, timeout_event.event_timeout) + + const cancel_parent = bus.dispatch(ParentCancelEvent({ event_timeout: 0.02 })) + await cancel_parent.done() + await bus.waitUntilIdle() + + assert.ok(pending_child, 'pending_child should have been emitted') + const pending_result = Array.from(pending_child!.event_results.values())[0] + const cancelled_error = pending_result.error as EventHandlerCancelledError + const cancel_parent_result = Array.from(cancel_parent.event_results.values())[0] + const cancel_parent_error = cancel_parent_result.error as EventHandlerTimeoutError + assert.equal(cancelled_error.cause, cancel_parent_error) + assert.equal(cancelled_error.event_result, pending_result) + assert.equal(cancelled_error.event.event_id, pending_child!.event_id) + assert.equal(cancelled_error.timeout_seconds, pending_child!.event_timeout) + assert.equal(cancelled_error.event_type, pending_child!.event_type) + assert.equal(cancelled_error.handler_name, pending_result.handler_name) + assert.equal(cancelled_error.handler_id, pending_result.handler_id) + + const abort_parent = bus.dispatch(ParentAbortEvent({ event_timeout: 0.05 })) + await abort_parent.done() + await bus.waitUntilIdle() + + assert.ok(aborted_child, 'aborted_child should have been emitted') + const aborted_result = Array.from(aborted_child!.event_results.values())[0] + const aborted_error = aborted_result.error as EventHandlerAbortedError + const abort_parent_result = Array.from(abort_parent.event_results.values())[0] + const abort_parent_error = abort_parent_result.error as EventHandlerTimeoutError + assert.equal(aborted_error.cause, abort_parent_error) + assert.equal(aborted_error.event_result, aborted_result) + assert.equal(aborted_error.event.event_id, aborted_child!.event_id) + assert.equal(aborted_error.timeout_seconds, aborted_child!.event_timeout) + assert.equal(aborted_error.event_type, aborted_child!.event_type) + assert.equal(aborted_error.handler_name, aborted_result.handler_name) + assert.equal(aborted_error.handler_id, aborted_result.handler_id) +}) + test('handler timeouts fire across concurrency modes', async () => { const modes = ['global-serial', 'bus-serial', 'parallel'] as const @@ -304,7 +390,7 @@ const STEP1_HANDLER_MODES = ['bus-serial', 'global-serial'] as const type Step1HandlerMode = (typeof STEP1_HANDLER_MODES)[number] const getHandlerSemaphore = (bus: EventBus, mode: Step1HandlerMode) => - mode === 'global-serial' ? EventBus.global_handler_semaphore : bus.bus_handler_semaphore + mode === 'global-serial' ? LockManager.global_handler_semaphore : bus.locks.bus_handler_semaphore for (const handler_mode of STEP1_HANDLER_MODES) { test(`regression: timeout during awaited child.done() does not leak handler semaphore lock [${handler_mode}]`, async () => { @@ -711,7 +797,7 @@ test('multi-level timeout cascade with mixed cancellations', async () => { for (const result of queued_results) { assert.equal(result.status, 'error') assert.ok(result.error instanceof EventHandlerCancelledError) - assert.ok((result.error as EventHandlerCancelledError).parent_error instanceof EventHandlerTimeoutError) + assert.ok((result.error as EventHandlerCancelledError).cause instanceof EventHandlerTimeoutError) } assert.ok(awaited_child) @@ -867,7 +953,7 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella } // ── TopEvent handlers ───────────────────────────────────────────────── - // These run SERIALLY (via bus handler semaphore) because TopEvent is + // These run SERIALLY (via bus.locks.bus_handler_semaphore) because TopEvent is // processed by the normal runloop (not queue-jumped). top_handler_fast // goes first, completes quickly, then top_handler_main starts. @@ -979,10 +1065,10 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella queued_gc_results[0].error instanceof EventHandlerCancelledError, 'QueuedGC handler should be EventHandlerCancelledError (not timeout β€” it never ran)' ) - // Verify the cancellation error chain: CancelledError.parent_error β†’ TimeoutError + // Verify the cancellation error chain: CancelledError.cause β†’ TimeoutError assert.ok( - (queued_gc_results[0].error as EventHandlerCancelledError).parent_error instanceof EventHandlerTimeoutError, - "QueuedGC cancellation should reference the child_handler's timeout as parent_error" + (queued_gc_results[0].error as EventHandlerCancelledError).cause instanceof EventHandlerTimeoutError, + "QueuedGC cancellation should reference the child_handler's timeout as cause" ) // ── SiblingEvent: CANCELLED by top_handler_main timeout ───────────── @@ -996,8 +1082,8 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella assert.equal(sibling_results[0].status, 'error') assert.ok(sibling_results[0].error instanceof EventHandlerCancelledError, 'SiblingEvent handler should be EventHandlerCancelledError') assert.ok( - (sibling_results[0].error as EventHandlerCancelledError).parent_error instanceof EventHandlerTimeoutError, - "SiblingEvent cancellation should reference top_handler_main's timeout as parent_error" + (sibling_results[0].error as EventHandlerCancelledError).cause instanceof EventHandlerTimeoutError, + "SiblingEvent cancellation should reference top_handler_main's timeout as cause" ) // ── Execution log: verify what ran and what didn't ────────────────── @@ -1065,12 +1151,12 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella // ============================================================================= // Verify the timeoutβ†’cancellation error chain is intact at every level. // When a parent handler times out and cancels a child's pending handlers, -// the EventHandlerCancelledError.parent_error must reference the specific +// the EventHandlerCancelledError.cause must reference the specific // EventHandlerTimeoutError that caused the cascade. This test creates a // 2-level chain where each level's cancellation error can be inspected. // ============================================================================= -test('cancellation error chain preserves parent_error references through hierarchy', async () => { +test('cancellation error chain preserves cause references through hierarchy', async () => { const OuterEvent = BaseEvent.extend('ErrorChainOuter', {}) const InnerEvent = BaseEvent.extend('ErrorChainInner', {}) const DeepEvent = BaseEvent.extend('ErrorChainDeep', {}) @@ -1138,12 +1224,12 @@ test('cancellation error chain preserves parent_error references through hierarc 'DeepEvent handler should be cancelled, not timed out (it never started)' ) const deep_cancel = deep_result.error as EventHandlerCancelledError - assert.ok(deep_cancel.parent_error instanceof EventHandlerTimeoutError, 'Cancellation should reference parent timeout') - // The parent_error should be the INNER handler's timeout, because that's + assert.ok(deep_cancel.cause instanceof EventHandlerTimeoutError, 'Cancellation should reference parent timeout') + // The cause should be the INNER handler's timeout, because that's // the handler whose bus.cancelPendingDescendants actually cancelled DeepEvent. assert.ok( - deep_cancel.parent_error.message.includes('inner_handler') || deep_cancel.parent_error.message.includes('child_handler'), - 'parent_error should reference the handler that directly caused cancellation' + deep_cancel.cause.message.includes('inner_handler') || deep_cancel.cause.message.includes('child_handler'), + 'cause should reference the handler that directly caused cancellation' ) }) From a1f51f1fd016856d91c33aa750f4734e2ee7d284 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 6 Feb 2026 16:00:40 -0800 Subject: [PATCH 53/79] tests passing --- bubus-ts/src/base_event.ts | 27 +++++++++++++------------- bubus-ts/src/event_bus.ts | 26 ++++++++++++------------- bubus-ts/tests/eventbus_basics.test.ts | 2 +- bubus-ts/tests/timeout.test.ts | 2 +- 4 files changed, 27 insertions(+), 30 deletions(-) diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index 4e5efac..3bb437a 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -257,6 +257,7 @@ export class BaseEvent { return descendants; } + // awaitable to trigger immediate processing of the event on all buses where it is queued done(): Promise { if (!this.bus) { return Promise.reject(new Error('event has no bus attached')) @@ -277,7 +278,7 @@ export class BaseEvent { if (this.event_status === 'completed') { return Promise.resolve(this) } - this.ensureDonePromise() + this._notifyDoneListeners() return this._done!.promise } @@ -291,16 +292,24 @@ export class BaseEvent { this.event_started_ts = event_started_ts } - markCompleted(): void { + markCompleted(force: boolean = true): void { if (this.event_status === 'completed') { return } + if (!force) { + if (this.event_pending_bus_count > 0) { + return + } + if (!this.eventAreAllChildrenComplete()) { + return + } + } this.event_status = 'completed' const { isostring: event_completed_at, ts: event_completed_ts } = BaseEvent.nextTimestamp() this.event_completed_at = event_completed_at this.event_completed_ts = event_completed_ts this._dispatch_context = null - this.ensureDonePromise() + this._notifyDoneListeners() this._done!.resolve(this) this._done = null } @@ -324,17 +333,7 @@ export class BaseEvent { return true } - tryFinalizeCompletion(): void { - if (this.event_pending_bus_count > 0) { - return - } - if (!this.eventAreAllChildrenComplete()) { - return - } - this.markCompleted() - } - - ensureDonePromise(): void { + _notifyDoneListeners(): void { if (this._done) { return } diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index d81a6cf..9328244 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -352,14 +352,6 @@ export class EventBus { const where = typeof where_or_options === 'function' ? where_or_options : () => true const options = typeof where_or_options === 'function' ? maybe_options : where_or_options - return this.findInternal(event_key, where, options) - } - - private async findInternal( - event_key: EventKey, - where: (event: T) => boolean, - options: FindOptions - ): Promise { const past = options.past ?? true const future = options.future ?? true const child_of = options.child_of ?? null @@ -410,7 +402,7 @@ export class EventBus { return null } - return new Promise((resolve, _reject) => { + return new Promise((resolve) => { const waiter: FindWaiter = { event_key, matches, @@ -686,6 +678,11 @@ export class EventBus { } } + // Collects buses that currently "own" this event so queue-jump can run it immediately + // across all forwarded buses. Called by runImmediatelyAcrossBuses(), which itself is + // invoked from _runImmediately (via BaseEvent.done()) when an event is awaited inside + // a handler. Uses event.event_path ordering to pick candidate buses and filters out + // buses that haven't seen the event or already processed it. private getBusesForImmediateRun(event: BaseEvent): EventBus[] { const ordered: EventBus[] = [] const seen = new Set() @@ -815,9 +812,10 @@ export class EventBus { } const started_ts = event.event_started_ts ?? event.event_created_ts ?? performance.now() const elapsed_ms = Math.max(0, performance.now() - started_ts) - const elapsed_seconds = (elapsed_ms / 1000).toFixed(1) + const elapsed_seconds = (elapsed_ms / 1000).toFixed(2) + const active_handler = [...event.event_results.values()].find((result: EventResult) => result.status === 'started')?.handler_file_path ?? 'handlers' console.warn( - `[bubus] Possible deadlock: ${event.event_type}#${event.event_id} still ${event.event_status} on ${this.name} after ${elapsed_seconds}s (timeout ${event.event_timeout}s)` + `[bubus] Slow handler: ${this.name}.on(${event.event_type}#${event.event_id.slice(-8, -1)}, ${active_handler}) still running after ${elapsed_seconds}s (timeout=${event.event_timeout}s)` ) }, event.event_timeout * 1000) @@ -828,7 +826,7 @@ export class EventBus { await Promise.all(handler_promises) event.event_pending_bus_count = Math.max(0, event.event_pending_bus_count - 1) - event.tryFinalizeCompletion() + event.markCompleted(false) if (event.event_status === 'completed') { this.notifyParentsFor(event) } @@ -972,7 +970,7 @@ export class EventBus { if (!parent) { break } - parent.tryFinalizeCompletion() + parent.markCompleted(false) if (parent.event_status !== 'completed') { break } @@ -1136,7 +1134,7 @@ export class EventBus { } if (updated || removed > 0) { - original_event.tryFinalizeCompletion() + original_event.markCompleted(false) if (original_event.event_status === 'completed') { this.notifyParentsFor(original_event) } diff --git a/bubus-ts/tests/eventbus_basics.test.ts b/bubus-ts/tests/eventbus_basics.test.ts index 82d9b69..0ac3d1e 100644 --- a/bubus-ts/tests/eventbus_basics.test.ts +++ b/bubus-ts/tests/eventbus_basics.test.ts @@ -116,7 +116,7 @@ test('BaseEvent lifecycle methods are callable and preserve lifecycle behavior', const standalone = LifecycleEvent({}) standalone.markStarted() assert.equal(standalone.event_status, 'started') - standalone.tryFinalizeCompletion() + standalone.markCompleted(false) assert.equal(standalone.event_status, 'completed') await standalone.waitForCompletion() diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts index ec21042..6074899 100644 --- a/bubus-ts/tests/timeout.test.ts +++ b/bubus-ts/tests/timeout.test.ts @@ -214,7 +214,7 @@ test('deadlock warning triggers when event exceeds timeout', async () => { } assert.ok( - warnings.some((message) => message.includes('Possible deadlock')), + warnings.some((message) => message.includes('Slow handler')), 'Expected deadlock warning' ) }) From 328af87a9c2e039ec8c135e6c5054f89621d3edd Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 6 Feb 2026 18:28:41 -0800 Subject: [PATCH 54/79] code cleanup and better naming --- bubus-ts/README.md | 21 +- bubus-ts/src/base_event.ts | 71 +- bubus-ts/src/event_bus.ts | 812 +++++------------- bubus-ts/src/event_handler.ts | 181 ++++ bubus-ts/src/event_result.ts | 85 +- bubus-ts/src/index.ts | 18 +- bubus-ts/src/lock_manager.ts | 20 +- bubus-ts/src/logging.ts | 242 ++++++ bubus-ts/src/types.ts | 3 +- bubus-ts/tests/_perf_profile.ts | 10 +- bubus-ts/tests/comprehensive_patterns.test.ts | 20 +- bubus-ts/tests/eventbus_basics.test.ts | 2 +- bubus-ts/tests/log_tree.test.ts | 59 +- bubus-ts/tests/performance.test.ts | 301 +++---- bubus-ts/tests/timeout.test.ts | 86 +- 15 files changed, 1050 insertions(+), 881 deletions(-) create mode 100644 bubus-ts/src/event_handler.ts create mode 100644 bubus-ts/src/logging.ts diff --git a/bubus-ts/README.md b/bubus-ts/README.md index f487dec..cdf9ae7 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -38,7 +38,7 @@ gotchas we uncovered while matching behavior. It intentionally does **not** re-d - `BaseEvent.event_timeout` defaults to `null`. - When dispatched, `EventBus` applies its default `event_timeout` (60s unless configured). - You can set `{ event_timeout: null }` on the bus to disable timeouts entirely. -- Handlers that exceed 15s emit a warning (deadlock detection signal); the event still continues unless a timeout is hit. +- Slow handler warnings fire after `event_handler_slow_timeout` (default: `30s`). Slow event warnings fire after `event_slow_timeout` (default: `300s`). ## EventBus Options @@ -58,6 +58,13 @@ All options are passed to `new EventBus(name, options)`. - `event_timeout?: number | null` (default: `60`) - Default handler timeout in seconds, applied when `event.event_timeout` is `null`. - Set to `null` to disable timeouts globally for the bus. +- `event_handler_slow_timeout?: number | null` (default: `30`) + - Warn after this many seconds for slow handlers. + - Only warns when the handler's timeout is `null` or greater than this value. + - Set to `null` to disable slow handler warnings. +- `event_slow_timeout?: number | null` (default: `300`) + - Warn after this many seconds for slow event processing. + - Set to `null` to disable slow event warnings. ## Concurrency Overrides and Precedence @@ -128,7 +135,7 @@ under different `event_concurrency` / `handler_concurrency` configurations. 1. `dispatch()` normalizes to `original_event`, sets `bus` if missing. 2. Captures `_dispatch_context` (AsyncLocalStorage if available). 3. Applies `event_timeout_default` if `event.event_timeout === null`. -4. If this bus is already in `event_path` (or `eventHasVisited()`), return a BusScopedEvent without queueing. +4. If this bus is already in `event_path` (or `bus.hasProcessedEvent()`), return a BusScopedEvent without queueing. 5. Append bus name to `event_path`, record child relationship (if `event_parent_id` is set). 6. Add to `event_history` (a `Map` keyed by event id). 7. Increment `event_pending_bus_count`. @@ -142,7 +149,7 @@ under different `event_concurrency` / `handler_concurrency` configurations. 4. `scheduleEventProcessing()` selects the event semaphore and runs `processEvent()`. 5. `processEvent()`: - `event.markStarted()` - - `notifyFinders(event)` + - `notifyFindListeners(event)` - creates handler results (`event_results`) - runs handlers (respecting handler semaphore) - decrements `event_pending_bus_count` and calls `event.tryFinalizeCompletion()` @@ -181,12 +188,12 @@ When a handler on Bus A calls `bus_b.dispatch(event)` without awaiting: When `event.done()` is awaited inside a handler, **queue-jump** happens: -1. `BaseEvent.done()` detects it's inside a handler and calls `_runImmediately()`. -2. `_runImmediately()` **yields** the parent handler's concurrency semaphore (if held) so child handlers can acquire it. -3. `_runImmediately()` removes the event from the pending queue (if present). +1. `BaseEvent.done()` detects it's inside a handler and calls `processEventImmediately()`. +2. `processEventImmediately()` **yields** the parent handler's concurrency semaphore (if held) so child handlers can acquire it. +3. `processEventImmediately()` removes the event from the pending queue (if present). 4. `runImmediatelyAcrossBuses()` processes the event immediately on all buses where it is queued. 5. While immediate processing is active, each affected bus's runloop is paused to prevent unrelated events from running. -6. Once immediate processing completes, `_runImmediately()` **re-acquires** the parent handler's semaphore +6. Once immediate processing completes, `processEventImmediately()` **re-acquires** the parent handler's semaphore (unless the parent timed out while the child was processing). 7. Paused runloops resume. diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index 3bb437a..2d8f293 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -63,17 +63,17 @@ type ZodShapeFrom> = { } export class BaseEvent { - event_id!: string // unique uuidv7 identifier for the event - event_created_at!: string // ISO datetime string version of event_created_ts - event_created_ts!: number // nanosecond monotonic version of event_created_at - event_type!: string // should match the class name of the event, e.g. BaseEvent.extend("MyEvent").event_type === "MyEvent" - event_timeout!: number | null // maximum time in seconds that each handler for the event is allowed to run before it is aborted - event_parent_id?: string // id of the parent event that triggered this event, if this event was emitted during handling of another event - event_path!: string[] // list of bus names that the event has been dispatched to, including the current bus - event_result_schema?: z.ZodTypeAny // optional zod schema to enforce the shape of return values from handlers - event_result_type?: string // optional string identifier of the type of the return values from handlers, to make it easier to reference common shapes across networkboundaries e.g. ScreenshotEventResultType + event_id!: string // unique uuidv7 identifier for the event + event_created_at!: string // ISO datetime string version of event_created_ts + event_created_ts!: number // nanosecond monotonic version of event_created_at + event_type!: string // should match the class name of the event, e.g. BaseEvent.extend("MyEvent").event_type === "MyEvent" + event_timeout!: number | null // maximum time in seconds that each handler for the event is allowed to run before it is aborted + event_parent_id?: string // id of the parent event that triggered this event, if this event was emitted during handling of another event + event_path!: string[] // list of bus names that the event has been dispatched to, including the current bus + event_result_schema?: z.ZodTypeAny // optional zod schema to enforce the shape of return values from handlers + event_result_type?: string // optional string identifier of the type of the return values from handlers, to make it easier to reference common shapes across networkboundaries e.g. ScreenshotEventResultType event_results!: Map - event_emitted_by_handler_id?: string // if event was emitted inside a handler while it was running, this will be set to the enclosing handler's handler id + event_emitted_by_handler_id?: string // if event was emitted inside a handler while it was running, this will be set to the enclosing handler's handler id event_pending_bus_count!: number // Number of buses that have accepted this event and not yet finished processing or removed it from their queues. event_status!: 'pending' | 'started' | 'completed' event_started_at?: string @@ -82,10 +82,10 @@ export class BaseEvent { event_completed_ts?: number event_concurrency?: ConcurrencyMode handler_concurrency?: ConcurrencyMode - - bus?: EventBus // shortcut to the bus that dispatched this event, for event.bus.dispatch(event) auto-child tracking via proxy wrapping - _original_event?: BaseEvent // underlying event object that was dispatched, if this is a bus-scoped proxy wrapping it - _dispatch_context?: unknown | null // captured AsyncLocalStorage context at dispatch site, used to restore that context when running handlers + + bus?: EventBus // shortcut to the bus that dispatched this event, for event.bus.dispatch(event) auto-child tracking via proxy wrapping + _original_event?: BaseEvent // underlying event object that was dispatched, if this is a bus-scoped proxy wrapping it + _dispatch_context?: unknown | null // captured AsyncLocalStorage context at dispatch site, used to restore that context when running handlers static schema = BaseEventSchema static event_type?: string @@ -133,6 +133,10 @@ export class BaseEvent { this._dispatch_context = undefined } + toString(): string { + return `${this.event_type}#${this.event_id.slice(-4)}` + } + static nextTimestamp(): { date: Date; isostring: string; ts: number } { const ts = performance.now() const date = new Date(performance.timeOrigin + ts) @@ -141,7 +145,10 @@ export class BaseEvent { static extend(event_type: string, shape?: TShape): EventFactory static extend>(event_type: string, shape?: TShape): EventFactory> - static extend>(event_type: string, shape: TShape = {} as TShape): EventFactory> { + static extend>( + event_type: string, + shape: TShape = {} as TShape + ): EventFactory> { const raw_shape = shape as Record const event_result_schema = is_zod_schema(raw_shape.event_result_schema) ? (raw_shape.event_result_schema as z.ZodTypeAny) : undefined @@ -230,34 +237,35 @@ export class BaseEvent { // get all children grandchildren etc. recursively get event_descendants(): BaseEvent[] { - const descendants: BaseEvent[] = []; - const visited = new Set(); - const root_id = this.event_id; - const stack = [...this.event_children]; + const descendants: BaseEvent[] = [] + const visited = new Set() + const root_id = this.event_id + const stack = [...this.event_children] while (stack.length > 0) { - const child = stack.pop(); + const child = stack.pop() if (!child) { - continue; + continue } - const child_id = child.event_id; + const child_id = child.event_id if (child_id === root_id) { - continue; + continue } if (visited.has(child_id)) { - continue; + continue } - visited.add(child_id); - descendants.push(child); + visited.add(child_id) + descendants.push(child) if (child.event_children.length > 0) { - stack.push(...child.event_children); + stack.push(...child.event_children) } } - return descendants; + return descendants } // awaitable to trigger immediate processing of the event on all buses where it is queued + // TODO: rename to immediate() done(): Promise { if (!this.bus) { return Promise.reject(new Error('event has no bus attached')) @@ -265,15 +273,16 @@ export class BaseEvent { if (this.event_status === 'completed') { return Promise.resolve(this) } - // Always delegate to _runImmediately β€” it walks up the parent event tree + // Always delegate to processEventImmediately β€” it walks up the parent event tree // to determine whether we're inside a handler (works cross-bus). If no // ancestor handler is in-flight, it falls back to waitForCompletion(). const runner_bus = this.bus as { - _runImmediately: (event: BaseEvent) => Promise + processEventImmediately: (event: BaseEvent) => Promise } - return runner_bus._runImmediately(this) as Promise + return runner_bus.processEventImmediately(this) as Promise } + // TODO: rename to done() waitForCompletion(): Promise { if (this.event_status === 'completed') { return Promise.resolve(this) diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index 9328244..404c417 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -1,84 +1,22 @@ import { BaseEvent } from './base_event.js' import { EventResult } from './event_result.js' import { captureAsyncContext, runWithAsyncContext } from './async_context.js' -import { v5 as uuidv5 } from 'uuid' import { AsyncSemaphore, type ConcurrencyMode, HandlerLock, LockManager, runWithSemaphore, withResolvers } from './lock_manager.js' +import { + EventHandlerAbortedError, + EventHandlerCancelledError, + EventHandlerTimeoutError, + EventHandlerResultSchemaError, + EventHandler, +} from './event_handler.js' +import { logTree } from './logging.js' -export class TimeoutError extends Error { - constructor(message: string) { - super(message) - this.name = 'TimeoutError' - } -} - -export class EventHandlerError extends Error { - event_result: EventResult - timeout_seconds: number | null - cause: Error - - constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { - super(message) - this.name = 'EventHandlerError' - this.event_result = params.event_result - this.cause = params.cause - this.timeout_seconds = params.timeout_seconds ?? this.event_result.event?.event_timeout ?? null - } - - get event(): BaseEvent { - return this.event_result.event! - } - - get event_type(): string { - return this.event.event_type - } - - get handler_name(): string { - return this.event_result.handler_name - } - - get handler_id(): string { - return this.event_result.handler_id - } - - get event_timeout(): number | null { - return this.event.event_timeout - } -} - -// EventHandlerTimeoutError: when the handler itself timed out while executing (due to event.event_timeout being exceeded) -export class EventHandlerTimeoutError extends EventHandlerError { - constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause?: Error }) { - super(message, { - event_result: params.event_result, - timeout_seconds: params.timeout_seconds, - cause: params.cause ?? new TimeoutError(message), - }) - this.name = 'EventHandlerTimeoutError' - } -} - -// EventHandlerCancelledError: when a pending handler was cancelled and never run due to an error (e.g. timeout) in a parent scope -export class EventHandlerCancelledError extends EventHandlerError { - constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { - super(message, params) - this.name = 'EventHandlerCancelledError' - } -} - -// EventHandlerAbortedError: when a handler that was already running was aborted due to an error in the parent scope, not due to an error in its own logic / exceeding its own timeout -export class EventHandlerAbortedError extends EventHandlerError { - constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { - super(message, params) - this.name = 'EventHandlerAbortedError' - } -} - -import type { EventHandler, EventKey, FindOptions, HandlerOptions } from './types.js' +import type { EventHandlerFunction, EventKey, FindOptions, HandlerOptions } from './types.js' type FindWaiter = { // similar to a handler, except its for .find() calls // needs to be different because it's resolved on dispatch not event processing time - // also is ephemeral, gets unregistered the moment it resolves and + // also is ephemeral, gets unregistered the moment it resolves and // doesnt show up in event processing tree, doesn't block runloop, etc. event_key: EventKey matches: (event: BaseEvent) => boolean @@ -86,68 +24,13 @@ type FindWaiter = { timeout_id?: ReturnType } -class HandlerEntry { - // an entry in the list of handlers that are registered on a bus - id: string // unique uuidv5 based on hash of bus name, handler name, handler file path:lineno, registered at timestamp, and event key - handler: EventHandler - handler_name: string - handler_file_path?: string - handler_registered_at: string - handler_registered_ts: number - options?: HandlerOptions - event_key: string | '*' - - constructor(params: { - id: string - handler: EventHandler - handler_name: string - handler_file_path?: string - handler_registered_at: string - handler_registered_ts: number - options?: HandlerOptions - event_key: string | '*' - }) { - this.id = params.id - this.handler = params.handler - this.handler_name = params.handler_name - this.handler_file_path = params.handler_file_path - this.handler_registered_at = params.handler_registered_at - this.handler_registered_ts = params.handler_registered_ts - this.options = params.options - this.event_key = params.event_key - } - - static computeHandlerId(params: { - bus_name: string - handler_name: string - handler_file_path?: string - handler_registered_at: string - event_key: string | '*' - }): string { - const file_path = HandlerEntry.normalizeHandlerFilePath(params.handler_file_path) - const seed = `${params.bus_name}|${params.handler_name}|${file_path}|${params.handler_registered_at}|${params.event_key}` - return uuidv5(seed, HANDLER_ID_NAMESPACE) - } - - private static normalizeHandlerFilePath(file_path?: string): string { - if (!file_path) { - return 'unknown' - } - const match = file_path.match(/^(.*?):(\d+)(?::\d+)?$/) - if (match) { - return `${match[1]}:${match[2]}` - } - return file_path - } -} - -const HANDLER_ID_NAMESPACE = uuidv5('bubus-handler', uuidv5.DNS) - type EventBusOptions = { max_history_size?: number | null event_concurrency?: ConcurrencyMode handler_concurrency?: ConcurrencyMode event_timeout?: number | null + event_handler_slow_timeout?: number | null + event_slow_timeout?: number | null } class GlobalEventBusInstanceRegistry { @@ -192,12 +75,9 @@ class GlobalEventBusInstanceRegistry { else this._refs.delete(ref) } } -} -export class EventBus { - static _all_instances = new GlobalEventBusInstanceRegistry() - static findEventById(event_id: string): BaseEvent | null { - for (const bus of EventBus._all_instances) { + findEventById(event_id: string): BaseEvent | null { + for (const bus of this) { const event = bus.event_history.get(event_id) if (event) { return event @@ -205,14 +85,26 @@ export class EventBus { } return null } +} + +export class EventBus { + static _all_instances = new GlobalEventBusInstanceRegistry() name: string + + // configuration options max_history_size: number | null event_concurrency_default: ConcurrencyMode handler_concurrency_default: ConcurrencyMode event_timeout_default: number | null - handlers: Map + event_handler_slow_timeout: number | null + event_slow_timeout: number | null + + // public runtime state + handlers: Map event_history: Map + + // internal runtime state pending_event_queue: BaseEvent[] in_flight_event_ids: Set runloop_running: boolean @@ -222,10 +114,16 @@ export class EventBus { constructor(name: string = 'EventBus', options: EventBusOptions = {}) { this.name = name + + // set configuration options this.max_history_size = options.max_history_size === undefined ? 100 : options.max_history_size this.event_concurrency_default = options.event_concurrency ?? 'bus-serial' this.handler_concurrency_default = options.handler_concurrency ?? 'bus-serial' this.event_timeout_default = options.event_timeout === undefined ? 60 : options.event_timeout + this.event_handler_slow_timeout = options.event_handler_slow_timeout === undefined ? 30 : options.event_handler_slow_timeout + this.event_slow_timeout = options.event_slow_timeout === undefined ? 300 : options.event_slow_timeout + + // initialize runtime state this.handlers = new Map() this.event_history = new Map() this.pending_event_queue = [] @@ -240,6 +138,13 @@ export class EventBus { this.emit = this.emit.bind(this) } + toString(): string { + if (this.name.toLowerCase().includes('bus')) { + return `${this.name}` + } + return `EventBus(${this.name})` // for clarity that its a bus if bus is not in the name + } + destroy(): void { EventBus._all_instances.delete(this) this.handlers.clear() @@ -253,49 +158,45 @@ export class EventBus { this.locks.clear() } - on(event_key: EventKey | '*', handler: EventHandler, options: HandlerOptions = {}): void { + on(event_key: EventKey | '*', handler: EventHandlerFunction, options: HandlerOptions = {}): EventHandler { const normalized_key = this.normalizeEventKey(event_key) const handler_name = handler.name || 'anonymous' - const handler_file_path = this.inferHandlerFilePath() ?? undefined const { isostring: handler_registered_at, ts: handler_registered_ts } = BaseEvent.nextTimestamp() - const handler_id = HandlerEntry.computeHandlerId({ - bus_name: this.name, + const handler_timeout = options.handler_timeout ?? this.event_timeout_default + const handler_entry = new EventHandler({ + handler: handler as EventHandlerFunction, handler_name, - handler_file_path, + handler_timeout, handler_registered_at, + handler_registered_ts, + options: Object.keys(options).length > 0 ? options : undefined, event_key: normalized_key, + eventbus_name: this.name, }) - this.handlers.set( - handler_id, - new HandlerEntry({ - id: handler_id, - handler: handler as EventHandler, - handler_name, - handler_file_path, - handler_registered_at, - handler_registered_ts, - options: Object.keys(options).length > 0 ? options : undefined, - event_key: normalized_key, - }) - ) + this.handlers.set(handler_entry.id, handler_entry) + return handler_entry } - off(event_key: EventKey | '*', handler?: EventHandler | string): void { + off(event_key: EventKey | '*', handler?: EventHandlerFunction | string | EventHandler): void { const normalized_key = this.normalizeEventKey(event_key) + if (typeof handler === 'object' && handler instanceof EventHandler && handler.id !== undefined) { + handler = handler.id + } const match_by_id = typeof handler === 'string' - for (const [handler_id, entry] of this.handlers) { + for (const entry of this.handlers.values()) { if (entry.event_key !== normalized_key) { continue } - if (handler === undefined || (match_by_id ? handler_id === handler : entry.handler === (handler as EventHandler))) { + const handler_id = entry.id + if (handler === undefined || (match_by_id ? handler_id === handler : entry.handler === (handler as EventHandlerFunction))) { this.handlers.delete(handler_id) } } } dispatch(event: T, _event_key?: EventKey): T { - const original_event = event._original_event ?? event // if event is a bus-scoped proxy already, get the original underlying event object + const original_event = event._original_event ?? event // if event is a bus-scoped proxy already, get the original underlying event object if (!original_event.bus) { // if we are the first bus to dispatch this event, set the bus property on the original event object original_event.bus = this @@ -313,8 +214,8 @@ export class EventBus { original_event.event_timeout = this.event_timeout_default } - if (original_event.event_path.includes(this.name) || this.eventHasVisited(original_event)) { - return this._getBusScopedEvent(original_event) as T + if (original_event.event_path.includes(this.name) || this.hasProcessedEvent(original_event)) { + return this.getEventProxyScopedToThisBus(original_event) as T } if (!original_event.event_path.includes(this.name)) { @@ -335,7 +236,7 @@ export class EventBus { this.pending_event_queue.push(original_event) this.startRunloop() - return this._getBusScopedEvent(original_event) as T + return this.getEventProxyScopedToThisBus(original_event) as T } emit(event: T, event_key?: EventKey): T { @@ -373,6 +274,7 @@ export class EventBus { return true } + // find an event in the history that matches the criteria if (past !== false || future !== false) { const now_ms = performance.timeOrigin + performance.now() const cutoff_ms = past === true ? null : now_ms - Math.max(0, Number(past)) * 1000 @@ -390,23 +292,25 @@ export class EventBus { if (cutoff_ms !== null && Date.parse(event.event_created_at) < cutoff_ms) { continue } - return this._getBusScopedEvent(event) as T + return this.getEventProxyScopedToThisBus(event) as T } if (future !== false) { - return this._getBusScopedEvent(event) as T + return this.getEventProxyScopedToThisBus(event) as T } } } + // if we are only looking for past events, return null when no match is found if (future === false) { return null } + // if we are looking for future events, return a promise that resolves when a match is found return new Promise((resolve) => { const waiter: FindWaiter = { event_key, matches, - resolve: (event) => resolve(this._getBusScopedEvent(event) as T), + resolve: (event) => resolve(this.getEventProxyScopedToThisBus(event) as T), } if (future !== true) { @@ -428,30 +332,27 @@ export class EventBus { // we temporarily release it so child handlers on the same bus can acquire it // (preventing deadlock for bus-serial/global-serial modes). We re-acquire after // the child completes so the parent handler can continue with the semaphore held. - async _runImmediately(event: T, handler_result?: EventResult): Promise { + async processEventImmediately(event: T, handler_result?: EventResult): Promise { const original_event = event._original_event ?? event // Find the parent handler's result: prefer the proxy-provided one (only if // the handler is still running), then this bus's stack, then walk up the // parent event tree (cross-bus case). If none found, we're not inside a // handler and should fall back to waitForCompletion. const proxy_result = handler_result?.status === 'started' ? handler_result : undefined - const effective_result = - proxy_result ?? - this.locks.getCurrentHandlerResult() ?? - this._findInFlightAncestorResult(original_event) ?? - undefined - if (!effective_result) { - // Not inside any handler β€” fall back to normal completion waiting + const currently_active_event_result = + proxy_result ?? this.locks.getCurrentHandlerResult() ?? this.getParentEventResultAcrossAllBusses(original_event) ?? undefined + if (!currently_active_event_result) { + // Not inside any handler scope β€” fall back to normal completion waiting await original_event.waitForCompletion() return event } - this.locks.ensureQueueJumpPauseForResult(effective_result) + this.locks.ensureQueueJumpPauseForResult(currently_active_event_result) if (original_event.event_status === 'completed') { return event } - const run_queue_jump = effective_result._lock - ? (fn: () => Promise) => effective_result._lock!.runQueueJump(fn) + const run_queue_jump = currently_active_event_result._lock + ? (fn: () => Promise) => currently_active_event_result._lock!.runQueueJump(fn) : (fn: () => Promise) => fn() return await run_queue_jump(async () => { if (original_event.event_status === 'started') { @@ -473,18 +374,18 @@ export class EventBus { await this.locks.waitForIdle() } - hasPendingResults(): boolean { + isIdle(): boolean { for (const event of this.event_history.values()) { for (const result of event.event_results.values()) { if (result.eventbus_name !== this.name) { continue } - if (result.status === 'pending') { - return true + if (result.status === 'pending' || result.status === 'started') { + return false } } } - return false + return true // no handlers are pending or started } eventIsChildOf(event: BaseEvent, ancestor: BaseEvent): boolean { @@ -512,7 +413,7 @@ export class EventBus { recordChildEvent(parent_event_id: string, child_event: BaseEvent, handler_id?: string): void { const original_child = child_event._original_event ?? child_event - const parent_event = this.event_history.get(parent_event_id) + const parent_event = this.event_history.get(parent_event_id) ?? EventBus._all_instances.findEventById(parent_event_id) const target_handler_id = handler_id ?? original_child.event_emitted_by_handler_id ?? undefined if (target_handler_id) { @@ -526,89 +427,20 @@ export class EventBus { } } + // return a full detailed tree diagram of all events and results on this bus logTree(): string { - const parent_to_children = new Map() - - const add_child = (parent_id: string, child: BaseEvent): void => { - const existing = parent_to_children.get(parent_id) ?? [] - existing.push(child) - parent_to_children.set(parent_id, existing) - } - - const root_events: BaseEvent[] = [] - const seen = new Set() - - for (const event of this.event_history.values()) { - const parent_id = event.event_parent_id - if (!parent_id || parent_id === event.event_id || !this.event_history.has(parent_id)) { - if (!seen.has(event.event_id)) { - root_events.push(event) - seen.add(event.event_id) - } - } - } - - if (root_events.length === 0) { - return '(No events in history)' - } - - const nodes_by_id = new Map() - for (const root of root_events) { - nodes_by_id.set(root.event_id, root) - for (const descendant of root.event_descendants) { - nodes_by_id.set(descendant.event_id, descendant) - } - } - - for (const node of nodes_by_id.values()) { - const parent_id = node.event_parent_id - if (!parent_id || parent_id === node.event_id) { - continue - } - if (!nodes_by_id.has(parent_id)) { - continue - } - add_child(parent_id, node) - } - - for (const children of parent_to_children.values()) { - children.sort((a, b) => - a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0 - ) - } - - const lines: string[] = [] - lines.push(`πŸ“Š Event History Tree for ${this.name}`) - lines.push('='.repeat(80)) - - root_events.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) - const visited = new Set() - root_events.forEach((event, index) => { - lines.push(this.buildTreeLine(event, '', index === root_events.length - 1, parent_to_children, visited)) - }) - - lines.push('='.repeat(80)) - - return lines.join('\n') - } - - // Per-bus check: true only if this specific bus has a handler on its stack. - // For cross-bus queue-jumping, _runImmediately uses _findInFlightAncestorResult() - // to walk up the parent event tree, and the bus proxy passes handler_result - // to _runImmediately so it can yield/reacquire the correct semaphore. - isInsideHandler(): boolean { - return this.locks.isInsideHandlerContext() + return logTree(this) } // Walk up the parent event chain to find an in-flight ancestor handler result. - // Returns the result if found, null otherwise. Used by _runImmediately to detect + // Returns the result if found, null otherwise. Used by processEventImmediately to detect // cross-bus queue-jump scenarios where the calling handler is on a different bus. - _findInFlightAncestorResult(event: BaseEvent): EventResult | null { + getParentEventResultAcrossAllBusses(event: BaseEvent): EventResult | null { const original = event._original_event ?? event let current_parent_id = original.event_parent_id let current_handler_id = original.event_emitted_by_handler_id while (current_handler_id && current_parent_id) { - const parent = EventBus.findEventById(current_parent_id) + const parent = EventBus._all_instances.findEventById(current_parent_id) if (!parent) break const handler_result = parent.event_results.get(current_handler_id) if (handler_result && handler_result.status === 'started') return handler_result @@ -619,7 +451,7 @@ export class EventBus { } // Processes a queue-jumped event across all buses that have it dispatched. - // Called from _runImmediately after the parent handler's semaphore has been yielded. + // Called from processEventImmediately after the parent handler's semaphore has been yielded. // // Event semaphore bypass: the initiating bus (this) always bypasses its event semaphore // since we're inside a handler that already holds it. Other buses only bypass if @@ -627,7 +459,7 @@ export class EventBus { // buses share LockManager.global_event_semaphore). // // Handler semaphores are NOT bypassed β€” child handlers must acquire the handler - // semaphore normally. This works because _runImmediately already released the + // semaphore normally. This works because processEventImmediately already released the // parent's handler semaphore via yield-and-reacquire. private async runImmediatelyAcrossBuses(event: BaseEvent): Promise { const buses = this.getBusesForImmediateRun(event) @@ -648,7 +480,7 @@ export class EventBus { if (index >= 0) { bus.pending_event_queue.splice(index, 1) } - if (bus.eventHasVisited(event)) { + if (bus.hasProcessedEvent(event)) { continue } if (bus.in_flight_event_ids.has(event.event_id)) { @@ -658,7 +490,7 @@ export class EventBus { // Bypass event semaphore on the initiating bus (we're already inside a handler // that acquired it). For other buses, only bypass if they resolve to the same - // semaphore instance (global-serial shares one semaphore across all buses). + // semaphore instance (global-serial shares one semaphore across all buses). const bus_event_semaphore = bus.locks.getSemaphoreForEvent(event) const should_bypass_event_semaphore = bus === this || (initiating_event_semaphore !== null && bus_event_semaphore === initiating_event_semaphore) @@ -680,7 +512,7 @@ export class EventBus { // Collects buses that currently "own" this event so queue-jump can run it immediately // across all forwarded buses. Called by runImmediatelyAcrossBuses(), which itself is - // invoked from _runImmediately (via BaseEvent.done()) when an event is awaited inside + // invoked from processEventImmediately (via BaseEvent.done()) when an event is awaited inside // a handler. Uses event.event_path ordering to pick candidate buses and filters out // buses that haven't seen the event or already processed it. private getBusesForImmediateRun(event: BaseEvent): EventBus[] { @@ -696,7 +528,7 @@ export class EventBus { if (!bus.event_history.has(event.event_id)) { continue } - if (bus.eventHasVisited(event)) { + if (bus.hasProcessedEvent(event)) { continue } if (!seen.has(bus)) { @@ -762,7 +594,7 @@ export class EventBus { continue } const original_event = next_event._original_event ?? next_event - if (this.eventHasVisited(original_event)) { + if (this.hasProcessedEvent(original_event)) { this.pending_event_queue.shift() continue } @@ -797,87 +629,90 @@ export class EventBus { } private async processEvent(event: BaseEvent): Promise { - if (this.eventHasVisited(event)) { + if (this.hasProcessedEvent(event)) { return } event.markStarted() - this.notifyFinders(event) - - const deadlock_timer = - event.event_timeout === null - ? null - : setTimeout(() => { - if (event.event_status === 'completed') { - return - } - const started_ts = event.event_started_ts ?? event.event_created_ts ?? performance.now() - const elapsed_ms = Math.max(0, performance.now() - started_ts) - const elapsed_seconds = (elapsed_ms / 1000).toFixed(2) - const active_handler = [...event.event_results.values()].find((result: EventResult) => result.status === 'started')?.handler_file_path ?? 'handlers' - console.warn( - `[bubus] Slow handler: ${this.name}.on(${event.event_type}#${event.event_id.slice(-8, -1)}, ${active_handler}) still running after ${elapsed_seconds}s (timeout=${event.event_timeout}s)` - ) - }, event.event_timeout * 1000) + this.notifyFindListeners(event) + + const slow_event_warning_timer = this.createSlowEventWarningTimer(event) try { const handler_entries = this.createPendingHandlerResults(event) - const handler_promises = handler_entries.map((entry) => this.runHandlerEntry(event, entry.handler, entry.result, entry.options)) + const handler_promises = handler_entries.map((entry) => this.runEventHandler(event, entry.handler, entry.result, entry.options)) await Promise.all(handler_promises) event.event_pending_bus_count = Math.max(0, event.event_pending_bus_count - 1) event.markCompleted(false) if (event.event_status === 'completed') { - this.notifyParentsFor(event) + this.notifyEventParentsOfCompletion(event) } } finally { - if (deadlock_timer) { - clearTimeout(deadlock_timer) + if (slow_event_warning_timer) { + clearTimeout(slow_event_warning_timer) } } } // Manually manages the handler concurrency semaphore instead of using runWithSemaphore, - // because _runImmediately may temporarily yield it during queue-jumping. - private async runHandlerEntry(event: BaseEvent, handler: EventHandler, result: EventResult, options?: HandlerOptions): Promise { + // because processEventImmediately may temporarily yield it during queue-jumping. + async runEventHandler( + event: BaseEvent, + handler: EventHandlerFunction, + result: EventResult, + options?: HandlerOptions + ): Promise { if (result.status === 'error' && result.error instanceof EventHandlerCancelledError) { return } - const handler_event = this._getBusScopedEvent(event, result) + const handler_event = this.getEventProxyScopedToThisBus(event, result) const semaphore = this.locks.getSemaphoreForHandler(event, options) if (semaphore) { await semaphore.acquire() } - if (result.status === 'error' && result.error instanceof EventHandlerCancelledError) { + // if the result is already in an error or completed state, release the semaphore immediately and return + // prevent double-processing of the event by the same handler + if (result.status === 'error' || result.status === 'completed') { if (semaphore) semaphore.release() return } + // exit the handler lock if it is already held if (result._lock) result._lock.exitHandlerRun() + // create a new handler lock to track ownership of the semaphore during handler execution result._lock = new HandlerLock(semaphore) this.locks.enterHandlerContext(result) + + // resolve the effective timeout by combining the event timeout and the handler timeout + const effective_timeout = this.resolveEffectiveTimeout(event.event_timeout, result.handler.handler_timeout) + const slow_handler_warning_timer = this.createSlowHandlerWarningTimer(event, result, effective_timeout) + try { - result.markStarted() - const abort_promise = result.ensureAbortSignal() - const handler_result = await Promise.race([ - this.runHandlerWithTimeout(event, handler, handler_event, result), - abort_promise, - ]) + const abort_signal = result.markStarted() + const handler_result = await Promise.race([this.runHandlerWithTimeout(event, handler, handler_event, result), abort_signal]) if (event.event_result_schema) { + // if there is a result schema to enforce, parse the handler's return value and mark the event as completed or errored if it doesn't match the schema const parsed = event.event_result_schema.safeParse(handler_result) if (parsed.success) { result.markCompleted(parsed.data) } else { - const error = new Error(`handler result did not match event_result_schema: ${parsed.error.message}`) + // if the handler's return value doesn't match the schema, mark the event as errored with an error message + const error = new EventHandlerResultSchemaError( + `${this.toString()}.on(${event.toString()}, ${result.handler.toString()}) return value ${JSON.stringify(handler_result).slice(0, 20)}... did not match event_result_schema ${event.event_result_type}: ${parsed.error.message}`, + { event_result: result, cause: parsed.error, raw_value: handler_result } + ) result.markError(error) } } else { + // if there is no result schema to enforce, just mark the event as completed with the raw handler's return value result.markCompleted(handler_result) } } catch (error) { + // if the handler timed out, cancel all pending descendants and mark the event as errored if (error instanceof EventHandlerTimeoutError) { result.markError(error) this.cancelPendingDescendants(event, error) @@ -889,44 +724,37 @@ export class EventBus { result._lock?.exitHandlerRun() this.locks.exitHandlerContext(result) this.locks.releaseQueueJumpPauseForResult(result) + if (slow_handler_warning_timer) { + clearTimeout(slow_handler_warning_timer) + } } } + // run a handler with a timeout, returning a promise that resolves or rejects with the handler's result or an error if the timeout is exceeded private async runHandlerWithTimeout( event: BaseEvent, - handler: EventHandler, + handler: EventHandlerFunction, handler_event: BaseEvent = event, result: EventResult ): Promise { + // resolve the effective timeout by combining the event timeout and the handler timeout + const effective_timeout = this.resolveEffectiveTimeout(event.event_timeout, result.handler.handler_timeout) const handler_name = handler.name || 'anonymous' - const warn_ms = 15000 - const started_at_ms = performance.now() - const should_warn = event.event_timeout === null || event.event_timeout * 1000 > warn_ms - const warn_timer = should_warn - ? setTimeout(() => { - const elapsed_ms = performance.now() - started_at_ms - const elapsed_seconds = (elapsed_ms / 1000).toFixed(1) - console.warn(`[bubus] Slow handler: ${event.event_type}.${handler_name} running ${elapsed_seconds}s on ${this.name}`) - }, warn_ms) - : null - const clear_warn = () => { - if (warn_timer) { - clearTimeout(warn_timer) - } - } const run_handler = () => Promise.resolve().then(() => runWithAsyncContext(event._dispatch_context ?? null, () => handler(handler_event))) - if (event.event_timeout === null) { - return run_handler().finally(clear_warn) + if (effective_timeout === null) { + // if there is no timeout to enforce, just run the handler directly and return the promise + return run_handler() } - const timeout_seconds = event.event_timeout + const timeout_seconds = effective_timeout const timeout_ms = timeout_seconds * 1000 const { promise, resolve, reject } = withResolvers() let settled = false + // finalize the promise by clearing the timeout and calling the resolve or reject function const finalize = (fn: (value?: unknown) => void) => { return (value?: unknown) => { if (settled) { @@ -934,11 +762,11 @@ export class EventBus { } settled = true clearTimeout(timer) - clear_warn() fn(value) } } + // set a timeout to reject the promise if the handler takes too long const timer = setTimeout(() => { finalize(reject)( new EventHandlerTimeoutError(`handler ${handler_name} timed out after ${timeout_seconds}s`, { @@ -953,7 +781,63 @@ export class EventBus { return promise } - private eventHasVisited(event: BaseEvent): boolean { + private createSlowEventWarningTimer(event: BaseEvent): ReturnType | null { + const event_warn_ms = this.event_slow_timeout === null ? null : this.event_slow_timeout * 1000 + if (event_warn_ms === null) { + return null + } + return setTimeout(() => { + if (event.event_status === 'completed') { + return + } + const running_handler_count = [...event.event_results.values()].filter((result) => result.status === 'started').length + const started_ts = event.event_started_ts ?? event.event_created_ts ?? performance.now() + const elapsed_ms = Math.max(0, performance.now() - started_ts) + const elapsed_seconds = (elapsed_ms / 1000).toFixed(2) + console.warn( + `[bubus] Slow event processing: ${this.name}.on(${event.event_type}#${event.event_id.slice(-4)}, ${running_handler_count} handlers) still running after ${elapsed_seconds}s` + ) + }, event_warn_ms) + } + + private createSlowHandlerWarningTimer( + event: BaseEvent, + result: EventResult, + effective_timeout: number | null + ): ReturnType | null { + const warn_ms = this.event_handler_slow_timeout === null ? null : this.event_handler_slow_timeout * 1000 + const should_warn = warn_ms !== null && (effective_timeout === null || effective_timeout * 1000 > warn_ms) + if (!should_warn || warn_ms === null) { + return null + } + const started_at_ms = performance.now() + return setTimeout(() => { + if (result.status !== 'started') { + return + } + const elapsed_ms = performance.now() - started_at_ms + const elapsed_seconds = (elapsed_ms / 1000).toFixed(1) + console.warn( + `[bubus] Slow event handler: ${this.name}.on(${event.toString()}, ${result.handler.toString()}) still running after ${elapsed_seconds}s` + ) + }, warn_ms) + } + + private resolveEffectiveTimeout(event_timeout: number | null, handler_timeout: number | null): number | null { + if (handler_timeout === null && event_timeout === null) { + return null + } + if (handler_timeout === null) { + return event_timeout + } + if (event_timeout === null) { + return handler_timeout + } + return Math.min(handler_timeout, event_timeout) + } + + // check if an event has been processed (and completed) by this bus + hasProcessedEvent(event: BaseEvent): boolean { const results = Array.from(event.event_results.values()).filter((result) => result.eventbus_name === this.name) if (results.length === 0) { return false @@ -961,12 +845,12 @@ export class EventBus { return results.every((result) => result.status === 'completed' || result.status === 'error') } - private notifyParentsFor(event: BaseEvent): void { + private notifyEventParentsOfCompletion(event: BaseEvent): void { const visited = new Set() let parent_id = event.event_parent_id while (parent_id && !visited.has(parent_id)) { visited.add(parent_id) - const parent = EventBus.findEventById(parent_id) + const parent = EventBus._all_instances.findEventById(parent_id) if (!parent) { break } @@ -978,14 +862,17 @@ export class EventBus { } } - _getBusScopedEvent(event: T, handler_result?: EventResult): T { + // get a proxy wrapper around an Event that will automatically link emitted child events to this bus and handler + // proxy is what gets passed into the handler, if handler does event.bus.emit(...) to dispatch child events, + // the proxy auto-sets event.parent_event_id and event.event_emitted_by_handler_id + getEventProxyScopedToThisBus(event: T, handler_result?: EventResult): T { const original_event = event._original_event ?? event const bus = this const parent_event_id = original_event.event_id const handler_id = handler_result?.handler_id const bus_proxy = new Proxy(bus, { get(target, prop, receiver) { - if (prop === '_runImmediately') { + if (prop === 'processEventImmediately') { return (child_event: BaseEvent) => { const runner = Reflect.get(target, prop, receiver) as (event: BaseEvent, handler_result?: EventResult) => Promise return runner.call(target, child_event, handler_result) @@ -1002,7 +889,7 @@ export class EventBus { } const dispatcher = Reflect.get(target, prop, receiver) as (event: BaseEvent, event_key?: EventKey) => BaseEvent const dispatched = dispatcher.call(target, original_child, event_key) - return target._getBusScopedEvent(dispatched, handler_result) + return target.getEventProxyScopedToThisBus(dispatched, handler_result) } } return Reflect.get(target, prop, receiver) @@ -1038,10 +925,11 @@ export class EventBus { return scoped as T } + // force-abort processing of all pending descendants of an event regardless of whether they have already started cancelPendingDescendants(event: BaseEvent, reason: unknown): void { const cancellation_cause = this.normalizeCancellationCause(reason) const visited = new Set() - const cancel_child = (child: BaseEvent): void => { + const cancelChildEvent = (child: BaseEvent): void => { const original_child = child._original_event ?? child if (visited.has(original_child.event_id)) { return @@ -1051,7 +939,7 @@ export class EventBus { // Depth-first: cancel grandchildren before parent so // eventAreAllChildrenComplete() returns true when we get back up. for (const grandchild of original_child.event_children) { - cancel_child(grandchild) + cancelChildEvent(grandchild) } const path = Array.isArray(original_child.event_path) ? original_child.event_path : [] @@ -1060,7 +948,7 @@ export class EventBus { if (!buses_to_cancel.has(bus.name)) { continue } - bus.cancelEventOnBus(original_child, cancellation_cause) + bus.cancelEvent(original_child, cancellation_cause) } // Force-complete the child event. In JS we can't stop running async @@ -1073,7 +961,7 @@ export class EventBus { } for (const child of event.event_children) { - cancel_child(child) + cancelChildEvent(child) } } @@ -1087,7 +975,8 @@ export class EventBus { return reason instanceof Error ? reason : new Error(String(reason)) } - private cancelEventOnBus(event: BaseEvent, cause: Error): void { + // force-abort processing of an event regardless of whether it is pending or has already started + private cancelEvent(event: BaseEvent, cause: Error): void { const original_event = event._original_event ?? event const handler_entries = this.createPendingHandlerResults(original_event) let updated = false @@ -1136,213 +1025,12 @@ export class EventBus { if (updated || removed > 0) { original_event.markCompleted(false) if (original_event.event_status === 'completed') { - this.notifyParentsFor(original_event) - } - } - } - - private buildTreeLine( - event: BaseEvent, - indent: string, - is_last: boolean, - parent_to_children: Map, - visited: Set - ): string { - const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' - const status_icon = event.event_status === 'completed' ? 'βœ…' : event.event_status === 'started' ? 'πŸƒ' : '⏳' - - const created_at = this.formatTimestamp(event.event_created_at) - let timing = `[${created_at}` - if (event.event_completed_at) { - const created_ms = Date.parse(event.event_created_at) - const completed_ms = Date.parse(event.event_completed_at) - if (!Number.isNaN(created_ms) && !Number.isNaN(completed_ms)) { - const duration = (completed_ms - created_ms) / 1000 - timing += ` (${duration.toFixed(3)}s)` + this.notifyEventParentsOfCompletion(original_event) } } - timing += ']' - - const line = `${indent}${connector}${status_icon} ${event.event_type}#${event.event_id.slice(-4)} ${timing}` - - if (visited.has(event.event_id)) { - return line - } - visited.add(event.event_id) - - const extension = is_last ? ' ' : 'β”‚ ' - const new_indent = indent + extension - - const result_items: Array<{ type: 'result'; result: EventResult } | { type: 'child'; child: BaseEvent }> = [] - const printed_child_ids = new Set() - - const results = Array.from(event.event_results.values()).sort((a, b) => { - const a_time = a.started_at ? Date.parse(a.started_at) : 0 - const b_time = b.started_at ? Date.parse(b.started_at) : 0 - return a_time - b_time - }) - - results.forEach((result) => { - result_items.push({ type: 'result', result }) - result.event_children.forEach((child) => { - printed_child_ids.add(child.event_id) - }) - }) - - const children = parent_to_children.get(event.event_id) ?? [] - children.forEach((child) => { - if (!printed_child_ids.has(child.event_id) && !child.event_emitted_by_handler_id) { - result_items.push({ type: 'child', child }) - } - }) - - if (result_items.length === 0) { - return line - } - - const child_lines: string[] = [] - result_items.forEach((item, index) => { - const is_last_item = index === result_items.length - 1 - if (item.type === 'result') { - child_lines.push(this.buildResultLine(item.result, new_indent, is_last_item, parent_to_children, visited)) - } else { - child_lines.push(this.buildTreeLine(item.child, new_indent, is_last_item, parent_to_children, visited)) - } - }) - - return [line, ...child_lines].join('\n') } - private buildResultLine( - result: EventResult, - indent: string, - is_last: boolean, - parent_to_children: Map, - visited: Set - ): string { - const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' - const status_icon = result.status === 'completed' ? 'βœ…' : result.status === 'error' ? '❌' : result.status === 'started' ? 'πŸƒ' : '⏳' - - const handler_label = - result.handler_name && result.handler_name !== 'anonymous' - ? result.handler_name - : result.handler_file_path - ? result.handler_file_path - : 'anonymous' - const handler_display = `${result.eventbus_name}.${handler_label}#${result.handler_id.slice(-4)}` - let line = `${indent}${connector}${status_icon} ${handler_display}` - - if (result.started_at) { - line += ` [${this.formatTimestamp(result.started_at)}` - if (result.completed_at) { - const started_ms = Date.parse(result.started_at) - const completed_ms = Date.parse(result.completed_at) - if (!Number.isNaN(started_ms) && !Number.isNaN(completed_ms)) { - const duration = (completed_ms - started_ms) / 1000 - line += ` (${duration.toFixed(3)}s)` - } - } - line += ']' - } - - if (result.status === 'error' && result.error) { - if (result.error instanceof EventHandlerTimeoutError) { - line += ` ⏱️ Timeout: ${result.error.message}` - } else if (result.error instanceof EventHandlerCancelledError) { - line += ` 🚫 Cancelled: ${result.error.message}` - } else { - const error_name = result.error instanceof Error ? result.error.name : 'Error' - const error_message = result.error instanceof Error ? result.error.message : String(result.error) - line += ` ☠️ ${error_name}: ${error_message}` - } - } else if (result.status === 'completed') { - line += ` β†’ ${this.formatResultValue(result.result)}` - } - - const extension = is_last ? ' ' : 'β”‚ ' - const new_indent = indent + extension - - if (result.event_children.length === 0) { - return line - } - - const child_lines: string[] = [] - const direct_children = result.event_children - const parent_children = parent_to_children.get(result.event_id) ?? [] - const emitted_children = parent_children.filter((child) => child.event_emitted_by_handler_id === result.handler_id) - const children_by_id = new Map() - direct_children.forEach((child) => { - children_by_id.set(child.event_id, child) - }) - emitted_children.forEach((child) => { - if (!children_by_id.has(child.event_id)) { - children_by_id.set(child.event_id, child) - } - }) - const children_to_print = Array.from(children_by_id.values()).filter((child) => !visited.has(child.event_id)) - - children_to_print.forEach((child, index) => { - child_lines.push(this.buildTreeLine(child, new_indent, index === children_to_print.length - 1, parent_to_children, visited)) - }) - - return [line, ...child_lines].join('\n') - } - - private formatTimestamp(value?: string): string { - if (!value) { - return 'N/A' - } - const date = new Date(value) - if (Number.isNaN(date.getTime())) { - return 'N/A' - } - return date.toISOString().slice(11, 23) - } - - private inferHandlerFilePath(): string | null { - const stack = new Error().stack - if (!stack) { - return null - } - const lines = stack.split('\n').map((line) => line.trim()) - for (const line of lines) { - if (!line || line.startsWith('Error')) { - continue - } - if (line.includes('event_bus.ts') || line.includes('node:internal') || line.includes('/node_modules/')) { - continue - } - const match = line.match(/\(?(.+?:\d+:\d+)\)?$/) - if (match && match[1]) { - return match[1] - } - } - return null - } - - private formatResultValue(value: unknown): string { - if (value === null || value === undefined) { - return 'None' - } - if (value instanceof BaseEvent) { - return `Event(${value.event_type}#${value.event_id.slice(-4)})` - } - if (typeof value === 'string') { - return JSON.stringify(value) - } - if (typeof value === 'number' || typeof value === 'boolean') { - return String(value) - } - if (Array.isArray(value)) { - return `list(${value.length} items)` - } - if (typeof value === 'object') { - return `dict(${Object.keys(value as Record).length} items)` - } - return `${typeof value}(...)` - } - - private notifyFinders(event: BaseEvent): void { + private notifyFindListeners(event: BaseEvent): void { for (const waiter of Array.from(this.find_waiters)) { if (!this.eventMatchesKey(event, waiter.event_key)) { continue @@ -1359,69 +1047,34 @@ export class EventBus { } private createPendingHandlerResults(event: BaseEvent): Array<{ - handler: EventHandler + handler: EventHandlerFunction result: EventResult options?: HandlerOptions }> { - const handlers = this.collectHandlers(event) - return handlers.map(({ handler_id, handler, handler_name, handler_file_path, options }) => { + const handlers = this.getHandlersForEvent(event) + return handlers.map((entry) => { + const handler_id = entry.id const existing = event.event_results.get(handler_id) - if (existing && !existing.event) { - existing.event = event - } - const result = - existing ?? - new EventResult({ - event_id: event.event_id, - handler_id, - handler_name, - handler_file_path, - eventbus_name: this.name, - event, - }) + const result = existing ?? new EventResult({ event, handler: entry }) if (!existing) { event.event_results.set(handler_id, result) } - return { handler, result, options } + return { handler: entry.handler, result, options: entry.options } }) } - private collectHandlers(event: BaseEvent): Array<{ - handler_id: string - handler: EventHandler - handler_name: string - handler_file_path?: string - options?: HandlerOptions - }> { - const handlers: Array<{ - handler_id: string - handler: EventHandler - handler_name: string - handler_file_path?: string - options?: HandlerOptions - }> = [] + getHandlersForEvent(event: BaseEvent): EventHandler[] { + const handlers: EventHandler[] = [] // Exact-match handlers first, then wildcard β€” preserves original ordering - for (const [handler_id, entry] of this.handlers) { + for (const entry of this.handlers.values()) { if (entry.event_key === event.event_type) { - handlers.push({ - handler_id, - handler: entry.handler, - handler_name: entry.handler_name, - handler_file_path: entry.handler_file_path, - options: entry.options, - }) + handlers.push(entry) } } - for (const [handler_id, entry] of this.handlers) { + for (const entry of this.handlers.values()) { if (entry.event_key === '*') { - handlers.push({ - handler_id, - handler: entry.handler, - handler_name: entry.handler_name, - handler_file_path: entry.handler_file_path, - options: entry.options, - }) + handlers.push(entry) } } @@ -1450,7 +1103,7 @@ export class EventBus { if (typeof event_type === 'string' && event_type.length > 0 && event_type !== 'BaseEvent') { return event_type } - throw new Error('event_key must be a string or an event class with a static event_type (not BaseEvent)') + throw new Error('bus.on(match_pattern, ...) must be a string event type, "*", or a BaseEvent class, got: ' + JSON.stringify(event_key).slice(0, 30)) } private trimHistory(): void { @@ -1477,15 +1130,24 @@ export class EventBus { } // Second pass: force-remove oldest events regardless of status + let dropped_pending_events = 0 if (remaining_overage > 0) { for (const [event_id, event] of this.event_history) { if (remaining_overage <= 0) { break } + if (event.event_status !== 'completed') { + dropped_pending_events += 1 + } this.event_history.delete(event_id) event._gc() remaining_overage -= 1 } + if (dropped_pending_events > 0) { + console.error( + `[bubus] ⚠️ Bus ${this.toString()} has exceeded its limit of ${this.max_history_size} inflight events and has started dropping oldest pending events! Increase bus.max_history_size or reduce the event volume.` + ) + } } } } diff --git a/bubus-ts/src/event_handler.ts b/bubus-ts/src/event_handler.ts new file mode 100644 index 0000000..970fbbe --- /dev/null +++ b/bubus-ts/src/event_handler.ts @@ -0,0 +1,181 @@ +import { v5 as uuidv5 } from 'uuid' + +import type { EventHandlerFunction, HandlerOptions } from './types.js' +import { BaseEvent } from './base_event.js' +import { EventResult } from './event_result.js' + +const HANDLER_ID_NAMESPACE = uuidv5('bubus-handler', uuidv5.DNS) + +export class EventHandler { + // an entry in the list of handlers that are registered on a bus + id: string // unique uuidv5 based on hash of bus name, handler name, handler file path:lineno, registered at timestamp, and event key + handler: EventHandlerFunction + handler_name: string + handler_file_path?: string + handler_timeout: number | null + handler_registered_at: string + handler_registered_ts: number + options?: HandlerOptions + event_key: string | '*' + eventbus_name: string + + constructor(params: { + id?: string + handler: EventHandlerFunction + handler_name: string + handler_file_path?: string + handler_timeout: number | null + handler_registered_at: string + handler_registered_ts: number + options?: HandlerOptions + event_key: string | '*' + eventbus_name: string + }) { + const handler_file_path = EventHandler.detectHandlerFilePath(params.handler_file_path) + this.id = + params.id ?? + EventHandler.computeHandlerId({ + eventbus_name: params.eventbus_name, + handler_name: params.handler_name, + handler_file_path, + handler_registered_at: params.handler_registered_at, + event_key: params.event_key, + }) + this.handler = params.handler + this.handler_name = params.handler_name + this.handler_file_path = handler_file_path + this.handler_timeout = params.handler_timeout + this.handler_registered_at = params.handler_registered_at + this.handler_registered_ts = params.handler_registered_ts + this.options = params.options + this.event_key = params.event_key + this.eventbus_name = params.eventbus_name + } + + // compute globally unique handler uuid as a hash of the bus name, handler name, handler file path, registered at timestamp, and event key + static computeHandlerId(params: { + eventbus_name: string + handler_name: string + handler_file_path?: string + handler_registered_at: string + event_key: string | '*' + }): string { + const file_path = EventHandler.detectHandlerFilePath(params.handler_file_path, 'unknown') ?? 'unknown' + const seed = `${params.eventbus_name}|${params.handler_name}|${file_path}|${params.handler_registered_at}|${params.event_key}` + return uuidv5(seed, HANDLER_ID_NAMESPACE) + } + + toString(): string { + const label = this.handler_name && this.handler_name !== 'anonymous' ? `${this.handler_name}()` : `function#${this.id.slice(-4)}()` + const file_path = this.handler_file_path ?? 'unknown' + return `${label} (${file_path})` + } + + private static detectHandlerFilePath(file_path?: string, fallback: string = 'unknown'): string | undefined { + const extract = (value: string): string => + value.trim().match(/\(([^)]+)\)$/)?.[1] ?? + value.trim().match(/^\s*at\s+(.+)$/)?.[1] ?? + value.trim().match(/^[^@]+@(.+)$/)?.[1] ?? + value.trim() + let resolved_path = file_path ? extract(file_path) : file_path + if (!resolved_path) { + const line = new Error().stack?.split('\n').map((l) => l.trim()).filter(Boolean)[4] + if (line) resolved_path = extract(line) + } + if (!resolved_path) return fallback + const match = resolved_path.match(/^(.*?):(\d+)(?::\d+)?$/) + let normalized = match ? match[1] : resolved_path + const line_number = match?.[2] + if (normalized.startsWith('file://')) { + let path = normalized.slice('file://'.length) + if (path.startsWith('localhost/')) path = path.slice('localhost'.length) + if (!path.startsWith('/')) path = `/${path}` + try { + normalized = decodeURIComponent(path) + } catch { + normalized = path + } + } + normalized = normalized.replace(/\/Users\/[^/]+\//, '~/') + return line_number ? `${normalized}:${line_number}` : normalized + } +} +export class TimeoutError extends Error { + constructor(message: string) { + super(message) + this.name = 'TimeoutError' + } +} + +export class EventHandlerError extends Error { + event_result: EventResult + timeout_seconds: number | null + cause: Error + + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message) + this.name = 'EventHandlerError' + this.event_result = params.event_result + this.cause = params.cause + this.timeout_seconds = params.timeout_seconds ?? this.event_result.event.event_timeout ?? null + } + + get event(): BaseEvent { + return this.event_result.event + } + + get event_type(): string { + return this.event.event_type + } + + get handler_name(): string { + return this.event_result.handler_name + } + + get handler_id(): string { + return this.event_result.handler_id + } + + get event_timeout(): number | null { + return this.event.event_timeout + } +} +// EventHandlerTimeoutError: when the handler itself timed out while executing (due to event.event_timeout being exceeded) + +export class EventHandlerTimeoutError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause?: Error }) { + super(message, { + event_result: params.event_result, + timeout_seconds: params.timeout_seconds, + cause: params.cause ?? new TimeoutError(message), + }) + this.name = 'EventHandlerTimeoutError' + } +} +// EventHandlerCancelledError: when a pending handler was cancelled and never run due to an error (e.g. timeout) in a parent scope + +export class EventHandlerCancelledError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message, params) + this.name = 'EventHandlerCancelledError' + } +} +// EventHandlerAbortedError: when a handler that was already running was aborted due to an error in the parent scope, not due to an error in its own logic / exceeding its own timeout + +export class EventHandlerAbortedError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message, params) + this.name = 'EventHandlerAbortedError' + } +} + +// EventHandlerResultSchemaError: when a handler returns a value that fails event_result_schema validation +export class EventHandlerResultSchemaError extends EventHandlerError { + raw_value: unknown + + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error, raw_value: unknown }) { + super(message, params) + this.name = 'EventHandlerResultSchemaError' + this.raw_value = params.raw_value + } +} diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts index 364af01..b01b90f 100644 --- a/bubus-ts/src/event_result.ts +++ b/bubus-ts/src/event_result.ts @@ -1,6 +1,7 @@ import { v7 as uuidv7 } from 'uuid' import { BaseEvent } from './base_event.js' +import type { EventHandler } from './event_handler.js' import { HandlerLock, withResolvers } from './lock_manager.js' import type { Deferred } from './lock_manager.js' @@ -9,57 +10,62 @@ export type EventResultStatus = 'pending' | 'started' | 'completed' | 'error' export class EventResult { id: string status: EventResultStatus - event?: BaseEvent - event_id: string - handler_id: string - handler_name: string - handler_file_path?: string - eventbus_name: string + event: BaseEvent + handler: EventHandler started_at?: string started_ts?: number completed_at?: string completed_ts?: number - result?: unknown - error?: unknown + result?: unknown // raw return value from the event handler + error?: unknown // error object thrown by the event handler event_children: BaseEvent[] // Abort signal: created when handler starts, rejected by signalAbort() to - // interrupt runHandlerEntry's await via Promise.race. + // interrupt runEventHandler's await via Promise.race. _abort: Deferred | null // Handler lock: tracks ownership of the handler concurrency semaphore - // during handler execution. Set by EventBus.runHandlerEntry, used by - // _runImmediately for yield-and-reacquire during queue-jumps. + // during handler execution. Set by EventBus.runEventHandler, used by + // processEventImmediately for yield-and-reacquire during queue-jumps. _lock: HandlerLock | null - constructor(params: { - event_id: string - handler_id: string - handler_name: string - handler_file_path?: string - eventbus_name: string - event?: BaseEvent - }) { + constructor(params: { event: BaseEvent; handler: EventHandler }) { this.id = uuidv7() this.status = 'pending' this.event = params.event - this.event_id = params.event_id - this.handler_id = params.handler_id - this.handler_name = params.handler_name - this.handler_file_path = params.handler_file_path - this.eventbus_name = params.eventbus_name + this.handler = params.handler this.event_children = [] this._abort = null this._lock = null } - // Create the abort deferred so runHandlerEntry can race against it. - ensureAbortSignal(): Promise { - if (!this._abort) { - this._abort = withResolvers() - } - return this._abort.promise + toString(): string { + return `${this.result ?? 'null'} (${this.status})` + } + + get event_id(): string { + return this.event.event_id } - // Reject the abort promise, causing runHandlerEntry's Promise.race to + get handler_id(): string { + return this.handler.id + } + + get handler_name(): string { + return this.handler.handler_name + } + + get handler_file_path(): string | undefined { + return this.handler.handler_file_path + } + + get handler_timeout(): number | null { + return this.handler.handler_timeout + } + + get eventbus_name(): string { + return this.handler.eventbus_name + } + + // Reject the abort promise, causing runEventHandler's Promise.race to // throw immediately β€” even if the handler has no timeout. signalAbort(error: Error): void { if (this._abort) { @@ -68,11 +74,18 @@ export class EventResult { } } - markStarted(): void { - this.status = 'started' - const { isostring: started_at, ts: started_ts } = BaseEvent.nextTimestamp() - this.started_at = started_at - this.started_ts = started_ts + // Mark started and return the abort promise for Promise.race. + markStarted(): Promise { + if (!this._abort) { + this._abort = withResolvers() + } + if (this.status === 'pending') { + this.status = 'started' + const { isostring: started_at, ts: started_ts } = BaseEvent.nextTimestamp() + this.started_at = started_at + this.started_ts = started_ts + } + return this._abort.promise } markCompleted(result: unknown): void { diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts index b2f9a5d..4202275 100644 --- a/bubus-ts/src/index.ts +++ b/bubus-ts/src/index.ts @@ -1,5 +1,19 @@ export { BaseEvent, BaseEventSchema } from './base_event.js' export { EventResult } from './event_result.js' -export { EventBus, EventHandlerTimeoutError, EventHandlerCancelledError, EventHandlerAbortedError } from './event_bus.js' +export { EventBus } from './event_bus.js' +export { + EventHandlerTimeoutError, + EventHandlerCancelledError, + EventHandlerAbortedError, + EventHandlerResultSchemaError, +} from './event_handler.js' export type { ConcurrencyMode, EventBusInterfaceForLockManager } from './lock_manager.js' -export type { EventClass, EventHandler, EventKey, HandlerOptions, EventStatus, FindOptions, FindWindow } from './types.js' +export type { + EventClass, + EventHandlerFunction as EventHandler, + EventKey, + HandlerOptions, + EventStatus, + FindOptions, + FindWindow, +} from './types.js' diff --git a/bubus-ts/src/lock_manager.ts b/bubus-ts/src/lock_manager.ts index 58c288f..6a1f0c3 100644 --- a/bubus-ts/src/lock_manager.ts +++ b/bubus-ts/src/lock_manager.ts @@ -75,7 +75,11 @@ export class AsyncSemaphore { } } -export const semaphoreForMode = (mode: ConcurrencyMode, global_semaphore: AsyncSemaphore, bus_semaphore: AsyncSemaphore): AsyncSemaphore | null => { +export const semaphoreForMode = ( + mode: ConcurrencyMode, + global_semaphore: AsyncSemaphore, + bus_semaphore: AsyncSemaphore +): AsyncSemaphore | null => { if (mode === 'parallel') { return null } @@ -172,7 +176,7 @@ export type EventBusInterfaceForLockManager = { pending_event_queue: BaseEvent[] in_flight_event_ids: Set runloop_running: boolean - hasPendingResults: () => boolean + isIdle: () => boolean event_concurrency_default: ConcurrencyMode handler_concurrency_default: ConcurrencyMode } @@ -257,6 +261,10 @@ export class LockManager { return this.active_handler_results[this.active_handler_results.length - 1] } + // Per-bus check: true only if this specific bus has a handler on its stack. + // For cross-bus queue-jumping, EventBus.processEventImmediately uses getParentEventResultAcrossAllBusses() + // to walk up the parent event tree, and the bus proxy passes handler_result + // to processEventImmediately so it can yield/reacquire the correct semaphore. isInsideHandlerContext(): boolean { return this.active_handler_results.length > 0 } @@ -318,8 +326,7 @@ export class LockManager { } getSemaphoreForHandler(event: BaseEvent, options?: HandlerOptions): AsyncSemaphore | null { - const event_override = - event.handler_concurrency && event.handler_concurrency !== 'auto' ? event.handler_concurrency : undefined + const event_override = event.handler_concurrency && event.handler_concurrency !== 'auto' ? event.handler_concurrency : undefined const handler_override = options?.handler_concurrency && options.handler_concurrency !== 'auto' ? options.handler_concurrency : undefined const fallback = this.bus.handler_concurrency_default @@ -351,10 +358,7 @@ export class LockManager { // Compute instantaneous idle snapshot from live bus state; used to gate waiters. private getIdleSnapshot(): boolean { return ( - this.bus.pending_event_queue.length === 0 && - this.bus.in_flight_event_ids.size === 0 && - !this.bus.hasPendingResults() && - !this.bus.runloop_running + this.bus.pending_event_queue.length === 0 && this.bus.in_flight_event_ids.size === 0 && this.bus.isIdle() && !this.bus.runloop_running ) } } diff --git a/bubus-ts/src/logging.ts b/bubus-ts/src/logging.ts new file mode 100644 index 0000000..8d242e7 --- /dev/null +++ b/bubus-ts/src/logging.ts @@ -0,0 +1,242 @@ +import { BaseEvent } from './base_event.js' +import { EventResult } from './event_result.js' +import { EventHandlerCancelledError, EventHandlerTimeoutError } from './event_handler.js' + +type LogTreeBus = { + name: string + event_history: Map +} + +export const logTree = (bus: LogTreeBus): string => { + const parent_to_children = new Map() + + const add_child = (parent_id: string, child: BaseEvent): void => { + const existing = parent_to_children.get(parent_id) ?? [] + existing.push(child) + parent_to_children.set(parent_id, existing) + } + + const root_events: BaseEvent[] = [] + const seen = new Set() + + for (const event of bus.event_history.values()) { + const parent_id = event.event_parent_id + if (!parent_id || parent_id === event.event_id || !bus.event_history.has(parent_id)) { + if (!seen.has(event.event_id)) { + root_events.push(event) + seen.add(event.event_id) + } + } + } + + if (root_events.length === 0) { + return '(No events in history)' + } + + const nodes_by_id = new Map() + for (const root of root_events) { + nodes_by_id.set(root.event_id, root) + for (const descendant of root.event_descendants) { + nodes_by_id.set(descendant.event_id, descendant) + } + } + + for (const node of nodes_by_id.values()) { + const parent_id = node.event_parent_id + if (!parent_id || parent_id === node.event_id) { + continue + } + if (!nodes_by_id.has(parent_id)) { + continue + } + add_child(parent_id, node) + } + + for (const children of parent_to_children.values()) { + children.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) + } + + const lines: string[] = [] + lines.push(`πŸ“Š Event History Tree for ${bus.name}`) + lines.push('='.repeat(80)) + + root_events.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) + const visited = new Set() + root_events.forEach((event, index) => { + lines.push(buildTreeLine(event, '', index === root_events.length - 1, parent_to_children, visited)) + }) + + lines.push('='.repeat(80)) + + return lines.join('\n') +} + +export const buildTreeLine = ( + event: BaseEvent, + indent: string, + is_last: boolean, + parent_to_children: Map, + visited: Set +): string => { + const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' + const status_icon = event.event_status === 'completed' ? 'βœ…' : event.event_status === 'started' ? 'πŸƒ' : '⏳' + + const created_at = formatTimestamp(event.event_created_at) + let timing = `[${created_at}` + if (event.event_completed_at) { + const created_ms = Date.parse(event.event_created_at) + const completed_ms = Date.parse(event.event_completed_at) + if (!Number.isNaN(created_ms) && !Number.isNaN(completed_ms)) { + const duration = (completed_ms - created_ms) / 1000 + timing += ` (${duration.toFixed(3)}s)` + } + } + timing += ']' + + const line = `${indent}${connector}${status_icon} ${event.event_type}#${event.event_id.slice(-4)} ${timing}` + + if (visited.has(event.event_id)) { + return line + } + visited.add(event.event_id) + + const extension = is_last ? ' ' : 'β”‚ ' + const new_indent = indent + extension + + const result_items: Array<{ type: 'result'; result: EventResult } | { type: 'child'; child: BaseEvent }> = [] + for (const result of event.event_results.values()) { + result_items.push({ type: 'result', result }) + } + const children = parent_to_children.get(event.event_id) ?? [] + const printed_child_ids = new Set(event.event_results.size > 0 ? event.event_results.keys() : []) + for (const child of children) { + if (!printed_child_ids.has(child.event_id) && !child.event_emitted_by_handler_id) { + result_items.push({ type: 'child', child }) + printed_child_ids.add(child.event_id) + } + } + + if (result_items.length === 0) { + return line + } + + const child_lines: string[] = [] + result_items.forEach((item, index) => { + const is_last_item = index === result_items.length - 1 + if (item.type === 'result') { + child_lines.push(buildResultLine(item.result, new_indent, is_last_item, parent_to_children, visited)) + } else { + child_lines.push(buildTreeLine(item.child, new_indent, is_last_item, parent_to_children, visited)) + } + }) + + return [line, ...child_lines].join('\n') +} + +export const buildResultLine = ( + result: EventResult, + indent: string, + is_last: boolean, + parent_to_children: Map, + visited: Set +): string => { + const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' + const status_icon = result.status === 'completed' ? 'βœ…' : result.status === 'error' ? '❌' : result.status === 'started' ? 'πŸƒ' : '⏳' + + const handler_label = + result.handler_name && result.handler_name !== 'anonymous' + ? result.handler_name + : result.handler_file_path + ? result.handler_file_path + : 'anonymous' + const handler_display = `${result.eventbus_name}.${handler_label}#${result.handler_id.slice(-4)}` + let line = `${indent}${connector}${status_icon} ${handler_display}` + + if (result.started_at) { + line += ` [${formatTimestamp(result.started_at)}` + if (result.completed_at) { + const started_ms = Date.parse(result.started_at) + const completed_ms = Date.parse(result.completed_at) + if (!Number.isNaN(started_ms) && !Number.isNaN(completed_ms)) { + const duration = (completed_ms - started_ms) / 1000 + line += ` (${duration.toFixed(3)}s)` + } + } + line += ']' + } + + if (result.status === 'error' && result.error) { + if (result.error instanceof EventHandlerTimeoutError) { + line += ` ⏱️ Timeout: ${result.error.message}` + } else if (result.error instanceof EventHandlerCancelledError) { + line += ` 🚫 Cancelled: ${result.error.message}` + } else { + const error_name = result.error instanceof Error ? result.error.name : 'Error' + const error_message = result.error instanceof Error ? result.error.message : String(result.error) + line += ` ☠️ ${error_name}: ${error_message}` + } + } else if (result.status === 'completed') { + line += ` β†’ ${formatResultValue(result.result)}` + } + + const extension = is_last ? ' ' : 'β”‚ ' + const new_indent = indent + extension + + if (result.event_children.length === 0) { + return line + } + + const child_lines: string[] = [] + const direct_children = result.event_children + const parent_children = parent_to_children.get(result.event_id) ?? [] + const emitted_children = parent_children.filter((child) => child.event_emitted_by_handler_id === result.handler_id) + const children_by_id = new Map() + direct_children.forEach((child) => { + children_by_id.set(child.event_id, child) + }) + emitted_children.forEach((child) => { + if (!children_by_id.has(child.event_id)) { + children_by_id.set(child.event_id, child) + } + }) + const children_to_print = Array.from(children_by_id.values()).filter((child) => !visited.has(child.event_id)) + + children_to_print.forEach((child, index) => { + child_lines.push(buildTreeLine(child, new_indent, index === children_to_print.length - 1, parent_to_children, visited)) + }) + + return [line, ...child_lines].join('\n') +} + +export const formatTimestamp = (value?: string): string => { + if (!value) { + return 'N/A' + } + const date = new Date(value) + if (Number.isNaN(date.getTime())) { + return 'N/A' + } + return date.toISOString().slice(11, 23) +} + +export const formatResultValue = (value: unknown): string => { + if (value === null || value === undefined) { + return 'None' + } + if (value instanceof BaseEvent) { + return `Event(${value.event_type}#${value.event_id.slice(-4)})` + } + if (typeof value === 'string') { + return JSON.stringify(value) + } + if (typeof value === 'number' || typeof value === 'boolean') { + return String(value) + } + if (Array.isArray(value)) { + return `list(${value.length} items)` + } + if (typeof value === 'object') { + return `dict(${Object.keys(value as Record).length} items)` + } + return `${typeof value}(...)` +} diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts index c78e16f..7ffd0fa 100644 --- a/bubus-ts/src/types.ts +++ b/bubus-ts/src/types.ts @@ -7,10 +7,11 @@ export type EventClass = { event_type?: string export type EventKey = string | EventClass -export type EventHandler = (event: T) => void | Promise +export type EventHandlerFunction = (event: T) => void | Promise export type HandlerOptions = { handler_concurrency?: ConcurrencyMode + handler_timeout?: number | null } export type FindWindow = boolean | number diff --git a/bubus-ts/tests/_perf_profile.ts b/bubus-ts/tests/_perf_profile.ts index 6307e41..8ec7ce0 100644 --- a/bubus-ts/tests/_perf_profile.ts +++ b/bubus-ts/tests/_perf_profile.ts @@ -3,7 +3,8 @@ import { BaseEvent, EventBus } from '../src/index.js' const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) const total_events = 200_000 -const bus = new EventBus('PerfBus', { max_history_size: 1000 }) +// Keep full history to avoid trimming inflight events during perf profiling. +const bus = new EventBus('PerfBus', { max_history_size: total_events }) let processed_count = 0 bus.on(SimpleEvent, () => { @@ -46,6 +47,13 @@ global.gc?.() const mem_gc = process.memoryUsage() console.log(`Memory after GC: RSS=${(mem_gc.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_gc.heapUsed / 1024 / 1024).toFixed(1)}MB`) +const total_ms = t3 - t0 +console.log( + `Per-event: time=${(total_ms / total_events).toFixed(4)}ms, ` + + `heap=${(((mem_after.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB, ` + + `heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB` +) + console.log(`\nProcessed: ${processed_count}/${total_events}`) console.log(`History size: ${bus.event_history.size} (max: ${bus.max_history_size})`) console.log(`Heap delta (before GC): +${((mem_after.heapUsed - mem_before.heapUsed) / 1024 / 1024).toFixed(1)}MB`) diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index ef5dec9..518dfe0 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -326,13 +326,13 @@ test('isInsideHandler() is per-bus, not global', async () => { let bus_b_inside_during_b_handler = false bus_a.on(EventA, () => { - bus_a_inside_during_a_handler = bus_a.isInsideHandler() - bus_b_inside_during_a_handler = bus_b.isInsideHandler() + bus_a_inside_during_a_handler = bus_a.locks.isInsideHandlerContext() + bus_b_inside_during_a_handler = bus_b.locks.isInsideHandlerContext() }) bus_b.on(EventB, () => { - bus_a_inside_during_b_handler = bus_a.isInsideHandler() - bus_b_inside_during_b_handler = bus_b.isInsideHandler() + bus_a_inside_during_b_handler = bus_a.locks.isInsideHandlerContext() + bus_b_inside_during_b_handler = bus_b.locks.isInsideHandlerContext() }) // Dispatch to bus_a first, wait for completion so bus_b has no active handlers @@ -344,16 +344,16 @@ test('isInsideHandler() is per-bus, not global', async () => { await bus_b.waitUntilIdle() // During bus_a's handler: bus_a should report inside, bus_b should not - assert.equal(bus_a_inside_during_a_handler, true, 'bus_a.isInsideHandler() should be true during bus_a handler') - assert.equal(bus_b_inside_during_a_handler, false, 'bus_b.isInsideHandler() should be false during bus_a handler') + assert.equal(bus_a_inside_during_a_handler, true, 'bus_a.locks.isInsideHandlerContext() should be true during bus_a handler') + assert.equal(bus_b_inside_during_a_handler, false, 'bus_b.locks.isInsideHandlerContext() should be false during bus_a handler') // During bus_b's handler: bus_b should report inside, bus_a should not - assert.equal(bus_b_inside_during_b_handler, true, 'bus_b.isInsideHandler() should be true during bus_b handler') - assert.equal(bus_a_inside_during_b_handler, false, 'bus_a.isInsideHandler() should be false during bus_b handler') + assert.equal(bus_b_inside_during_b_handler, true, 'bus_b.locks.isInsideHandlerContext() should be true during bus_b handler') + assert.equal(bus_a_inside_during_b_handler, false, 'bus_a.locks.isInsideHandlerContext() should be false during bus_b handler') // After all handlers complete, neither bus should report inside - assert.equal(bus_a.isInsideHandler(), false, 'bus_a.isInsideHandler() should be false after idle') - assert.equal(bus_b.isInsideHandler(), false, 'bus_b.isInsideHandler() should be false after idle') + assert.equal(bus_a.locks.isInsideHandlerContext(), false, 'bus_a.locks.isInsideHandlerContext() should be false after idle') + assert.equal(bus_b.locks.isInsideHandlerContext(), false, 'bus_b.locks.isInsideHandlerContext() should be false after idle') }) test('dispatch multiple, await one skips others until after handler completes', async () => { diff --git a/bubus-ts/tests/eventbus_basics.test.ts b/bubus-ts/tests/eventbus_basics.test.ts index 0ac3d1e..d44e032 100644 --- a/bubus-ts/tests/eventbus_basics.test.ts +++ b/bubus-ts/tests/eventbus_basics.test.ts @@ -489,7 +489,7 @@ test('unreferenced EventBus can be garbage collected (not retained by _all_insta let weak_ref: WeakRef - // Create a bus inside an IIFE so the only reference is the WeakRef + // Create a bus inside an IIFE so the only reference is the WeakRef ;(() => { const bus = new EventBus('GCTestBus') weak_ref = new WeakRef(bus) diff --git a/bubus-ts/tests/log_tree.test.ts b/bubus-ts/tests/log_tree.test.ts index f7c24f2..535a26f 100644 --- a/bubus-ts/tests/log_tree.test.ts +++ b/bubus-ts/tests/log_tree.test.ts @@ -4,6 +4,8 @@ import { test } from 'node:test' import { z } from 'zod' import { BaseEvent, EventBus, EventResult } from '../src/index.js' +import { EventHandler } from '../src/event_handler.js' +import type { EventHandlerFunction } from '../src/types.js' const RootEvent = BaseEvent.extend('RootEvent', { data: z.string().optional() }) const ChildEvent = BaseEvent.extend('ChildEvent', { value: z.number().optional() }) @@ -16,6 +18,21 @@ class ValueError extends Error { } } +const createHandlerEntry = (bus: EventBus, handler_id: string, handler_name: string, event_key: string): EventHandler => { + const handler: EventHandlerFunction = () => undefined + const { isostring: handler_registered_at, ts: handler_registered_ts } = BaseEvent.nextTimestamp() + return new EventHandler({ + id: handler_id, + handler, + handler_name, + handler_timeout: bus.event_timeout_default, + handler_registered_at, + handler_registered_ts, + event_key, + eventbus_name: bus.name, + }) +} + test('logTree: single event', () => { const bus = new EventBus('SingleBus') @@ -40,10 +57,8 @@ test('logTree: with handler results', () => { const handler_id = 'handler-1' const result = new EventResult({ - event_id: event.event_id, - handler_id, - handler_name: 'test_handler', - eventbus_name: 'HandlerBus', + event, + handler: createHandlerEntry(bus, handler_id, 'test_handler', event.event_type), }) result.markStarted() result.markCompleted('status: success') @@ -67,10 +82,8 @@ test('logTree: with handler errors', () => { const handler_id = 'handler-2' const result = new EventResult({ - event_id: event.event_id, - handler_id, - handler_name: 'error_handler', - eventbus_name: 'ErrorBus', + event, + handler: createHandlerEntry(bus, handler_id, 'error_handler', event.event_type), }) result.markStarted() result.markError(new ValueError('Test error message')) @@ -93,10 +106,8 @@ test('logTree: complex nested', () => { const root_handler_id = 'handler-root' const root_result = new EventResult({ - event_id: root.event_id, - handler_id: root_handler_id, - handler_name: 'root_handler', - eventbus_name: 'ComplexBus', + event: root, + handler: createHandlerEntry(bus, root_handler_id, 'root_handler', root.event_type), }) root_result.markStarted() root_result.markCompleted('Root processed') @@ -110,10 +121,8 @@ test('logTree: complex nested', () => { const child_handler_id = 'handler-child' const child_result = new EventResult({ - event_id: child.event_id, - handler_id: child_handler_id, - handler_name: 'child_handler', - eventbus_name: 'ComplexBus', + event: child, + handler: createHandlerEntry(bus, child_handler_id, 'child_handler', child.event_type), }) child_result.markStarted() child_result.markCompleted([1, 2, 3]) @@ -127,10 +136,8 @@ test('logTree: complex nested', () => { const grandchild_handler_id = 'handler-grandchild' const grandchild_result = new EventResult({ - event_id: grandchild.event_id, - handler_id: grandchild_handler_id, - handler_name: 'grandchild_handler', - eventbus_name: 'ComplexBus', + event: grandchild, + handler: createHandlerEntry(bus, grandchild_handler_id, 'grandchild_handler', grandchild.event_type), }) grandchild_result.markStarted() grandchild_result.markCompleted(null) @@ -182,10 +189,8 @@ test('logTree: timing info', () => { const handler_id = 'handler-time' const result = new EventResult({ - event_id: event.event_id, - handler_id, - handler_name: 'timed_handler', - eventbus_name: 'TimingBus', + event, + handler: createHandlerEntry(bus, handler_id, 'timed_handler', event.event_type), }) result.markStarted() result.markCompleted('done') @@ -207,10 +212,8 @@ test('logTree: running handler', () => { const handler_id = 'handler-running' const result = new EventResult({ - event_id: event.event_id, - handler_id, - handler_name: 'running_handler', - eventbus_name: 'RunningBus', + event, + handler: createHandlerEntry(bus, handler_id, 'running_handler', event.event_type), }) result.markStarted() event.event_results.set(handler_id, result) diff --git a/bubus-ts/tests/performance.test.ts b/bubus-ts/tests/performance.test.ts index f9bc9ea..8e3fd59 100644 --- a/bubus-ts/tests/performance.test.ts +++ b/bubus-ts/tests/performance.test.ts @@ -9,15 +9,15 @@ const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) const mb = (bytes: number) => (bytes / 1024 / 1024).toFixed(1) test('processes 50k events within reasonable time', { timeout: 30_000 }, async () => { - const bus = new EventBus('PerfBus', { max_history_size: 1000 }) + const total_events = 50_000 + // Keep full history to avoid trimming inflight events during perf runs. + const bus = new EventBus('PerfBus', { max_history_size: total_events }) let processed_count = 0 bus.on(SimpleEvent, () => { processed_count += 1 }) - const total_events = 50_000 - global.gc?.() const mem_before = process.memoryUsage() @@ -48,12 +48,13 @@ test('processes 50k events within reasonable time', { timeout: 30_000 }, async ( `\n perf: ${total_events} events in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + `\n dispatch: ${dispatch_ms}ms | await: ${await_ms}ms` + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ dispatch=${mb(mem_dispatch.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${(((mem_done.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB | heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB` + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` ) assert.equal(processed_count, total_events) assert.ok(total_ms < 30_000, `Processing took ${total_ms}ms`) - assert.ok(bus.event_history.size <= bus.max_history_size) + assert.ok(bus.event_history.size <= bus.max_history_size!) bus.destroy() }) @@ -73,7 +74,8 @@ test('500 ephemeral buses with 100 events each', { timeout: 30_000 }, async () = const t0 = Date.now() for (let b = 0; b < total_buses; b += 1) { - const bus = new EventBus(`ReqBus-${b}`, { max_history_size: 10 }) + // Avoid trimming inflight events during perf runs. + const bus = new EventBus(`ReqBus-${b}`, { max_history_size: events_per_bus }) bus.on(SimpleEvent, () => { processed_count += 1 @@ -101,6 +103,7 @@ test('500 ephemeral buses with 100 events each', { timeout: 30_000 }, async () = console.log( `\n perf: ${total_buses} buses Γ— ${events_per_bus} events = ${total_events} total in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${(((mem_done.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB | heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB` + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + `\n live bus instances: ${EventBus._all_instances.size}` ) @@ -117,10 +120,10 @@ test('500 ephemeral buses with 100 events each', { timeout: 30_000 }, async () = test('50k events with ephemeral on/off handler registration across 2 buses', { timeout: 30_000 }, async () => { const RequestEvent = BaseEvent.extend('RequestEvent', {}) - const bus_a = new EventBus('SharedBusA', { max_history_size: 1000 }) - const bus_b = new EventBus('SharedBusB', { max_history_size: 1000 }) - const total_events = 50_000 + // Keep full history to avoid trimming inflight events during perf runs. + const bus_a = new EventBus('SharedBusA', { max_history_size: total_events }) + const bus_b = new EventBus('SharedBusB', { max_history_size: total_events }) let processed_a = 0 let processed_b = 0 @@ -166,6 +169,7 @@ test('50k events with ephemeral on/off handler registration across 2 buses', { t `\n perf: ${total_events} events with ephemeral on/off in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + `\n dispatch: bus_a=${processed_a} | bus_b=${processed_b}` + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${(((mem_done.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB | heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB` + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + `\n bus_a handlers: ${bus_a.handlers.size} | bus_b handlers: ${bus_b.handlers.size}` ) @@ -186,163 +190,160 @@ test('50k events with ephemeral on/off handler registration across 2 buses', { t // Worst-case memory leak stress test. Exercises every retention path simultaneously: // multi-bus forwarding, queue-jumping (done() inside handler), timeouts that cancel // pending handlers, nested parent-child-grandchild trees, Proxy accumulation from -// _getBusScopedEvent, ephemeral on/off handler churn, find() waiter timeouts, -// and aggressive history trimming via _gc(). If any code path leaks references, -// memory will grow unbounded across 2000 iterations. -test( - 'worst-case: forwarding + queue-jump + timeouts + cancellation at scale', - { timeout: 60_000 }, - async () => { - const ParentEvent = BaseEvent.extend('WC_Parent', { - iteration: z.number(), - }) - const ChildEvent = BaseEvent.extend('WC_Child', { - iteration: z.number(), - }) - const GrandchildEvent = BaseEvent.extend('WC_Grandchild', { - iteration: z.number(), - }) +// getEventProxyScopedToThisBus, ephemeral on/off handler churn, and find() waiter timeouts. +// If any code path leaks references, memory will grow unbounded across 2000 iterations. +test('worst-case: forwarding + queue-jump + timeouts + cancellation at scale', { timeout: 60_000 }, async () => { + const ParentEvent = BaseEvent.extend('WC_Parent', { + iteration: z.number(), + }) + const ChildEvent = BaseEvent.extend('WC_Child', { + iteration: z.number(), + }) + const GrandchildEvent = BaseEvent.extend('WC_Grandchild', { + iteration: z.number(), + }) - const bus_a = new EventBus('WC_A', { max_history_size: 50 }) - const bus_b = new EventBus('WC_B', { max_history_size: 50 }) - const bus_c = new EventBus('WC_C', { max_history_size: 50 }) - - const total_iterations = 2000 - let parent_handled_a = 0 - let parent_handled_b = 0 - let child_handled_c = 0 - let grandchild_handled = 0 - let timeout_count = 0 - let cancel_count = 0 - - // Persistent handler on bus_b β€” just counts - bus_b.on(ParentEvent, () => { - parent_handled_b += 1 - }) + const total_iterations = 2000 + const history_limit = total_iterations * 2 + // Keep enough history to avoid trimming inflight events during perf runs. + const bus_a = new EventBus('WC_A', { max_history_size: history_limit }) + const bus_b = new EventBus('WC_B', { max_history_size: history_limit }) + const bus_c = new EventBus('WC_C', { max_history_size: history_limit }) + let parent_handled_a = 0 + let parent_handled_b = 0 + let child_handled_c = 0 + let grandchild_handled = 0 + let timeout_count = 0 + let cancel_count = 0 + + // Persistent handler on bus_b β€” just counts + bus_b.on(ParentEvent, () => { + parent_handled_b += 1 + }) - // Persistent handler on bus_c β€” processes child, emits grandchild - bus_c.on(ChildEvent, async (event) => { - child_handled_c += 1 - const gc = event.bus?.emit(GrandchildEvent({ iteration: (event as any).iteration }))! - bus_c.dispatch(gc) - await gc.done() - }) + // Persistent handler on bus_c β€” processes child, emits grandchild + bus_c.on(ChildEvent, async (event) => { + child_handled_c += 1 + const gc = event.bus?.emit(GrandchildEvent({ iteration: (event as any).iteration }))! + bus_c.dispatch(gc) + await gc.done() + }) - // Persistent handler on bus_c for grandchild β€” slow on timeout iterations - // so the child's 5ms timeout fires while this is still sleeping. - // This creates EventHandlerTimeoutError β†’ EventHandlerCancelledError chains. - // Sleep is 50ms but child timeout is 5ms β€” with cancellation of started handlers, - // the child completes immediately when timeout fires. Background sleep continues - // silently (JS can't cancel async functions, but the event system moves on). - bus_c.on(GrandchildEvent, async (event) => { - grandchild_handled += 1 - if ((event as any).iteration % 5 === 0) { - await new Promise((r) => setTimeout(r, 50)) - } - }) + // Persistent handler on bus_c for grandchild β€” slow on timeout iterations + // so the child's 5ms timeout fires while this is still sleeping. + // This creates EventHandlerTimeoutError β†’ EventHandlerCancelledError chains. + // Sleep is 50ms but child timeout is 5ms β€” with cancellation of started handlers, + // the child completes immediately when timeout fires. Background sleep continues + // silently (JS can't cancel async functions, but the event system moves on). + bus_c.on(GrandchildEvent, async (event) => { + grandchild_handled += 1 + if ((event as any).iteration % 5 === 0) { + await new Promise((r) => setTimeout(r, 50)) + } + }) - global.gc?.() - const mem_before = process.memoryUsage() - const t0 = Date.now() + global.gc?.() + const mem_before = process.memoryUsage() + const t0 = Date.now() - for (let i = 0; i < total_iterations; i += 1) { - const should_timeout = i % 5 === 0 + for (let i = 0; i < total_iterations; i += 1) { + const should_timeout = i % 5 === 0 - // Ephemeral handler on bus_a β€” queue-jumps a child to bus_c - const ephemeral_handler = async (event: any) => { - parent_handled_a += 1 - const child_timeout = should_timeout ? 0.005 : null // 5ms timeout β†’ fires while grandchild sleeps 50ms - const child = event.bus?.emit(ChildEvent({ + // Ephemeral handler on bus_a β€” queue-jumps a child to bus_c + const ephemeral_handler = async (event: any) => { + parent_handled_a += 1 + const child_timeout = should_timeout ? 0.005 : null // 5ms timeout β†’ fires while grandchild sleeps 50ms + const child = event.bus?.emit( + ChildEvent({ iteration: i, event_timeout: child_timeout, - }))! - bus_c.dispatch(child) - try { - await child.done() - } catch { - // Swallow β€” timeout errors are expected - } + }) + )! + bus_c.dispatch(child) + try { + await child.done() + } catch { + // Swallow β€” timeout errors are expected } - bus_a.on(ParentEvent, ephemeral_handler) + } + bus_a.on(ParentEvent, ephemeral_handler) - // Dispatch parent to bus_a (with handler) and bus_b (forwarding) - const parent = ParentEvent({ iteration: i }) - const ev_a = bus_a.dispatch(parent) - bus_b.dispatch(parent) + // Dispatch parent to bus_a (with handler) and bus_b (forwarding) + const parent = ParentEvent({ iteration: i }) + const ev_a = bus_a.dispatch(parent) + bus_b.dispatch(parent) - await ev_a.done() - // Don't waitUntilIdle on bus_c here β€” timed-out grandchild handlers are - // still sleeping in the background (JS can't cancel async functions). - // Let them pile up; the final waitUntilIdle() outside the loop will drain. + await ev_a.done() + // Don't waitUntilIdle on bus_c here β€” timed-out grandchild handlers are + // still sleeping in the background (JS can't cancel async functions). + // Let them pile up; the final waitUntilIdle() outside the loop will drain. - // Deregister ephemeral handler - bus_a.off(ParentEvent, ephemeral_handler) + // Deregister ephemeral handler + bus_a.off(ParentEvent, ephemeral_handler) - // Periodic find() with short timeout β€” exercises find_waiter cleanup - if (i % 10 === 0) { - // Don't await β€” let it timeout in the background - bus_a.find(ParentEvent, { future: 0.001 }) - } + // Periodic find() with short timeout β€” exercises find_waiter cleanup + if (i % 10 === 0) { + // Don't await β€” let it timeout in the background + bus_a.find(ParentEvent, { future: 0.001 }) } + } - await bus_a.waitUntilIdle() - await bus_b.waitUntilIdle() - await bus_c.waitUntilIdle() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() - // Count timeouts and cancellations from bus_c's history - for (const event of bus_c.event_history.values()) { - for (const result of event.event_results.values()) { - if (result.error instanceof EventHandlerTimeoutError) timeout_count += 1 - if (result.error instanceof EventHandlerCancelledError) cancel_count += 1 - } + // Count timeouts and cancellations from bus_c's history + for (const event of bus_c.event_history.values()) { + for (const result of event.event_results.values()) { + if (result.error instanceof EventHandlerTimeoutError) timeout_count += 1 + if (result.error instanceof EventHandlerCancelledError) cancel_count += 1 } - - const t_done = Date.now() - const mem_done = process.memoryUsage() - - global.gc?.() - // Short delay to let find() timeouts and timed-out handler promises settle - await new Promise((r) => setTimeout(r, 50)) - global.gc?.() - const mem_gc = process.memoryUsage() - - const total_ms = t_done - t0 - const mem_delta_mb = (mem_gc.heapUsed - mem_before.heapUsed) / 1024 / 1024 - - console.log( - `\n worst-case: ${total_iterations} iterations in ${total_ms}ms (${Math.round(total_iterations / (total_ms / 1000))}/s)` + - `\n parent: bus_a=${parent_handled_a} bus_b=${parent_handled_b}` + - `\n child: bus_c=${child_handled_c} | grandchild=${grandchild_handled}` + - `\n timeouts=${timeout_count} cancellations=${cancel_count}` + - `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB (delta=${mem_delta_mb.toFixed(1)}MB)` + - `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + - `\n history: a=${bus_a.event_history.size} b=${bus_b.event_history.size} c=${bus_c.event_history.size}` + - `\n handlers: a=${bus_a.handlers.size} b=${bus_b.handlers.size} c=${bus_c.handlers.size}` + - `\n instances: ${EventBus._all_instances.size}` - ) - - // All iterations processed - assert.equal(parent_handled_a, total_iterations) - assert.equal(parent_handled_b, total_iterations) - - // History bounded by max_history_size - assert.ok(bus_a.event_history.size <= 50, `bus_a history ${bus_a.event_history.size} > 50`) - assert.ok(bus_b.event_history.size <= 50, `bus_b history ${bus_b.event_history.size} > 50`) - assert.ok(bus_c.event_history.size <= 50, `bus_c history ${bus_c.event_history.size} > 50`) - - // Ephemeral handlers all cleaned up - assert.equal(bus_a.handlers.size, 0, 'All ephemeral handlers removed from bus_a') - - // Memory should not grow unbounded β€” allow 50MB over baseline - assert.ok( - mem_delta_mb < 50, - `Memory grew ${mem_delta_mb.toFixed(1)}MB over baseline (limit 50MB)` - ) - - bus_a.destroy() - bus_b.destroy() - bus_c.destroy() - - assert.equal(EventBus._all_instances.size, 0, 'All buses destroyed') } -) + + const t_done = Date.now() + const mem_done = process.memoryUsage() + + global.gc?.() + // Short delay to let find() timeouts and timed-out handler promises settle + await new Promise((r) => setTimeout(r, 50)) + global.gc?.() + const mem_gc = process.memoryUsage() + + const total_ms = t_done - t0 + const estimated_events = total_iterations * 3 + const mem_delta_mb = (mem_gc.heapUsed - mem_before.heapUsed) / 1024 / 1024 + + console.log( + `\n worst-case: ${total_iterations} iterations in ${total_ms}ms (${Math.round(total_iterations / (total_ms / 1000))}/s)` + + `\n parent: bus_a=${parent_handled_a} bus_b=${parent_handled_b}` + + `\n child: bus_c=${child_handled_c} | grandchild=${grandchild_handled}` + + `\n timeouts=${timeout_count} cancellations=${cancel_count}` + + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB (delta=${mem_delta_mb.toFixed(1)}MB)` + + `\n per-event (est): time=${(total_ms / estimated_events).toFixed(4)}ms | heap=${(((mem_done.heapUsed - mem_before.heapUsed) / estimated_events) / 1024).toFixed(2)}KB | heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / estimated_events) / 1024).toFixed(2)}KB` + + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + + `\n history: a=${bus_a.event_history.size} b=${bus_b.event_history.size} c=${bus_c.event_history.size}` + + `\n handlers: a=${bus_a.handlers.size} b=${bus_b.handlers.size} c=${bus_c.handlers.size}` + + `\n instances: ${EventBus._all_instances.size}` + ) + + // All iterations processed + assert.equal(parent_handled_a, total_iterations) + assert.equal(parent_handled_b, total_iterations) + + // History bounded by max_history_size + assert.ok(bus_a.event_history.size <= history_limit, `bus_a history ${bus_a.event_history.size} > ${history_limit}`) + assert.ok(bus_b.event_history.size <= history_limit, `bus_b history ${bus_b.event_history.size} > ${history_limit}`) + assert.ok(bus_c.event_history.size <= history_limit, `bus_c history ${bus_c.event_history.size} > ${history_limit}`) + + // Ephemeral handlers all cleaned up + assert.equal(bus_a.handlers.size, 0, 'All ephemeral handlers removed from bus_a') + + // Memory should not grow unbounded β€” allow 50MB over baseline + assert.ok(mem_delta_mb < 50, `Memory grew ${mem_delta_mb.toFixed(1)}MB over baseline (limit 50MB)`) + + bus_a.destroy() + bus_b.destroy() + bus_c.destroy() + + assert.equal(EventBus._all_instances.size, 0, 'All buses destroyed') +}) diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts index 6074899..0a8bb4e 100644 --- a/bubus-ts/tests/timeout.test.ts +++ b/bubus-ts/tests/timeout.test.ts @@ -189,8 +189,11 @@ test('timeout still marks event failed when other handlers finish', async () => assert.ok(results.includes('fast')) }) -test('deadlock warning triggers when event exceeds timeout', async () => { - const bus = new EventBus('DeadlockWarnBus') +test('slow event warning fires when event exceeds event_slow_timeout', async () => { + const bus = new EventBus('SlowEventWarnBus', { + event_slow_timeout: 0.01, + event_handler_slow_timeout: null, + }) const warnings: string[] = [] const original_warn = console.warn console.warn = (message?: unknown, ...args: unknown[]) => { @@ -202,30 +205,29 @@ test('deadlock warning triggers when event exceeds timeout', async () => { try { bus.on(TimeoutEvent, async () => { - await new Promise(() => { - // never resolve - }) + await delay(25) + return 'ok' }) - const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })) await event.done() } finally { console.warn = original_warn } assert.ok( - warnings.some((message) => message.includes('Slow handler')), - 'Expected deadlock warning' + warnings.some((message) => message.toLowerCase().includes('slow event processing')), + 'Expected slow event warning' ) }) test('slow handler warning fires when handler runs long', async () => { - const bus = new EventBus('SlowHandlerWarnBus') + const bus = new EventBus('SlowHandlerWarnBus', { + event_handler_slow_timeout: 0.01, + event_slow_timeout: null, + }) const warnings: string[] = [] const original_warn = console.warn - const original_set_timeout = global.setTimeout - const original_clear_timeout = global.clearTimeout - console.warn = (message?: unknown, ...args: unknown[]) => { warnings.push(String(message)) if (args.length > 0) { @@ -233,36 +235,58 @@ test('slow handler warning fires when handler runs long', async () => { } } - // Force the slow-handler warning timer to fire immediately - global.setTimeout = ((callback: (...args: unknown[]) => void, delay?: number, ...args: unknown[]) => { - if (delay === 15000) { - return original_set_timeout(callback, 0, ...args) - } - return original_set_timeout(callback, delay as number, ...args) - }) as typeof setTimeout + try { + bus.on(TimeoutEvent, async () => { + await delay(25) + return 'ok' + }) + + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() + } finally { + console.warn = original_warn + } + + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event handler')), + 'Expected slow handler warning' + ) +}) - global.clearTimeout = ((timeout: ReturnType) => { - return original_clear_timeout(timeout) - }) as typeof clearTimeout +test('slow handler and slow event warnings can both fire', async () => { + const bus = new EventBus('SlowComboWarnBus', { + event_handler_slow_timeout: 0.01, + event_slow_timeout: 0.01, + }) + const warnings: string[] = [] + const original_warn = console.warn + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)) + if (args.length > 0) { + warnings.push(args.map(String).join(' ')) + } + } try { bus.on(TimeoutEvent, async () => { - await delay(5) + await delay(25) return 'ok' }) - const event = bus.dispatch(TimeoutEvent({ event_timeout: null })) + const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.5 })) await event.done() } finally { console.warn = original_warn - global.setTimeout = original_set_timeout - global.clearTimeout = original_clear_timeout } assert.ok( - warnings.some((message) => message.includes('Slow handler')), + warnings.some((message) => message.toLowerCase().includes('slow event handler')), 'Expected slow handler warning' ) + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event processing')), + 'Expected slow event warning' + ) }) test('event-level concurrency overrides do not bypass timeouts', async () => { @@ -369,7 +393,7 @@ test('queue-jump awaited child timeouts still fire across buses', async () => { bus_a.on(ParentEvent, async (event) => { // Use scoped bus emit to set parent tracking (event_parent_id, event_emitted_by_handler_id), // then also dispatch on bus_b for cross-bus handler execution. - // Without parent tracking, _runImmediately can't detect the queue-jump context + // Without parent tracking, processEventImmediately can't detect the queue-jump context // and falls back to waitForCompletion(), which deadlocks with global-serial. const child = event.bus?.emit(ChildEvent({ event_timeout: 0.01 }))! bus_b.dispatch(child) @@ -408,7 +432,7 @@ for (const handler_mode of STEP1_HANDLER_MODES) { semaphore.acquire = async () => { acquire_count += 1 - // Third acquire is the parent reclaim in _runImmediately finally. + // Third acquire is the parent reclaim in processEventImmediately finally. // Delay it so the parent handler timeout can fire in the middle. if (acquire_count === 3) { await delay(30) @@ -667,7 +691,7 @@ test('parent timeout cancels pending child handler results under serial handler test('event_timeout null falls back to bus default', async () => { const bus = new EventBus('TimeoutDefaultBus', { event_timeout: 0.01 }) - bus.on(TimeoutEvent, async () => { + bus.on(TimeoutEvent, async (_event: BaseEvent) => { await delay(50) return 'slow' }) @@ -840,7 +864,7 @@ test('multi-level timeout cascade with mixed cancellations', async () => { // └── 1 handler: never runs, CANCELLED when top_handler_main times out // // KEY MECHANIC: When a child event is awaited via event.done() inside a handler, -// it triggers "queue-jumping" via _runImmediately β†’ runImmediatelyAcrossBuses. +// it triggers "queue-jumping" via processEventImmediately β†’ runImmediatelyAcrossBuses. // Queue-jumped events use yield-and-reacquire: the parent handler's semaphore is // temporarily released so child handlers can acquire it normally. This means // child handlers run SERIALLY on a bus-serial bus (respecting concurrency limits). From a21dd4689fcf05be7bf93b7c476149ca53e61817 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 6 Feb 2026 18:42:14 -0800 Subject: [PATCH 55/79] all working except slower performance --- bubus-ts/README.md | 2 +- bubus-ts/src/event_bus.ts | 35 ++++++--- bubus-ts/src/lock_manager.ts | 27 +++---- bubus-ts/tests/comprehensive_patterns.test.ts | 20 +++--- bubus-ts/tests/eventbus_basics.test.ts | 4 +- bubus-ts/tests/performance.test.ts | 71 +++++++++++++++++++ 6 files changed, 125 insertions(+), 34 deletions(-) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index cdf9ae7..dba5ed5 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -228,7 +228,7 @@ propagates it via `event_emitted_by_handler_id`. This keeps parentage determinis When an event is awaited inside a handler, the event must **jump the queue**. If the runloop continues normally, it could process unrelated events ("overshoot"), breaking FIFO guarantees. -The `LockManager` pause mechanism (`requestPause`/`waitUntilResumed`) pauses the runloop while we run the awaited +The `LockManager` pause mechanism (`requestPause`/`waitUntilRunloopResumed`) pauses the runloop while we run the awaited event immediately. Once the queue-jump completes, the runloop resumes in FIFO order. This matches the Python behavior. ### C) BusScopedEvent: why it exists and how it works diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index 404c417..885e9a0 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -340,13 +340,31 @@ export class EventBus { // handler and should fall back to waitForCompletion. const proxy_result = handler_result?.status === 'started' ? handler_result : undefined const currently_active_event_result = - proxy_result ?? this.locks.getCurrentHandlerResult() ?? this.getParentEventResultAcrossAllBusses(original_event) ?? undefined + proxy_result ?? this.locks.getActiveHandlerResult() ?? this.getParentEventResultAcrossAllBusses(original_event) ?? undefined if (!currently_active_event_result) { - // Not inside any handler scope β€” fall back to normal completion waiting + // Not inside any handler scope β€” avoid queue-jump, but if this event is + // next in line we can process it immediately without waiting on the runloop. + const queue_index = this.pending_event_queue.indexOf(original_event) + const can_process_now = + queue_index === 0 && + !this.locks.isPaused() && + !this.in_flight_event_ids.has(original_event.event_id) && + !this.hasProcessedEvent(original_event) + if (can_process_now) { + this.pending_event_queue.shift() + this.in_flight_event_ids.add(original_event.event_id) + await this.scheduleEventProcessing(original_event) + if (original_event.event_status !== 'completed') { + await original_event.waitForCompletion() + } + return event + } await original_event.waitForCompletion() return event } - this.locks.ensureQueueJumpPauseForResult(currently_active_event_result) + + // ensure a pause request is set so the runloop pauses and (will resume when the event is completed) + this.locks.requestRunloopPauseForQueueJumpEvent(currently_active_event_result) if (original_event.event_status === 'completed') { return event } @@ -586,7 +604,7 @@ export class EventBus { while (this.pending_event_queue.length > 0) { await Promise.resolve() if (this.locks.isPaused()) { - await this.locks.waitUntilResumed() + await this.locks.waitUntilRunloopResumed() continue } const next_event = this.pending_event_queue[0] @@ -685,7 +703,7 @@ export class EventBus { if (result._lock) result._lock.exitHandlerRun() // create a new handler lock to track ownership of the semaphore during handler execution result._lock = new HandlerLock(semaphore) - this.locks.enterHandlerContext(result) + this.locks.enterActiveHandlerContext(result) // resolve the effective timeout by combining the event timeout and the handler timeout const effective_timeout = this.resolveEffectiveTimeout(event.event_timeout, result.handler.handler_timeout) @@ -722,8 +740,8 @@ export class EventBus { } finally { result._abort = null result._lock?.exitHandlerRun() - this.locks.exitHandlerContext(result) - this.locks.releaseQueueJumpPauseForResult(result) + this.locks.exitActiveHandlerContext(result) + this.locks.releaseRunloopPauseForQueueJumpEvent(result) if (slow_handler_warning_timer) { clearTimeout(slow_handler_warning_timer) } @@ -739,7 +757,6 @@ export class EventBus { ): Promise { // resolve the effective timeout by combining the event timeout and the handler timeout const effective_timeout = this.resolveEffectiveTimeout(event.event_timeout, result.handler.handler_timeout) - const handler_name = handler.name || 'anonymous' const run_handler = () => Promise.resolve().then(() => runWithAsyncContext(event._dispatch_context ?? null, () => handler(handler_event))) @@ -769,7 +786,7 @@ export class EventBus { // set a timeout to reject the promise if the handler takes too long const timer = setTimeout(() => { finalize(reject)( - new EventHandlerTimeoutError(`handler ${handler_name} timed out after ${timeout_seconds}s`, { + new EventHandlerTimeoutError(`${this.toString()}.on(${event.toString()}, ${result.handler.toString()}) timed out after ${timeout_seconds}s`, { event_result: result, timeout_seconds, }) diff --git a/bubus-ts/src/lock_manager.ts b/bubus-ts/src/lock_manager.ts index 6a1f0c3..2d3d0f9 100644 --- a/bubus-ts/src/lock_manager.ts +++ b/bubus-ts/src/lock_manager.ts @@ -120,10 +120,6 @@ export class HandlerLock { this.state = 'held' } - getExecutionState(): HandlerExecutionState { - return this.state - } - yieldHandlerLockForChildRun(): boolean { if (!this.semaphore || this.state !== 'held') { return false @@ -190,7 +186,7 @@ export class LockManager { readonly bus_handler_semaphore: AsyncSemaphore // Per-bus handler semaphore; created with LockManager and never swapped. private pause_depth: number // Re-entrant pause counter; increments on requestPause, decrements on release. - private pause_waiters: Array<() => void> // Resolvers for waitUntilResumed; drained when pause_depth hits 0. + private pause_waiters: Array<() => void> // Resolvers for waitUntilRunloopResumed; drained when pause_depth hits 0. private queue_jump_pause_releases: WeakMap void> // Per-handler pause release for queue-jump; cleared on handler exit. private active_handler_results: EventResult[] // Stack of active handler results for "inside handler" detection. @@ -214,6 +210,8 @@ export class LockManager { } requestPause(): () => void { + // Low-level runloop pause: increments a re-entrant counter and returns a release + // function. Used for broad, bus-scoped pauses (e.g. runImmediatelyAcrossBuses). this.pause_depth += 1 let released = false return () => { @@ -233,7 +231,7 @@ export class LockManager { } } - waitUntilResumed(): Promise { + waitUntilRunloopResumed(): Promise { if (this.pause_depth === 0) { return Promise.resolve() } @@ -246,18 +244,18 @@ export class LockManager { return this.pause_depth > 0 } - enterHandlerContext(result: EventResult): void { + enterActiveHandlerContext(result: EventResult): void { this.active_handler_results.push(result) } - exitHandlerContext(result: EventResult): void { + exitActiveHandlerContext(result: EventResult): void { const idx = this.active_handler_results.indexOf(result) if (idx >= 0) { this.active_handler_results.splice(idx, 1) } } - getCurrentHandlerResult(): EventResult | undefined { + getActiveHandlerResult(): EventResult | undefined { return this.active_handler_results[this.active_handler_results.length - 1] } @@ -265,18 +263,23 @@ export class LockManager { // For cross-bus queue-jumping, EventBus.processEventImmediately uses getParentEventResultAcrossAllBusses() // to walk up the parent event tree, and the bus proxy passes handler_result // to processEventImmediately so it can yield/reacquire the correct semaphore. - isInsideHandlerContext(): boolean { + isAnyHandlerActive(): boolean { return this.active_handler_results.length > 0 } - ensureQueueJumpPauseForResult(result: EventResult): void { + requestRunloopPauseForQueueJumpEvent(result: EventResult): void { + // Queue-jump pause: wraps requestPause with per-handler deduping so repeated + // calls during the same handler run don't stack pauses. Released via + // releaseRunloopPauseForQueueJumpEvent when the handler finishes. if (this.queue_jump_pause_releases.has(result)) { return } this.queue_jump_pause_releases.set(result, this.requestPause()) } - releaseQueueJumpPauseForResult(result: EventResult): void { + // release the eventt bus runloop pause for a given event result if there is a pause request for it + // i.e. if it was a queue-jump event that was processed immediately, notify the runloop to resume + releaseRunloopPauseForQueueJumpEvent(result: EventResult): void { const release_pause = this.queue_jump_pause_releases.get(result) if (!release_pause) { return diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index 518dfe0..b843f05 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -326,13 +326,13 @@ test('isInsideHandler() is per-bus, not global', async () => { let bus_b_inside_during_b_handler = false bus_a.on(EventA, () => { - bus_a_inside_during_a_handler = bus_a.locks.isInsideHandlerContext() - bus_b_inside_during_a_handler = bus_b.locks.isInsideHandlerContext() + bus_a_inside_during_a_handler = bus_a.locks.isAnyHandlerActive() + bus_b_inside_during_a_handler = bus_b.locks.isAnyHandlerActive() }) bus_b.on(EventB, () => { - bus_a_inside_during_b_handler = bus_a.locks.isInsideHandlerContext() - bus_b_inside_during_b_handler = bus_b.locks.isInsideHandlerContext() + bus_a_inside_during_b_handler = bus_a.locks.isAnyHandlerActive() + bus_b_inside_during_b_handler = bus_b.locks.isAnyHandlerActive() }) // Dispatch to bus_a first, wait for completion so bus_b has no active handlers @@ -344,16 +344,16 @@ test('isInsideHandler() is per-bus, not global', async () => { await bus_b.waitUntilIdle() // During bus_a's handler: bus_a should report inside, bus_b should not - assert.equal(bus_a_inside_during_a_handler, true, 'bus_a.locks.isInsideHandlerContext() should be true during bus_a handler') - assert.equal(bus_b_inside_during_a_handler, false, 'bus_b.locks.isInsideHandlerContext() should be false during bus_a handler') + assert.equal(bus_a_inside_during_a_handler, true, 'bus_a.locks.isAnyHandlerActive() should be true during bus_a handler') + assert.equal(bus_b_inside_during_a_handler, false, 'bus_b.locks.isAnyHandlerActive() should be false during bus_a handler') // During bus_b's handler: bus_b should report inside, bus_a should not - assert.equal(bus_b_inside_during_b_handler, true, 'bus_b.locks.isInsideHandlerContext() should be true during bus_b handler') - assert.equal(bus_a_inside_during_b_handler, false, 'bus_a.locks.isInsideHandlerContext() should be false during bus_b handler') + assert.equal(bus_b_inside_during_b_handler, true, 'bus_b.locks.isAnyHandlerActive() should be true during bus_b handler') + assert.equal(bus_a_inside_during_b_handler, false, 'bus_a.locks.isAnyHandlerActive() should be false during bus_b handler') // After all handlers complete, neither bus should report inside - assert.equal(bus_a.locks.isInsideHandlerContext(), false, 'bus_a.locks.isInsideHandlerContext() should be false after idle') - assert.equal(bus_b.locks.isInsideHandlerContext(), false, 'bus_b.locks.isInsideHandlerContext() should be false after idle') + assert.equal(bus_a.locks.isAnyHandlerActive(), false, 'bus_a.locks.isAnyHandlerActive() should be false after idle') + assert.equal(bus_b.locks.isAnyHandlerActive(), false, 'bus_b.locks.isAnyHandlerActive() should be false after idle') }) test('dispatch multiple, await one skips others until after handler completes', async () => { diff --git a/bubus-ts/tests/eventbus_basics.test.ts b/bubus-ts/tests/eventbus_basics.test.ts index d44e032..abc3bff 100644 --- a/bubus-ts/tests/eventbus_basics.test.ts +++ b/bubus-ts/tests/eventbus_basics.test.ts @@ -59,7 +59,7 @@ test('EventBus exposes locks API surface', () => { const locks = bus.locks as unknown as Record assert.equal(typeof locks.requestPause, 'function') - assert.equal(typeof locks.waitUntilResumed, 'function') + assert.equal(typeof locks.waitUntilRunloopResumed, 'function') assert.equal(typeof locks.isPaused, 'function') assert.equal(typeof locks.waitForIdle, 'function') assert.equal(typeof locks.notifyIdleListeners, 'function') @@ -78,7 +78,7 @@ test('EventBus locks methods are callable and preserve semaphore resolution beha assert.equal(bus.locks.isPaused(), true) let resumed = false - const resumed_promise = bus.locks.waitUntilResumed().then(() => { + const resumed_promise = bus.locks.waitUntilRunloopResumed().then(() => { resumed = true }) await Promise.resolve() diff --git a/bubus-ts/tests/performance.test.ts b/bubus-ts/tests/performance.test.ts index 8e3fd59..0d4d849 100644 --- a/bubus-ts/tests/performance.test.ts +++ b/bubus-ts/tests/performance.test.ts @@ -126,12 +126,71 @@ test('50k events with ephemeral on/off handler registration across 2 buses', { t const bus_b = new EventBus('SharedBusB', { max_history_size: total_events }) let processed_a = 0 let processed_b = 0 + let on_ms = 0 + let off_ms = 0 + let dispatch_a_ms = 0 + let dispatch_b_ms = 0 + let done_ms = 0 + let process_a_ms = 0 + let process_b_ms = 0 + let handler_a_ms = 0 + let handler_b_ms = 0 // Persistent handler on bus_b that forwards count bus_b.on(RequestEvent, () => { processed_b += 1 }) + const bus_a_any = bus_a as any + const bus_b_any = bus_b as any + const original_process_a = typeof bus_a_any.processEvent === 'function' ? bus_a_any.processEvent.bind(bus_a) : null + const original_process_b = typeof bus_b_any.processEvent === 'function' ? bus_b_any.processEvent.bind(bus_b) : null + const original_run_handler_a = + typeof bus_a_any.runEventHandler === 'function' ? bus_a_any.runEventHandler.bind(bus_a) : null + const original_run_handler_b = + typeof bus_b_any.runEventHandler === 'function' ? bus_b_any.runEventHandler.bind(bus_b) : null + + if (original_process_a) { + bus_a_any.processEvent = async (event: any) => { + const t = performance.now() + try { + return await original_process_a(event) + } finally { + process_a_ms += performance.now() - t + } + } + } + if (original_process_b) { + bus_b_any.processEvent = async (event: any) => { + const t = performance.now() + try { + return await original_process_b(event) + } finally { + process_b_ms += performance.now() - t + } + } + } + if (original_run_handler_a) { + bus_a_any.runEventHandler = async (...args: any[]) => { + const t = performance.now() + try { + return await original_run_handler_a(...args) + } finally { + handler_a_ms += performance.now() - t + } + } + } + if (original_run_handler_b) { + bus_b_any.runEventHandler = async (...args: any[]) => { + const t = performance.now() + try { + return await original_run_handler_b(...args) + } finally { + handler_b_ms += performance.now() - t + } + } + } + global.gc?.() const mem_before = process.memoryUsage() const t0 = Date.now() @@ -141,17 +200,27 @@ test('50k events with ephemeral on/off handler registration across 2 buses', { t const ephemeral_handler = () => { processed_a += 1 } + let t = performance.now() bus_a.on(RequestEvent, ephemeral_handler) + on_ms += performance.now() - t // Dispatch on bus_a, forward to bus_b const event = RequestEvent({}) + t = performance.now() const ev_a = bus_a.dispatch(event) + dispatch_a_ms += performance.now() - t + t = performance.now() bus_b.dispatch(event) + dispatch_b_ms += performance.now() - t + t = performance.now() await ev_a.done() + done_ms += performance.now() - t // Tear down ephemeral handler + t = performance.now() bus_a.off(RequestEvent, ephemeral_handler) + off_ms += performance.now() - t } await bus_a.waitUntilIdle() @@ -168,6 +237,8 @@ test('50k events with ephemeral on/off handler registration across 2 buses', { t console.log( `\n perf: ${total_events} events with ephemeral on/off in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + `\n dispatch: bus_a=${processed_a} | bus_b=${processed_b}` + + `\n timings: on=${on_ms.toFixed(0)}ms | off=${off_ms.toFixed(0)}ms | dispatch_a=${dispatch_a_ms.toFixed(0)}ms | dispatch_b=${dispatch_b_ms.toFixed(0)}ms | done=${done_ms.toFixed(0)}ms` + + `\n processing: bus_a=${process_a_ms.toFixed(0)}ms | bus_b=${process_b_ms.toFixed(0)}ms | handlers_a=${handler_a_ms.toFixed(0)}ms | handlers_b=${handler_b_ms.toFixed(0)}ms` + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${(((mem_done.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB | heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB` + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + From 2d51a3917102e0dbffc8f467e7700df07fc72862 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 6 Feb 2026 23:29:07 -0800 Subject: [PATCH 56/79] add type inference for return values --- bubus-ts/README.md | 16 +++++------- bubus-ts/src/base_event.ts | 29 ++++++++++++++------- bubus-ts/src/event_bus.ts | 8 +++--- bubus-ts/src/type_inference.test.ts | 38 ++++++++++++++++++++++++++++ bubus-ts/src/types.ts | 13 +++++++++- bubus-ts/tests/event_results.test.ts | 13 ++++++++++ 6 files changed, 95 insertions(+), 22 deletions(-) create mode 100644 bubus-ts/src/type_inference.test.ts diff --git a/bubus-ts/README.md b/bubus-ts/README.md index dba5ed5..e1626f6 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -27,7 +27,7 @@ gotchas we uncovered while matching behavior. It intentionally does **not** re-d ### 4) Monotonic timestamps - JS `Date.now()` is not strictly monotonic at millisecond granularity. -- To keep FIFO tests stable, we generate strictly increasing ISO timestamps via `BaseEvent.nextIsoTimestamp()`. +- To keep FIFO tests stable, we generate strictly increasing timestamps via `BaseEvent.nextTimestamp()` (returns `{ date, isostring, ts }`). ### 5) No middleware, no WAL, no SQLite mirrors @@ -100,16 +100,13 @@ Handlers can be configured with `HandlerOptions`: ```ts bus.on(SomeEvent, handler, { - order: -10, // serial ordering (lower runs earlier) handler_concurrency: 'parallel', + handler_timeout: 10, // per-handler timeout in seconds }) ``` -- `order: number` runs handlers in ascending order (serial). -- `order: null` puts the handler into the parallel bucket. -- `handler_concurrency` allows per-handler overrides. - -If an event sets `handler_concurrency: "parallel"`, that wins even if a handler is ordered. +- `handler_concurrency` allows per-handler concurrency overrides. +- `handler_timeout` sets a per-handler timeout in seconds (overrides the bus default when lower). ## Semaphores (how concurrency is enforced) @@ -152,7 +149,7 @@ under different `event_concurrency` / `handler_concurrency` configurations. - `notifyFindListeners(event)` - creates handler results (`event_results`) - runs handlers (respecting handler semaphore) - - decrements `event_pending_bus_count` and calls `event.tryFinalizeCompletion()` + - decrements `event_pending_bus_count` and calls `event.markCompleted(false)` (completes only if all buses and children are done) ### 2) Event concurrency modes (`event_concurrency`) @@ -188,7 +185,8 @@ When a handler on Bus A calls `bus_b.dispatch(event)` without awaiting: When `event.done()` is awaited inside a handler, **queue-jump** happens: -1. `BaseEvent.done()` detects it's inside a handler and calls `processEventImmediately()`. +1. `BaseEvent.done()` delegates to `bus.processEventImmediately()`, which detects whether we're inside a handler + (via `getActiveHandlerResult()` / `getParentEventResultAcrossAllBusses()`). If not inside a handler, it falls back to `waitForCompletion()`. 2. `processEventImmediately()` **yields** the parent handler's concurrency semaphore (if held) so child handlers can acquire it. 3. `processEventImmediately()` removes the event from the pending queue (if present). 4. `runImmediatelyAcrossBuses()` processes the event immediately on all buses where it is queued. diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index 2d8f293..03df71d 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -44,14 +44,19 @@ export type EventSchema = z.ZodObject = z.input> export type EventInit = Omit, keyof BaseEventFields> & Partial -export type EventFactory = { - (data: EventInit): BaseEvent & z.infer> - new (data: EventInit): BaseEvent & z.infer> +type EventWithResult = BaseEvent & { __event_result_type__?: TResult } + +type ResultTypeFromShape = + TShape extends { event_result_schema: infer S } ? (S extends z.ZodTypeAny ? z.infer : unknown) : unknown + +export type EventFactory = { + (data: EventInit): EventWithResult & z.infer> + new (data: EventInit): EventWithResult & z.infer> schema: EventSchema event_type?: string event_result_schema?: z.ZodTypeAny event_result_type?: string - fromJSON?: (data: unknown) => BaseEvent & z.infer> + fromJSON?: (data: unknown) => EventWithResult & z.infer> } type ZodShapeFrom> = { @@ -143,12 +148,18 @@ export class BaseEvent { return { date, isostring: date.toISOString(), ts } } - static extend(event_type: string, shape?: TShape): EventFactory - static extend>(event_type: string, shape?: TShape): EventFactory> + static extend( + event_type: string, + shape?: TShape + ): EventFactory> + static extend>( + event_type: string, + shape?: TShape + ): EventFactory, ResultTypeFromShape> static extend>( event_type: string, shape: TShape = {} as TShape - ): EventFactory> { + ): EventFactory, ResultTypeFromShape> { const raw_shape = shape as Record const event_result_schema = is_zod_schema(raw_shape.event_result_schema) ? (raw_shape.event_result_schema as z.ZodTypeAny) : undefined @@ -168,7 +179,7 @@ export class BaseEvent { } } - type FactoryResult = BaseEvent & z.infer>> + type FactoryResult = EventWithResult> & z.infer>> function EventFactory(data: EventInit>): FactoryResult { return new ExtendedEvent(data) as FactoryResult @@ -182,7 +193,7 @@ export class BaseEvent { EventFactory.prototype = ExtendedEvent.prototype ;(EventFactory as unknown as { class: typeof ExtendedEvent }).class = ExtendedEvent - return EventFactory as unknown as EventFactory> + return EventFactory as unknown as EventFactory, ResultTypeFromShape> } static parse(this: T, data: unknown): InstanceType { diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index 885e9a0..c45419e 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -11,7 +11,7 @@ import { } from './event_handler.js' import { logTree } from './logging.js' -import type { EventHandlerFunction, EventKey, FindOptions, HandlerOptions } from './types.js' +import type { EventClass, EventHandlerFunction, EventKey, FindOptions, HandlerOptions, UntypedEventHandlerFunction } from './types.js' type FindWaiter = { // similar to a handler, except its for .find() calls @@ -158,7 +158,9 @@ export class EventBus { this.locks.clear() } - on(event_key: EventKey | '*', handler: EventHandlerFunction, options: HandlerOptions = {}): EventHandler { + on(event_key: EventClass, handler: EventHandlerFunction, options?: HandlerOptions): EventHandler + on(event_key: string | '*', handler: UntypedEventHandlerFunction, options?: HandlerOptions): EventHandler + on(event_key: EventKey | '*', handler: EventHandlerFunction | UntypedEventHandlerFunction, options: HandlerOptions = {}): EventHandler { const normalized_key = this.normalizeEventKey(event_key) const handler_name = handler.name || 'anonymous' const { isostring: handler_registered_at, ts: handler_registered_ts } = BaseEvent.nextTimestamp() @@ -712,7 +714,7 @@ export class EventBus { try { const abort_signal = result.markStarted() const handler_result = await Promise.race([this.runHandlerWithTimeout(event, handler, handler_event, result), abort_signal]) - if (event.event_result_schema) { + if (event.event_result_schema && handler_result !== undefined) { // if there is a result schema to enforce, parse the handler's return value and mark the event as completed or errored if it doesn't match the schema const parsed = event.event_result_schema.safeParse(handler_result) if (parsed.success) { diff --git a/bubus-ts/src/type_inference.test.ts b/bubus-ts/src/type_inference.test.ts new file mode 100644 index 0000000..89a5d8c --- /dev/null +++ b/bubus-ts/src/type_inference.test.ts @@ -0,0 +1,38 @@ +import { z } from 'zod' + +import { BaseEvent } from './base_event.js' +import { EventBus } from './event_bus.js' +import type { EventResultType } from './types.js' + +type IsEqual = + (() => T extends A ? 1 : 2) extends (() => T extends B ? 1 : 2) ? true : false +type Assert = T + +const InferableResultEvent = BaseEvent.extend('InferableResultEvent', { + target_id: z.string(), + event_result_schema: z.object({ ok: z.boolean() }), +}) + +type InferableResult = EventResultType> +type _assert_inferable_result = Assert> + +const NoSchemaEvent = BaseEvent.extend('NoSchemaEventForInference', {}) +type NoSchemaResult = EventResultType> +type _assert_no_schema_result = Assert> + +const bus = new EventBus('TypeInferenceBus') + +bus.on(InferableResultEvent, (event) => { + const _target: string = event.target_id + return { ok: true } +}) + +bus.on(InferableResultEvent, () => undefined) + +// @ts-expect-error non-void return must match event_result_schema for inferable event keys +bus.on(InferableResultEvent, () => 'not-ok') + +// String/wildcard keys remain best-effort and do not strongly enforce return shapes. +bus.on('InferableResultEvent', () => 'anything') +bus.on('*', () => 123) + diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts index 7ffd0fa..4e8a591 100644 --- a/bubus-ts/src/types.ts +++ b/bubus-ts/src/types.ts @@ -7,7 +7,18 @@ export type EventClass = { event_type?: string export type EventKey = string | EventClass -export type EventHandlerFunction = (event: T) => void | Promise +export type EventWithResult = BaseEvent & { __event_result_type__?: TResult } + +export type EventResultType = + TEvent extends { __event_result_type__?: infer TResult } ? TResult : unknown + +export type EventHandlerFunction = ( + event: T +) => void | EventResultType | Promise> + +// For string and wildcard subscriptions we cannot reliably infer which event +// type will arrive, so return type checking intentionally degrades to unknown. +export type UntypedEventHandlerFunction = (event: T) => void | unknown | Promise export type HandlerOptions = { handler_concurrency?: ConcurrencyMode diff --git a/bubus-ts/tests/event_results.test.ts b/bubus-ts/tests/event_results.test.ts index 14c67c1..cda71ff 100644 --- a/bubus-ts/tests/event_results.test.ts +++ b/bubus-ts/tests/event_results.test.ts @@ -43,6 +43,19 @@ test('event_result_schema validates handler results', async () => { assert.deepEqual(result.result, { value: 'hello', count: 2 }) }) +test('event_result_schema allows undefined handler return values', async () => { + const bus = new EventBus('ResultSchemaUndefinedBus') + + bus.on(ObjectResultEvent, () => {}) + + const event = bus.dispatch(ObjectResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, undefined) +}) + test('invalid result marks handler error', async () => { const bus = new EventBus('ResultSchemaErrorBus') From e79782b8104af3357e98100a8a5c9114544329c4 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 6 Feb 2026 23:32:19 -0800 Subject: [PATCH 57/79] update readme --- bubus-ts/README.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index e1626f6..4957c2c 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -108,6 +108,25 @@ bus.on(SomeEvent, handler, { - `handler_concurrency` allows per-handler concurrency overrides. - `handler_timeout` sets a per-handler timeout in seconds (overrides the bus default when lower). +## TypeScript Return Type Enforcement (Edge Cases) + +TypeScript can only enforce handler return types when the event type is inferable at compile time. + +- `bus.on(EventFactoryOrClass, handler)`: + - Return values are type-checked against the event's `event_result_schema` (if defined). + - `undefined` (or no return) is always allowed. +- `bus.on('SomeEventName', handler)`: + - Return type checking is best-effort only (treated as unknown in typing). + - Use class/factory keys when you want compile-time return-shape enforcement. +- `bus.on('*', handler)`: + - Return type checking is intentionally loose (best-effort only), because wildcard handlers may receive many event types, including forwarded events from other buses. + - In practice, wildcard handlers are expected to be side-effect/forwarding handlers and usually return `undefined`. + +Runtime behavior is still consistent across all key styles: + +- If an event has `event_result_schema` and a handler returns a non-`undefined` value, that value is validated at runtime. +- If the handler returns `undefined`, schema validation is skipped and the result is accepted. + ## Semaphores (how concurrency is enforced) We use four semaphores: From 03a244931ccf980aab4c1f70f6e7041adf8ae72c Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Fri, 6 Feb 2026 23:58:26 -0800 Subject: [PATCH 58/79] fix perf regression --- bubus-ts/src/lock_manager.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/bubus-ts/src/lock_manager.ts b/bubus-ts/src/lock_manager.ts index 2d3d0f9..4613947 100644 --- a/bubus-ts/src/lock_manager.ts +++ b/bubus-ts/src/lock_manager.ts @@ -299,6 +299,13 @@ export class LockManager { } notifyIdleListeners(): void { + // Fast-path: most completions have no waitUntilIdle() callers waiting, + // so skip expensive idle snapshot scans in that common case. + if (this.idle_waiters.length === 0) { + this.idle_check_streak = 0 + return + } + if (!this.getIdleSnapshot()) { this.idle_check_streak = 0 if (this.idle_waiters.length > 0) { From f20916c6f1410a99de7a2204051c325964c8ea90 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sat, 7 Feb 2026 19:58:45 -0800 Subject: [PATCH 59/79] more cleanup and naming improvements --- bubus-ts/README.md | 85 +++++-- bubus-ts/src/base_event.ts | 237 ++++++++++++------ bubus-ts/src/event_bus.ts | 172 +++++++------ bubus-ts/src/event_handler.ts | 50 ++-- bubus-ts/src/event_result.ts | 166 +++++++++++- bubus-ts/src/index.ts | 10 +- bubus-ts/src/lock_manager.ts | 79 +++--- bubus-ts/src/type_inference.test.ts | 17 +- bubus-ts/src/types.ts | 76 +++++- bubus-ts/tests/_perf_profile.ts | 4 +- bubus-ts/tests/comprehensive_patterns.test.ts | 30 +-- bubus-ts/tests/event_bus_proxy.test.ts | 3 + bubus-ts/tests/eventbus_basics.test.ts | 111 ++++++-- bubus-ts/tests/locking.test.ts | 78 +++--- bubus-ts/tests/parent_child.test.ts | 3 + bubus-ts/tests/performance.test.ts | 14 +- bubus-ts/tests/timeout.test.ts | 34 +-- bubus-ts/tests/typed_results.test.ts | 50 ++++ 18 files changed, 858 insertions(+), 361 deletions(-) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index 4957c2c..006f384 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -52,7 +52,7 @@ All options are passed to `new EventBus(name, options)`. - `"bus-serial"` enforces FIFO per bus, allows cross-bus overlap. - `"parallel"` allows events to process concurrently. - `"auto"` uses the bus default (mostly useful for overrides). -- `handler_concurrency?: "global-serial" | "bus-serial" | "parallel" | "auto"` (default: `"bus-serial"`) +- `event_handler_concurrency?: "global-serial" | "bus-serial" | "parallel" | "auto"` (default: `"bus-serial"`) - Controls how many **handlers** run at once for each event. - Same semantics as `event_concurrency`, but applied to handler execution. - `event_timeout?: number | null` (default: `60`) @@ -79,33 +79,33 @@ const FastEvent = BaseEvent.extend('FastEvent', { const event = FastEvent({ payload: 'x', event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) // Per-handler override (lower precedence) -bus.on(FastEvent, handler, { handler_concurrency: 'parallel' }) +bus.on(FastEvent, handler, { event_handler_concurrency: 'parallel' }) ``` Precedence order (highest β†’ lowest): -1. Event instance overrides (`event_concurrency`, `handler_concurrency`) -2. Handler options (`handler_concurrency`) -3. Bus defaults (`event_concurrency`, `handler_concurrency`) +1. Event instance overrides (`event_concurrency`, `event_handler_concurrency`) +2. Handler options (`event_handler_concurrency`) +3. Bus defaults (`event_concurrency`, `event_handler_concurrency`) `"auto"` resolves to the bus default. ## Handler Options -Handlers can be configured with `HandlerOptions`: +Handlers can be configured at registration time: ```ts bus.on(SomeEvent, handler, { - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', handler_timeout: 10, // per-handler timeout in seconds }) ``` -- `handler_concurrency` allows per-handler concurrency overrides. +- `event_handler_concurrency` allows per-handler concurrency overrides. - `handler_timeout` sets a per-handler timeout in seconds (overrides the bus default when lower). ## TypeScript Return Type Enforcement (Edge Cases) @@ -127,6 +127,61 @@ Runtime behavior is still consistent across all key styles: - If an event has `event_result_schema` and a handler returns a non-`undefined` value, that value is validated at runtime. - If the handler returns `undefined`, schema validation is skipped and the result is accepted. +## Throughput + Memory Behavior (Current) + +This section documents the current runtime profile and the important edge cases. It is intentionally conservative: +we describe what is enforced today, not theoretical best-case behavior. + +### Throughput model + +- Baseline throughput in tests is gated at `<30s` for: + - `50k events within reasonable time` + - `50k events with ephemeral on/off handler registration across 2 buses` + - `500 ephemeral buses with 100 events each` +- The major hot-path operations are linear in collection sizes: + - Per event, handler matching is `O(total handlers on bus)` (`exact` scan + `*` scan). + - `.off()` is `O(total handlers on bus)` for matching/removal. + - Queue-jump (`await event.done()` inside handlers) does cross-bus discovery by walking `event_path` and iterating `EventBus._all_instances`, so cost grows with buses and forwarding depth. +- `waitUntilIdle()` is best used at batch boundaries, not per event: + - Idle checks call `isIdle()`, which scans `event_history` and handler results. + - There is a fast-path that skips idle scans when no idle waiters exist, which keeps normal dispatch/complete flows fast even with large history. +- Concurrency settings are a direct throughput limiter: + - `global-serial` and `bus-serial` intentionally serialize work. + - `parallel` increases throughput but can increase transient memory if producers outpace consumers. + +### Memory model + +- Per bus, strong references are held for: + - `handlers` + - `pending_event_queue` + - `in_flight_event_ids` + - `event_history` (bounded by `max_history_size`, or unbounded if `null`) + - active `find()` waiters until match/timeout +- Per event, retained state includes: + - `event_results` (per-handler result objects) + - descendant links in `event_results[].event_children` +- History trimming behavior: + - Completed events are evicted first (oldest first). + - If still over limit, oldest remaining events are dropped even if pending, and a warning is logged. + - Eviction calls `event._gc()` to clear internal references (`event_results`, child arrays, bus/context pointers). +- Memory is not strictly bounded by only `pending_queue_size + max_history_size`: + - A retained parent event can hold references to many children/grandchildren via `event_children`. + - So effective retained memory can exceed a simple `event_count * avg_event_size` bound in high fan-out trees. +- `destroy()` is recommended for deterministic cleanup, but not required for GC safety: + - `_all_instances` is WeakRef-based, so unreferenced buses can be collected without calling `.destroy()`. + - There is a GC regression test for this (`unreferenced buses with event history are garbage collected without destroy()`). +- `heapUsed` vs `rss`: + - `heapUsed` returning near baseline after GC is the primary leak signal in tests. + - `rss` can stay elevated due to V8 allocator high-water behavior and is not, by itself, a proof of leak. + +### Practical guidance for high-load deployments + +- Keep `max_history_size` finite in production. +- Avoid very large wildcard handler sets on hot event types. +- Avoid calling `waitUntilIdle()` for every single event in large streams; prefer periodic/batch waits. +- Be aware that very deep/high-fan-out parent-child graphs increase retained memory until parent events are evicted. +- Use `.destroy()` for explicit lifecycle control in request-scoped or short-lived bus patterns. + ## Semaphores (how concurrency is enforced) We use four semaphores: @@ -142,7 +197,7 @@ mutex checks throughout the code. ## Full lifecycle across concurrency modes Below is the complete execution flow for nested events, including forwarding across buses, and how it behaves -under different `event_concurrency` / `handler_concurrency` configurations. +under different `event_concurrency` / `event_handler_concurrency` configurations. ### 1) Base execution flow (applies to all modes) @@ -179,9 +234,9 @@ under different `event_concurrency` / `handler_concurrency` configurations. **Mixed buses:** each bus enforces its own event mode. Forwarding to another bus does not inherit the source bus’s mode. -### 3) Handler concurrency modes (`handler_concurrency`) +### 3) Handler concurrency modes (`event_handler_concurrency`) -`handler_concurrency` controls how handlers run **for a single event**: +`event_handler_concurrency` controls how handlers run **for a single event**: - **`global-serial`**: only one handler at a time across all buses using `LockManager.global_handler_semaphore`. - **`bus-serial`**: handlers serialize per bus. @@ -190,7 +245,7 @@ under different `event_concurrency` / `handler_concurrency` configurations. **Interaction with event concurrency:** Even if events are parallel, handlers can still be serialized: -`event_concurrency: "parallel"` + `handler_concurrency: "bus-serial"` means events start concurrently but handler execution on a bus is serialized. +`event_concurrency: "parallel"` + `event_handler_concurrency: "bus-serial"` means events start concurrently but handler execution on a bus is serialized. ### 4) Forwarding across buses (non-awaited) @@ -221,8 +276,8 @@ This means queue-jumped handlers run serially on a `bus-serial` bus, not in para Highest β†’ lowest: -1. Event instance fields (`event_concurrency`, `handler_concurrency`) -2. Handler options (`handler_concurrency`) +1. Event instance fields (`event_concurrency`, `event_handler_concurrency`) +2. Handler options (`event_handler_concurrency`) 3. Bus defaults `"auto"` always resolves to the bus default. diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts index 03df71d..b333b89 100644 --- a/bubus-ts/src/base_event.ts +++ b/bubus-ts/src/base_event.ts @@ -5,34 +5,54 @@ import type { EventBus } from './event_bus.js' import { EventResult } from './event_result.js' import type { ConcurrencyMode, Deferred } from './lock_manager.js' import { CONCURRENCY_MODES, withResolvers } from './lock_manager.js' +import { extractZodShape, getStringTypeName, isZodSchema, toJsonSchema } from './types.js' export const BaseEventSchema = z .object({ event_id: z.string().uuid(), event_created_at: z.string().datetime(), + event_created_ts: z.number().optional(), event_type: z.string(), event_timeout: z.number().positive().nullable(), event_parent_id: z.string().uuid().optional(), event_path: z.array(z.string()).optional(), event_result_type: z.string().optional(), event_result_schema: z.unknown().optional(), + event_emitted_by_handler_id: z.string().uuid().optional(), + event_pending_bus_count: z.number().nonnegative().optional(), + event_status: z.enum(['pending', 'started', 'completed']).optional(), + event_started_at: z.string().datetime().optional(), + event_started_ts: z.number().optional(), + event_completed_at: z.string().datetime().optional(), + event_completed_ts: z.number().optional(), + event_results: z.array(z.unknown()).optional(), event_concurrency: z.enum(CONCURRENCY_MODES).optional(), - handler_concurrency: z.enum(CONCURRENCY_MODES).optional(), + event_handler_concurrency: z.enum(CONCURRENCY_MODES).optional(), }) - .passthrough() + .loose() export type BaseEventData = z.infer type BaseEventFields = Pick< BaseEventData, | 'event_id' | 'event_created_at' + | 'event_created_ts' | 'event_type' | 'event_timeout' | 'event_parent_id' + | 'event_path' | 'event_result_type' | 'event_result_schema' + | 'event_emitted_by_handler_id' + | 'event_pending_bus_count' + | 'event_status' + | 'event_started_at' + | 'event_started_ts' + | 'event_completed_at' + | 'event_completed_ts' + | 'event_results' | 'event_concurrency' - | 'handler_concurrency' + | 'event_handler_concurrency' > export type BaseEventInit> = TFields & Partial @@ -40,23 +60,27 @@ export type BaseEventInit> = TFields & P type BaseEventSchemaShape = typeof BaseEventSchema.shape export type EventSchema = z.ZodObject +type EventPayload = z.infer> type EventInput = z.input> export type EventInit = Omit, keyof BaseEventFields> & Partial type EventWithResult = BaseEvent & { __event_result_type__?: TResult } -type ResultTypeFromShape = - TShape extends { event_result_schema: infer S } ? (S extends z.ZodTypeAny ? z.infer : unknown) : unknown +type ResultTypeFromShape = TShape extends { event_result_schema: infer S } + ? S extends z.ZodTypeAny + ? z.infer + : unknown + : unknown export type EventFactory = { - (data: EventInit): EventWithResult & z.infer> - new (data: EventInit): EventWithResult & z.infer> + (data: EventInit): EventWithResult & EventPayload + new (data: EventInit): EventWithResult & EventPayload schema: EventSchema event_type?: string event_result_schema?: z.ZodTypeAny event_result_type?: string - fromJSON?: (data: unknown) => EventWithResult & z.infer> + fromJSON?: (data: unknown) => EventWithResult & EventPayload } type ZodShapeFrom> = { @@ -68,34 +92,36 @@ type ZodShapeFrom> = { } export class BaseEvent { + // event metadata fields event_id!: string // unique uuidv7 identifier for the event - event_created_at!: string // ISO datetime string version of event_created_ts + event_created_at!: string // ISO datetime string version of event_created_at event_created_ts!: number // nanosecond monotonic version of event_created_at event_type!: string // should match the class name of the event, e.g. BaseEvent.extend("MyEvent").event_type === "MyEvent" - event_timeout!: number | null // maximum time in seconds that each handler for the event is allowed to run before it is aborted + event_timeout!: number | null // maximum time in seconds that the event is allowed to run before it is aborted event_parent_id?: string // id of the parent event that triggered this event, if this event was emitted during handling of another event event_path!: string[] // list of bus names that the event has been dispatched to, including the current bus event_result_schema?: z.ZodTypeAny // optional zod schema to enforce the shape of return values from handlers event_result_type?: string // optional string identifier of the type of the return values from handlers, to make it easier to reference common shapes across networkboundaries e.g. ScreenshotEventResultType - event_results!: Map + event_results!: Map> // map of handler ids to EventResult objects for the event event_emitted_by_handler_id?: string // if event was emitted inside a handler while it was running, this will be set to the enclosing handler's handler id - event_pending_bus_count!: number // Number of buses that have accepted this event and not yet finished processing or removed it from their queues. - event_status!: 'pending' | 'started' | 'completed' - event_started_at?: string - event_started_ts?: number - event_completed_at?: string - event_completed_ts?: number - event_concurrency?: ConcurrencyMode - handler_concurrency?: ConcurrencyMode - + event_pending_bus_count!: number // number of buses that have accepted this event and not yet finished processing or removed it from their queues (for queue-jump processing) + event_status!: 'pending' | 'started' | 'completed' // processing status of the event as a whole, no separate 'error' state because events can not error, only individual handlers can + event_started_at?: string // ISO datetime string version of event_started_ts + event_started_ts?: number // nanosecond monotonic version of event_started_at + event_completed_at?: string // ISO datetime string version of event_completed_ts + event_completed_ts?: number // nanosecond monotonic version of event_completed_at + event_concurrency?: ConcurrencyMode // concurrency mode for the event as a whole in relation to other events + event_handler_concurrency?: ConcurrencyMode // concurrency mode for the handlers within the event + + static event_type?: string // class name of the event, e.g. BaseEvent.extend("MyEvent").event_type === "MyEvent" + static schema = BaseEventSchema // zod schema for the event data fields, used to parse and validate event data when creating a new event + + // internal runtime state bus?: EventBus // shortcut to the bus that dispatched this event, for event.bus.dispatch(event) auto-child tracking via proxy wrapping - _original_event?: BaseEvent // underlying event object that was dispatched, if this is a bus-scoped proxy wrapping it - _dispatch_context?: unknown | null // captured AsyncLocalStorage context at dispatch site, used to restore that context when running handlers - - static schema = BaseEventSchema - static event_type?: string + _event_original?: BaseEvent // underlying event object that was dispatched, if this is a bus-scoped proxy wrapping it + _event_dispatch_context?: unknown | null // captured AsyncLocalStorage context at dispatch site, used to restore that context when running handlers - _done: Deferred | null + _event_done_signal: Deferred | null constructor(data: BaseEventInit> = {}) { const ctor = this.constructor as typeof BaseEvent & { @@ -104,7 +130,7 @@ export class BaseEvent { } const event_type = data.event_type ?? ctor.event_type ?? ctor.name const event_result_schema = (data.event_result_schema ?? ctor.event_result_schema) as z.ZodTypeAny | undefined - const event_result_type = data.event_result_type ?? ctor.event_result_type + const event_result_type = data.event_result_type ?? ctor.event_result_type ?? getStringTypeName(event_result_schema) const event_id = data.event_id ?? uuidv7() const { isostring: default_event_created_at, ts: event_created_ts } = BaseEvent.nextTimestamp() const event_created_at = data.event_created_at ?? default_event_created_at @@ -127,31 +153,64 @@ export class BaseEvent { const parsed_path = (parsed as { event_path?: string[] }).event_path this.event_path = Array.isArray(parsed_path) ? [...parsed_path] : [] - this.event_pending_bus_count = 0 - this.event_status = 'pending' + + // load event results from potentially raw objects from JSON to proper EventResult objects + this.event_results = hydrateEventResults(this, (parsed as { event_results?: unknown }).event_results) + this.event_pending_bus_count = + typeof (parsed as { event_pending_bus_count?: unknown }).event_pending_bus_count === 'number' + ? Math.max(0, Number((parsed as { event_pending_bus_count?: number }).event_pending_bus_count)) + : 0 + const parsed_status = (parsed as { event_status?: unknown }).event_status + this.event_status = + parsed_status === 'pending' || parsed_status === 'started' || parsed_status === 'completed' ? parsed_status : 'pending' + + this.event_started_at = + typeof (parsed as { event_started_at?: unknown }).event_started_at === 'string' + ? (parsed as { event_started_at: string }).event_started_at + : undefined + this.event_started_ts = + typeof (parsed as { event_started_ts?: unknown }).event_started_ts === 'number' + ? (parsed as { event_started_ts: number }).event_started_ts + : undefined + this.event_completed_at = + typeof (parsed as { event_completed_at?: unknown }).event_completed_at === 'string' + ? (parsed as { event_completed_at: string }).event_completed_at + : undefined + this.event_completed_ts = + typeof (parsed as { event_completed_ts?: unknown }).event_completed_ts === 'number' + ? (parsed as { event_completed_ts: number }).event_completed_ts + : undefined + this.event_emitted_by_handler_id = + typeof (parsed as { event_emitted_by_handler_id?: unknown }).event_emitted_by_handler_id === 'string' + ? (parsed as { event_emitted_by_handler_id: string }).event_emitted_by_handler_id + : undefined + this.event_result_schema = event_result_schema this.event_result_type = event_result_type - this.event_results = new Map() - this.event_created_ts = event_created_ts + this.event_created_ts = + typeof (parsed as { event_created_ts?: unknown }).event_created_ts === 'number' + ? (parsed as { event_created_ts: number }).event_created_ts + : event_created_ts - this._done = null - this._dispatch_context = undefined + this._event_done_signal = null + this._event_dispatch_context = undefined } + // "MyEvent#a48f" toString(): string { return `${this.event_type}#${this.event_id.slice(-4)}` } + // get the next monotonic timestamp for global ordering of all operations static nextTimestamp(): { date: Date; isostring: string; ts: number } { const ts = performance.now() const date = new Date(performance.timeOrigin + ts) return { date, isostring: date.toISOString(), ts } } - static extend( - event_type: string, - shape?: TShape - ): EventFactory> + // main entry point for users to define their own event types + // BaseEvent.extend("MyEvent", { some_custom_field: z.string(), event_result_schema: z.string(), event_timeout: 25, ... }) -> MyEvent + static extend(event_type: string, shape?: TShape): EventFactory> static extend>( event_type: string, shape?: TShape @@ -162,12 +221,14 @@ export class BaseEvent { ): EventFactory, ResultTypeFromShape> { const raw_shape = shape as Record - const event_result_schema = is_zod_schema(raw_shape.event_result_schema) ? (raw_shape.event_result_schema as z.ZodTypeAny) : undefined - const event_result_type = typeof raw_shape.event_result_type === 'string' ? raw_shape.event_result_type : undefined + const event_result_schema = isZodSchema(raw_shape.event_result_schema) ? (raw_shape.event_result_schema as z.ZodTypeAny) : undefined + const explicit_event_result_type = typeof raw_shape.event_result_type === 'string' ? raw_shape.event_result_type : undefined + const event_result_type = explicit_event_result_type ?? getStringTypeName(event_result_schema) - const zod_shape = extract_zod_shape(raw_shape) + const zod_shape = extractZodShape(raw_shape) const full_schema = BaseEventSchema.extend(zod_shape) + // create a new event class that extends BaseEvent and adds the custom fields class ExtendedEvent extends BaseEvent { static schema = full_schema as unknown as typeof BaseEvent.schema static event_type = event_type @@ -179,7 +240,7 @@ export class BaseEvent { } } - type FactoryResult = EventWithResult> & z.infer>> + type FactoryResult = EventWithResult> & EventPayload> function EventFactory(data: EventInit>): FactoryResult { return new ExtendedEvent(data) as FactoryResult @@ -196,6 +257,7 @@ export class BaseEvent { return EventFactory as unknown as EventFactory, ResultTypeFromShape> } + // parse raw event data into a new event object static parse(this: T, data: unknown): InstanceType { const schema = this.schema ?? BaseEventSchema const parsed = schema.parse(data) @@ -207,7 +269,7 @@ export class BaseEvent { return this.parse(data) } const record = { ...(data as Record) } - if (record.event_result_schema && !is_zod_schema(record.event_result_schema)) { + if (record.event_result_schema && !isZodSchema(record.event_result_schema)) { const zod_any = z as unknown as { fromJSONSchema?: (schema: unknown) => z.ZodTypeAny } if (typeof zod_any.fromJSONSchema === 'function') { record.event_result_schema = zod_any.fromJSONSchema(record.event_result_schema) @@ -220,17 +282,36 @@ export class BaseEvent { return { event_id: this.event_id, event_created_at: this.event_created_at, + event_created_ts: this.event_created_ts, event_type: this.event_type, event_timeout: this.event_timeout, event_parent_id: this.event_parent_id, event_path: this.event_path, event_result_type: this.event_result_type, + event_emitted_by_handler_id: this.event_emitted_by_handler_id, + event_pending_bus_count: this.event_pending_bus_count, + event_status: this.event_status, + event_started_at: this.event_started_at, + event_started_ts: this.event_started_ts, + event_completed_at: this.event_completed_at, + event_completed_ts: this.event_completed_ts, + event_results: Array.from(this.event_results.values()).map((result) => result.toJSON()), event_concurrency: this.event_concurrency, - handler_concurrency: this.handler_concurrency, - event_result_schema: this.event_result_schema ? to_json_schema(this.event_result_schema) : this.event_result_schema, + event_handler_concurrency: this.event_handler_concurrency, + event_result_schema: this.event_result_schema ? toJsonSchema(this.event_result_schema) : this.event_result_schema, } } + // Get parent event object from event_parent_id (checks across all busses) + get event_parent(): BaseEvent | undefined { + const original = this._event_original ?? this + const parent_id = original.event_parent_id + if (!parent_id) { + return undefined + } + return original.bus?.findEventById(parent_id) ?? undefined + } + // get all direct children of this event get event_children(): BaseEvent[] { const children: BaseEvent[] = [] @@ -275,8 +356,8 @@ export class BaseEvent { return descendants } - // awaitable to trigger immediate processing of the event on all buses where it is queued - // TODO: rename to immediate() + // awaitable that triggers immediate (queue-jump) processing of the event on all buses where it is queued + // use event.waitForCompletion() or event.finished() to wait for the event to be processed in normal queue order done(): Promise { if (!this.bus) { return Promise.reject(new Error('event has no bus attached')) @@ -293,13 +374,24 @@ export class BaseEvent { return runner_bus.processEventImmediately(this) as Promise } - // TODO: rename to done() + // clearer alias for done() to indicate that the event will be processed immediately + // await bus.dispatch(event).immediate() is less ambiguous than await event.done() + immediate(): Promise { + return this.done() + } + + // awaitable that waits for the event to be processed in normal queue order by the runloop waitForCompletion(): Promise { if (this.event_status === 'completed') { return Promise.resolve(this) } this._notifyDoneListeners() - return this._done!.promise + return this._event_done_signal!.promise + } + + // convenience alias for await event.waitForCompletion() + finished(): Promise { + return this.waitForCompletion() } markStarted(): void { @@ -328,10 +420,10 @@ export class BaseEvent { const { isostring: event_completed_at, ts: event_completed_ts } = BaseEvent.nextTimestamp() this.event_completed_at = event_completed_at this.event_completed_ts = event_completed_ts - this._dispatch_context = null + this._event_dispatch_context = null this._notifyDoneListeners() - this._done!.resolve(this) - this._done = null + this._event_done_signal!.resolve(this) + this._event_done_signal = null } get event_errors(): unknown[] { @@ -354,17 +446,17 @@ export class BaseEvent { } _notifyDoneListeners(): void { - if (this._done) { + if (this._event_done_signal) { return } - this._done = withResolvers() + this._event_done_signal = withResolvers() } // Break internal reference chains so a completed event can be GC'd when // evicted from event_history. Called by EventBus.trimHistory(). _gc(): void { - this._done = null - this._dispatch_context = null + this._event_done_signal = null + this._event_dispatch_context = null this.bus = undefined for (const result of this.event_results.values()) { result.event_children = [] @@ -373,31 +465,18 @@ export class BaseEvent { } } -const is_zod_schema = (value: unknown): value is z.ZodTypeAny => !!value && typeof (value as z.ZodTypeAny).safeParse === 'function' - -const extract_zod_shape = (raw: Record): z.ZodRawShape => { - const shape: Record = {} - for (const [key, value] of Object.entries(raw)) { - if (key === 'event_result_schema' || key === 'event_result_type') { +const hydrateEventResults = (event: TEvent, raw_event_results: unknown): Map> => { + const event_results = new Map>() + if (!Array.isArray(raw_event_results)) { + return event_results + } + for (const item of raw_event_results) { + const result = EventResult.fromJSON(event, item) + if (!result) { continue } - if (is_zod_schema(value)) { - shape[key] = value - } - } - return shape as z.ZodRawShape -} - -const to_json_schema = (schema: unknown): unknown => { - if (!schema) { - return schema - } - if (!is_zod_schema(schema)) { - return schema - } - const zod_any = z as unknown as { toJSONSchema?: (schema: z.ZodTypeAny) => unknown } - if (typeof zod_any.toJSONSchema === 'function') { - return zod_any.toJSONSchema(schema) + const map_key = typeof result.handler_id === 'string' && result.handler_id.length > 0 ? result.handler_id : result.id + event_results.set(map_key, result) } - return undefined + return event_results } diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts index c45419e..9e4409a 100644 --- a/bubus-ts/src/event_bus.ts +++ b/bubus-ts/src/event_bus.ts @@ -11,7 +11,7 @@ import { } from './event_handler.js' import { logTree } from './logging.js' -import type { EventClass, EventHandlerFunction, EventKey, FindOptions, HandlerOptions, UntypedEventHandlerFunction } from './types.js' +import type { EventClass, EventHandlerFunction, EventKey, FindOptions, UntypedEventHandlerFunction } from './types.js' type FindWaiter = { // similar to a handler, except its for .find() calls @@ -27,12 +27,13 @@ type FindWaiter = { type EventBusOptions = { max_history_size?: number | null event_concurrency?: ConcurrencyMode - handler_concurrency?: ConcurrencyMode - event_timeout?: number | null - event_handler_slow_timeout?: number | null - event_slow_timeout?: number | null + event_handler_concurrency?: ConcurrencyMode + event_timeout?: number | null // default handler timeout in seconds, applied when event.event_timeout is undefined + event_handler_slow_timeout?: number | null // threshold before a warning is logged about slow handler execution + event_slow_timeout?: number | null // threshold before a warning is logged about slow event processing } +// Global registry of all EventBus instances to allow for cross-bus coordination when global-serial concurrency mode is used class GlobalEventBusInstanceRegistry { private _refs = new Set>() private _lookup = new WeakMap>() @@ -76,6 +77,7 @@ class GlobalEventBusInstanceRegistry { } } + // find an event by its id across all buses findEventById(event_id: string): BaseEvent | null { for (const bus of this) { const event = bus.event_history.get(event_id) @@ -90,27 +92,26 @@ class GlobalEventBusInstanceRegistry { export class EventBus { static _all_instances = new GlobalEventBusInstanceRegistry() - name: string + name: string // name of the event bus, recommended to include the word "Bus" in the name for clarity in logs // configuration options - max_history_size: number | null + max_history_size: number | null // max number of completed events kept in log, set to null for unlimited history event_concurrency_default: ConcurrencyMode - handler_concurrency_default: ConcurrencyMode + event_handler_concurrency_default: ConcurrencyMode event_timeout_default: number | null event_handler_slow_timeout: number | null event_slow_timeout: number | null // public runtime state - handlers: Map - event_history: Map + handlers: Map // map of handler uuidv5 ids to EventHandler objects + event_history: Map // map of event uuidv7 ids to processed BaseEvent objects // internal runtime state - pending_event_queue: BaseEvent[] - in_flight_event_ids: Set + pending_event_queue: BaseEvent[] // queue of events that have been dispatched to the bus but not yet processed + in_flight_event_ids: Set // set of event ids that are currently being processed by the bus runloop_running: boolean locks: LockManager - // Pending find() callers waiting for a matching future event. - find_waiters: Set + find_waiters: Set // set of FindWaiter objects that are waiting for a matching future event constructor(name: string = 'EventBus', options: EventBusOptions = {}) { this.name = name @@ -118,7 +119,7 @@ export class EventBus { // set configuration options this.max_history_size = options.max_history_size === undefined ? 100 : options.max_history_size this.event_concurrency_default = options.event_concurrency ?? 'bus-serial' - this.handler_concurrency_default = options.handler_concurrency ?? 'bus-serial' + this.event_handler_concurrency_default = options.event_handler_concurrency ?? 'bus-serial' this.event_timeout_default = options.event_timeout === undefined ? 60 : options.event_timeout this.event_handler_slow_timeout = options.event_handler_slow_timeout === undefined ? 30 : options.event_handler_slow_timeout this.event_slow_timeout = options.event_slow_timeout === undefined ? 300 : options.event_slow_timeout @@ -142,9 +143,10 @@ export class EventBus { if (this.name.toLowerCase().includes('bus')) { return `${this.name}` } - return `EventBus(${this.name})` // for clarity that its a bus if bus is not in the name + return `EventBus(${this.name})` // for clarity that its a bus if bus is not in the name } + // destroy the event bus and all its state to allow for garbage collection destroy(): void { EventBus._all_instances.delete(this) this.handlers.clear() @@ -158,20 +160,32 @@ export class EventBus { this.locks.clear() } - on(event_key: EventClass, handler: EventHandlerFunction, options?: HandlerOptions): EventHandler - on(event_key: string | '*', handler: UntypedEventHandlerFunction, options?: HandlerOptions): EventHandler - on(event_key: EventKey | '*', handler: EventHandlerFunction | UntypedEventHandlerFunction, options: HandlerOptions = {}): EventHandler { - const normalized_key = this.normalizeEventKey(event_key) - const handler_name = handler.name || 'anonymous' + on( + event_key: EventClass, + handler: EventHandlerFunction, + options?: { event_handler_concurrency?: ConcurrencyMode; handler_timeout?: number | null } + ): EventHandler + on( + event_key: string | '*', + handler: UntypedEventHandlerFunction, + options?: { event_handler_concurrency?: ConcurrencyMode; handler_timeout?: number | null } + ): EventHandler + on( + event_key: EventKey | '*', + handler: EventHandlerFunction | UntypedEventHandlerFunction, + options: { event_handler_concurrency?: ConcurrencyMode; handler_timeout?: number | null } = {} + ): EventHandler { + const normalized_key = this.normalizeEventKey(event_key) // get string event_type or '*' + const handler_name = handler.name || 'anonymous' // get handler function name or 'anonymous' if the handler is an anonymous/arrow function const { isostring: handler_registered_at, ts: handler_registered_ts } = BaseEvent.nextTimestamp() const handler_timeout = options.handler_timeout ?? this.event_timeout_default const handler_entry = new EventHandler({ handler: handler as EventHandlerFunction, handler_name, handler_timeout, + event_handler_concurrency: options.event_handler_concurrency, handler_registered_at, handler_registered_ts, - options: Object.keys(options).length > 0 ? options : undefined, event_key: normalized_key, eventbus_name: this.name, }) @@ -198,7 +212,7 @@ export class EventBus { } dispatch(event: T, _event_key?: EventKey): T { - const original_event = event._original_event ?? event // if event is a bus-scoped proxy already, get the original underlying event object + const original_event = event._event_original ?? event // if event is a bus-scoped proxy already, get the original underlying event object if (!original_event.bus) { // if we are the first bus to dispatch this event, set the bus property on the original event object original_event.bus = this @@ -206,11 +220,11 @@ export class EventBus { if (!Array.isArray(original_event.event_path)) { original_event.event_path = [] } - if (original_event._dispatch_context === undefined) { + if (original_event._event_dispatch_context === undefined) { // when used in fastify/nextjs/other contexts with tracing based on AsyncLocalStorage in node // we want to capture the context at the dispatch site and use it when running handlers // because events may be handled async in a separate context than the dispatch site - original_event._dispatch_context = captureAsyncContext() + original_event._event_dispatch_context = captureAsyncContext() } if (original_event.event_timeout === null) { original_event.event_timeout = this.event_timeout_default @@ -224,10 +238,10 @@ export class EventBus { original_event.event_path.push(this.name) } - if (original_event.event_parent_id) { - const parent_event = this.event_history.get(original_event.event_parent_id) - if (parent_event) { - this.recordChildEvent(parent_event.event_id, original_event, original_event.event_emitted_by_handler_id) + if (original_event.event_parent_id && original_event.event_emitted_by_handler_id) { + const parent_result = original_event.event_parent?.event_results.get(original_event.event_emitted_by_handler_id) + if (parent_result) { + parent_result.linkEmittedChildEvent(original_event) } } @@ -241,10 +255,12 @@ export class EventBus { return this.getEventProxyScopedToThisBus(original_event) as T } + // alias for dispatch emit(event: T, event_key?: EventKey): T { return this.dispatch(event, event_key) } + // find a recent event or wait for a future event that matches some criteria find(event_key: EventKey, options?: FindOptions): Promise find(event_key: EventKey, where: (event: T) => boolean, options?: FindOptions): Promise async find( @@ -335,7 +351,7 @@ export class EventBus { // (preventing deadlock for bus-serial/global-serial modes). We re-acquire after // the child completes so the parent handler can continue with the semaphore held. async processEventImmediately(event: T, handler_result?: EventResult): Promise { - const original_event = event._original_event ?? event + const original_event = event._event_original ?? event // Find the parent handler's result: prefer the proxy-provided one (only if // the handler is still running), then this bus's stack, then walk up the // parent event tree (cross-bus case). If none found, we're not inside a @@ -394,6 +410,7 @@ export class EventBus { await this.locks.waitForIdle() } + // Weak idle check: only checks if handlers are idle, doesnt check that the queue is empty isIdle(): boolean { for (const event of this.event_history.values()) { for (const result of event.event_results.values()) { @@ -408,6 +425,12 @@ export class EventBus { return true // no handlers are pending or started } + // Stronger idle check: no queued work, no in-flight processing, runloop not + // active, and no handlers pending/running for this bus. + isIdleAndQueueEmpty(): boolean { + return this.pending_event_queue.length === 0 && this.in_flight_event_ids.size === 0 && this.isIdle() && !this.runloop_running + } + eventIsChildOf(event: BaseEvent, ancestor: BaseEvent): boolean { if (event.event_id === ancestor.event_id) { return false @@ -427,24 +450,8 @@ export class EventBus { return false } - eventIsParentOf(event: BaseEvent, descendant: BaseEvent): boolean { - return this.eventIsChildOf(descendant, event) - } - - recordChildEvent(parent_event_id: string, child_event: BaseEvent, handler_id?: string): void { - const original_child = child_event._original_event ?? child_event - const parent_event = this.event_history.get(parent_event_id) ?? EventBus._all_instances.findEventById(parent_event_id) - - const target_handler_id = handler_id ?? original_child.event_emitted_by_handler_id ?? undefined - if (target_handler_id) { - const current_result = parent_event?.event_results.get(target_handler_id) - if (current_result) { - if (!current_result.event_children.some((child) => child.event_id === original_child.event_id)) { - current_result.event_children.push(original_child) - } - } - original_child.event_emitted_by_handler_id = target_handler_id - } + eventIsParentOf(parent_event: BaseEvent, child_event: BaseEvent): boolean { + return this.eventIsChildOf(child_event, parent_event) } // return a full detailed tree diagram of all events and results on this bus @@ -452,11 +459,16 @@ export class EventBus { return logTree(this) } + // Resolve an event id from this bus first, then across all known buses. + findEventById(event_id: string): BaseEvent | null { + return this.event_history.get(event_id) ?? EventBus._all_instances.findEventById(event_id) + } + // Walk up the parent event chain to find an in-flight ancestor handler result. // Returns the result if found, null otherwise. Used by processEventImmediately to detect // cross-bus queue-jump scenarios where the calling handler is on a different bus. getParentEventResultAcrossAllBusses(event: BaseEvent): EventResult | null { - const original = event._original_event ?? event + const original = event._event_original ?? event let current_parent_id = original.event_parent_id let current_handler_id = original.event_emitted_by_handler_id while (current_handler_id && current_parent_id) { @@ -575,6 +587,8 @@ export class EventBus { }) } + // schedule the processing of an event on the event bus by its normal runloop + // but set up the bus to process the given event immediately if it is a queue-jump event private async scheduleEventProcessing( event: BaseEvent, options: { @@ -613,7 +627,7 @@ export class EventBus { if (!next_event) { continue } - const original_event = next_event._original_event ?? next_event + const original_event = next_event._event_original ?? next_event if (this.hasProcessedEvent(original_event)) { this.pending_event_queue.shift() continue @@ -660,7 +674,7 @@ export class EventBus { try { const handler_entries = this.createPendingHandlerResults(event) - const handler_promises = handler_entries.map((entry) => this.runEventHandler(event, entry.handler, entry.result, entry.options)) + const handler_promises = handler_entries.map((entry) => this.runEventHandler(event, entry.handler, entry.result)) await Promise.all(handler_promises) event.event_pending_bus_count = Math.max(0, event.event_pending_bus_count - 1) @@ -677,18 +691,13 @@ export class EventBus { // Manually manages the handler concurrency semaphore instead of using runWithSemaphore, // because processEventImmediately may temporarily yield it during queue-jumping. - async runEventHandler( - event: BaseEvent, - handler: EventHandlerFunction, - result: EventResult, - options?: HandlerOptions - ): Promise { + async runEventHandler(event: BaseEvent, handler: EventHandler, result: EventResult): Promise { if (result.status === 'error' && result.error instanceof EventHandlerCancelledError) { return } const handler_event = this.getEventProxyScopedToThisBus(event, result) - const semaphore = this.locks.getSemaphoreForHandler(event, options) + const semaphore = this.locks.getSemaphoreForHandler(event, handler) if (semaphore) { await semaphore.acquire() @@ -753,14 +762,14 @@ export class EventBus { // run a handler with a timeout, returning a promise that resolves or rejects with the handler's result or an error if the timeout is exceeded private async runHandlerWithTimeout( event: BaseEvent, - handler: EventHandlerFunction, + handler: EventHandler, handler_event: BaseEvent = event, result: EventResult ): Promise { // resolve the effective timeout by combining the event timeout and the handler timeout const effective_timeout = this.resolveEffectiveTimeout(event.event_timeout, result.handler.handler_timeout) const run_handler = () => - Promise.resolve().then(() => runWithAsyncContext(event._dispatch_context ?? null, () => handler(handler_event))) + Promise.resolve().then(() => runWithAsyncContext(event._event_dispatch_context ?? null, () => handler.handler(handler_event))) if (effective_timeout === null) { // if there is no timeout to enforce, just run the handler directly and return the promise @@ -788,10 +797,13 @@ export class EventBus { // set a timeout to reject the promise if the handler takes too long const timer = setTimeout(() => { finalize(reject)( - new EventHandlerTimeoutError(`${this.toString()}.on(${event.toString()}, ${result.handler.toString()}) timed out after ${timeout_seconds}s`, { - event_result: result, - timeout_seconds, - }) + new EventHandlerTimeoutError( + `${this.toString()}.on(${event.toString()}, ${result.handler.toString()}) timed out after ${timeout_seconds}s`, + { + event_result: result, + timeout_seconds, + } + ) ) }, timeout_ms) @@ -885,10 +897,9 @@ export class EventBus { // proxy is what gets passed into the handler, if handler does event.bus.emit(...) to dispatch child events, // the proxy auto-sets event.parent_event_id and event.event_emitted_by_handler_id getEventProxyScopedToThisBus(event: T, handler_result?: EventResult): T { - const original_event = event._original_event ?? event + const original_event = event._event_original ?? event const bus = this const parent_event_id = original_event.event_id - const handler_id = handler_result?.handler_id const bus_proxy = new Proxy(bus, { get(target, prop, receiver) { if (prop === 'processEventImmediately') { @@ -899,13 +910,13 @@ export class EventBus { } if (prop === 'dispatch' || prop === 'emit') { return (child_event: BaseEvent, event_key?: EventKey) => { - const original_child = child_event._original_event ?? child_event - if (!original_child.event_parent_id) { + const original_child = child_event._event_original ?? child_event + if (handler_result) { + handler_result.linkEmittedChildEvent(original_child) + } else if (!original_child.event_parent_id) { + // fallback for non-handler scoped dispatch original_child.event_parent_id = parent_event_id } - if (handler_id && !original_child.event_emitted_by_handler_id) { - original_child.event_emitted_by_handler_id = handler_id - } const dispatcher = Reflect.get(target, prop, receiver) as (event: BaseEvent, event_key?: EventKey) => BaseEvent const dispatched = dispatcher.call(target, original_child, event_key) return target.getEventProxyScopedToThisBus(dispatched, handler_result) @@ -919,7 +930,7 @@ export class EventBus { if (prop === 'bus') { return bus_proxy } - if (prop === '_original_event') { + if (prop === '_event_original') { return target } return Reflect.get(target, prop, receiver) @@ -934,7 +945,7 @@ export class EventBus { if (prop === 'bus') { return true } - if (prop === '_original_event') { + if (prop === '_event_original') { return true } return Reflect.has(target, prop) @@ -949,7 +960,7 @@ export class EventBus { const cancellation_cause = this.normalizeCancellationCause(reason) const visited = new Set() const cancelChildEvent = (child: BaseEvent): void => { - const original_child = child._original_event ?? child + const original_child = child._event_original ?? child if (visited.has(original_child.event_id)) { return } @@ -996,7 +1007,7 @@ export class EventBus { // force-abort processing of an event regardless of whether it is pending or has already started private cancelEvent(event: BaseEvent, cause: Error): void { - const original_event = event._original_event ?? event + const original_event = event._event_original ?? event const handler_entries = this.createPendingHandlerResults(original_event) let updated = false for (const entry of handler_entries) { @@ -1032,7 +1043,7 @@ export class EventBus { if (this.pending_event_queue.length > 0) { const before_len = this.pending_event_queue.length this.pending_event_queue = this.pending_event_queue.filter( - (queued) => (queued._original_event ?? queued).event_id !== original_event.event_id + (queued) => (queued._event_original ?? queued).event_id !== original_event.event_id ) removed = before_len - this.pending_event_queue.length } @@ -1066,9 +1077,8 @@ export class EventBus { } private createPendingHandlerResults(event: BaseEvent): Array<{ - handler: EventHandlerFunction + handler: EventHandler result: EventResult - options?: HandlerOptions }> { const handlers = this.getHandlersForEvent(event) return handlers.map((entry) => { @@ -1078,7 +1088,7 @@ export class EventBus { if (!existing) { event.event_results.set(handler_id, result) } - return { handler: entry.handler, result, options: entry.options } + return { handler: entry, result } }) } @@ -1122,7 +1132,9 @@ export class EventBus { if (typeof event_type === 'string' && event_type.length > 0 && event_type !== 'BaseEvent') { return event_type } - throw new Error('bus.on(match_pattern, ...) must be a string event type, "*", or a BaseEvent class, got: ' + JSON.stringify(event_key).slice(0, 30)) + throw new Error( + 'bus.on(match_pattern, ...) must be a string event type, "*", or a BaseEvent class, got: ' + JSON.stringify(event_key).slice(0, 30) + ) } private trimHistory(): void { diff --git a/bubus-ts/src/event_handler.ts b/bubus-ts/src/event_handler.ts index 970fbbe..a165408 100644 --- a/bubus-ts/src/event_handler.ts +++ b/bubus-ts/src/event_handler.ts @@ -1,23 +1,24 @@ import { v5 as uuidv5 } from 'uuid' -import type { EventHandlerFunction, HandlerOptions } from './types.js' +import type { ConcurrencyMode } from './lock_manager.js' +import type { EventHandlerFunction } from './types.js' import { BaseEvent } from './base_event.js' import { EventResult } from './event_result.js' const HANDLER_ID_NAMESPACE = uuidv5('bubus-handler', uuidv5.DNS) +// an entry in the list of event handlers that are registered on a bus export class EventHandler { - // an entry in the list of handlers that are registered on a bus id: string // unique uuidv5 based on hash of bus name, handler name, handler file path:lineno, registered at timestamp, and event key - handler: EventHandlerFunction - handler_name: string - handler_file_path?: string - handler_timeout: number | null - handler_registered_at: string - handler_registered_ts: number - options?: HandlerOptions - event_key: string | '*' - eventbus_name: string + handler: EventHandlerFunction // the handler function itself + handler_name: string // name of the handler function, or 'anonymous' if the handler is an anonymous/arrow function + handler_file_path?: string // ~/path/to/source/file.ts:123 + handler_timeout: number | null // maximum time in seconds that the handler is allowed to run before it is aborted, defaults to event.event_timeout if not set + event_handler_concurrency?: ConcurrencyMode // per-handler concurrency override + handler_registered_at: string // ISO datetime string version of handler_registered_ts + handler_registered_ts: number // nanosecond monotonic version of handler_registered_at + event_key: string | '*' // event_type string to match against, or '*' to match all events + eventbus_name: string // name of the event bus that the handler is registered on constructor(params: { id?: string @@ -25,9 +26,9 @@ export class EventHandler { handler_name: string handler_file_path?: string handler_timeout: number | null + event_handler_concurrency?: ConcurrencyMode handler_registered_at: string handler_registered_ts: number - options?: HandlerOptions event_key: string | '*' eventbus_name: string }) { @@ -45,9 +46,9 @@ export class EventHandler { this.handler_name = params.handler_name this.handler_file_path = handler_file_path this.handler_timeout = params.handler_timeout + this.event_handler_concurrency = params.event_handler_concurrency this.handler_registered_at = params.handler_registered_at this.handler_registered_ts = params.handler_registered_ts - this.options = params.options this.event_key = params.event_key this.eventbus_name = params.eventbus_name } @@ -65,12 +66,15 @@ export class EventHandler { return uuidv5(seed, HANDLER_ID_NAMESPACE) } + // "someHandlerName() (~/path/to/source/file.ts:123)" toString(): string { const label = this.handler_name && this.handler_name !== 'anonymous' ? `${this.handler_name}()` : `function#${this.id.slice(-4)}()` const file_path = this.handler_file_path ?? 'unknown' return `${label} (${file_path})` } + // walk the stack trace at registration time to detect the location of the source code file that defines the handler function + // and return the file path and line number as a string, or 'unknown' if the file path cannot be determined private static detectHandlerFilePath(file_path?: string, fallback: string = 'unknown'): string | undefined { const extract = (value: string): string => value.trim().match(/\(([^)]+)\)$/)?.[1] ?? @@ -79,7 +83,10 @@ export class EventHandler { value.trim() let resolved_path = file_path ? extract(file_path) : file_path if (!resolved_path) { - const line = new Error().stack?.split('\n').map((l) => l.trim()).filter(Boolean)[4] + const line = new Error().stack + ?.split('\n') + .map((l) => l.trim()) + .filter(Boolean)[4] if (line) resolved_path = extract(line) } if (!resolved_path) return fallback @@ -96,10 +103,12 @@ export class EventHandler { normalized = path } } - normalized = normalized.replace(/\/Users\/[^/]+\//, '~/') + normalized = normalized.replace(/\/users\/[^/]+\//i, '~/').replace(/\/home\/[^/]+\//i, '~/') return line_number ? `${normalized}:${line_number}` : normalized } } + +// Generic base TimeoutError used for EventHandlerTimeoutError.cause default value if export class TimeoutError extends Error { constructor(message: string) { super(message) @@ -107,6 +116,7 @@ export class TimeoutError extends Error { } } +// Base class for all errors that can occur while running an event handler export class EventHandlerError extends Error { event_result: EventResult timeout_seconds: number | null @@ -140,8 +150,8 @@ export class EventHandlerError extends Error { return this.event.event_timeout } } -// EventHandlerTimeoutError: when the handler itself timed out while executing (due to event.event_timeout being exceeded) +// When the handler itself timed out while executing (due to handler.handler_timeout being exceeded) export class EventHandlerTimeoutError extends EventHandlerError { constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause?: Error }) { super(message, { @@ -152,16 +162,16 @@ export class EventHandlerTimeoutError extends EventHandlerError { this.name = 'EventHandlerTimeoutError' } } -// EventHandlerCancelledError: when a pending handler was cancelled and never run due to an error (e.g. timeout) in a parent scope +// When a pending handler was cancelled and never run due to an error (e.g. timeout) in a parent scope export class EventHandlerCancelledError extends EventHandlerError { constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { super(message, params) this.name = 'EventHandlerCancelledError' } } -// EventHandlerAbortedError: when a handler that was already running was aborted due to an error in the parent scope, not due to an error in its own logic / exceeding its own timeout +// When a handler that was already running was aborted due to an error in the parent scope, not due to an error in its own logic / exceeding its own timeout export class EventHandlerAbortedError extends EventHandlerError { constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { super(message, params) @@ -169,11 +179,11 @@ export class EventHandlerAbortedError extends EventHandlerError { } } -// EventHandlerResultSchemaError: when a handler returns a value that fails event_result_schema validation +// When a handler run succesfully but returned a value that failed event_result_schema validation export class EventHandlerResultSchemaError extends EventHandlerError { raw_value: unknown - constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error, raw_value: unknown }) { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error; raw_value: unknown }) { super(message, params) this.name = 'EventHandlerResultSchemaError' this.raw_value = params.raw_value diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts index b01b90f..5d6ef20 100644 --- a/bubus-ts/src/event_result.ts +++ b/bubus-ts/src/event_result.ts @@ -2,23 +2,51 @@ import { v7 as uuidv7 } from 'uuid' import { BaseEvent } from './base_event.js' import type { EventHandler } from './event_handler.js' -import { HandlerLock, withResolvers } from './lock_manager.js' +import { HandlerLock, type ConcurrencyMode, withResolvers } from './lock_manager.js' import type { Deferred } from './lock_manager.js' +import type { EventHandlerFunction, EventResultType } from './types.js' +// More precise than event.event_status, includes separate 'error' state for handlers that throw errors during execution export type EventResultStatus = 'pending' | 'started' | 'completed' | 'error' -export class EventResult { - id: string - status: EventResultStatus - event: BaseEvent - handler: EventHandler +export type EventResultData = { + id?: string + status?: EventResultStatus + event_id?: string + handler?: { + id?: string + handler_name?: string + handler_file_path?: string + handler_timeout?: number | null + event_handler_concurrency?: ConcurrencyMode + handler_registered_at?: string + handler_registered_ts?: number + event_key?: string | '*' + eventbus_name?: string + } started_at?: string started_ts?: number completed_at?: string completed_ts?: number - result?: unknown // raw return value from the event handler - error?: unknown // error object thrown by the event handler - event_children: BaseEvent[] + result?: unknown + error?: unknown + event_children?: string[] +} + +// Object that tracks the pending or completed execution of a single event handler +export class EventResult { + id: string // unique uuidv7 identifier for the event result + status: EventResultStatus // 'pending', 'started', 'completed', or 'error' + event: TEvent // the Event that the handler is processing + handler: EventHandler // the EventHandler object that going to process the event + started_at?: string // ISO datetime string version of started_ts + started_ts?: number // nanosecond monotonic version of started_at + completed_at?: string // ISO datetime string version of completed_ts + completed_ts?: number // nanosecond monotonic version of completed_at + result?: EventResultType // parsed return value from the event handler + error?: unknown // error object thrown by the event handler, or null if the handler completed successfully + event_children: BaseEvent[] // any child events that were emitted during handler execution are captured automatically and stored here to track hierarchy + // Abort signal: created when handler starts, rejected by signalAbort() to // interrupt runEventHandler's await via Promise.race. _abort: Deferred | null @@ -27,12 +55,14 @@ export class EventResult { // processEventImmediately for yield-and-reacquire during queue-jumps. _lock: HandlerLock | null - constructor(params: { event: BaseEvent; handler: EventHandler }) { + constructor(params: { event: TEvent; handler: EventHandler }) { this.id = uuidv7() this.status = 'pending' this.event = params.event this.handler = params.handler this.event_children = [] + this.result = undefined + this.error = undefined this._abort = null this._lock = null } @@ -65,6 +95,34 @@ export class EventResult { return this.handler.eventbus_name } + // shortcut for the result value so users can do event_result.value instead of event_result.result + get value(): EventResultType | undefined { + return this.result + } + + // Link a child event emitted by this handler run to the parent event/result. + linkEmittedChildEvent(child_event: BaseEvent): void { + const original_child = child_event._event_original ?? child_event + const parent_event = this.event._event_original ?? this.event + if (!original_child.event_parent_id) { + original_child.event_parent_id = parent_event.event_id + } + if (!original_child.event_emitted_by_handler_id) { + original_child.event_emitted_by_handler_id = this.handler_id + } + if (!this.event_children.some((child) => child.event_id === original_child.event_id)) { + this.event_children.push(original_child) + } + } + + // Get the raw return value from the handler, even if it threw an error / failed validation + get raw_value(): EventResultType | undefined { + if (this.error && (this.error as any).raw_value !== undefined) { + return (this.error as any).raw_value + } + return this.result + } + // Reject the abort promise, causing runEventHandler's Promise.race to // throw immediately β€” even if the handler has no timeout. signalAbort(error: Error): void { @@ -88,7 +146,7 @@ export class EventResult { return this._abort.promise } - markCompleted(result: unknown): void { + markCompleted(result: EventResultType | undefined): void { if (this.status === 'completed' || this.status === 'error') return this.status = 'completed' this.result = result @@ -105,4 +163,90 @@ export class EventResult { this.completed_at = completed_at this.completed_ts = completed_ts } + + toJSON(): EventResultData { + return { + id: this.id, + status: this.status, + event_id: this.event.event_id, + handler: { + id: this.handler.id, + handler_name: this.handler.handler_name, + handler_file_path: this.handler.handler_file_path, + handler_timeout: this.handler.handler_timeout, + event_handler_concurrency: this.handler.event_handler_concurrency, + handler_registered_at: this.handler.handler_registered_at, + handler_registered_ts: this.handler.handler_registered_ts, + event_key: this.handler.event_key, + eventbus_name: this.handler.eventbus_name, + }, + started_at: this.started_at, + started_ts: this.started_ts, + completed_at: this.completed_at, + completed_ts: this.completed_ts, + result: this.result, + error: this.error, + event_children: this.event_children.map((child) => child.event_id), + } + } + + static fromJSON(event: TEvent, data: unknown): EventResult | null { + if (!data || typeof data !== 'object') { + return null + } + const record = data as EventResultData + const handler_record = record.handler ?? {} + + const handler_stub = { + id: typeof handler_record.id === 'string' ? handler_record.id : `deserialized_handler_${uuidv7()}`, + handler: (() => undefined) as EventHandlerFunction, + handler_name: typeof handler_record.handler_name === 'string' ? handler_record.handler_name : 'deserialized_handler', + handler_file_path: typeof handler_record.handler_file_path === 'string' ? handler_record.handler_file_path : undefined, + handler_timeout: + typeof handler_record.handler_timeout === 'number' || handler_record.handler_timeout === null + ? handler_record.handler_timeout + : null, + event_handler_concurrency: handler_record.event_handler_concurrency, + handler_registered_at: + typeof handler_record.handler_registered_at === 'string' ? handler_record.handler_registered_at : event.event_created_at, + handler_registered_ts: + typeof handler_record.handler_registered_ts === 'number' ? handler_record.handler_registered_ts : event.event_created_ts, + event_key: + handler_record.event_key === '*' || typeof handler_record.event_key === 'string' ? handler_record.event_key : event.event_type, + eventbus_name: typeof handler_record.eventbus_name === 'string' ? handler_record.eventbus_name : (event.bus?.name ?? 'unknown'), + toString: () => { + const name = typeof handler_record.handler_name === 'string' ? handler_record.handler_name : 'deserialized_handler' + const file = typeof handler_record.handler_file_path === 'string' ? handler_record.handler_file_path : 'unknown' + return `${name}() (${file})` + }, + } as unknown as EventHandler + + const result = new EventResult({ event, handler: handler_stub }) + if (typeof record.id === 'string') { + result.id = record.id + } + if (record.status === 'pending' || record.status === 'started' || record.status === 'completed' || record.status === 'error') { + result.status = record.status + } + if (typeof record.started_at === 'string') { + result.started_at = record.started_at + } + if (typeof record.started_ts === 'number') { + result.started_ts = record.started_ts + } + if (typeof record.completed_at === 'string') { + result.completed_at = record.completed_at + } + if (typeof record.completed_ts === 'number') { + result.completed_ts = record.completed_ts + } + if ('result' in record) { + result.result = record.result as EventResultType + } + if ('error' in record) { + result.error = record.error + } + result.event_children = [] + return result + } } diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts index 4202275..5021eaf 100644 --- a/bubus-ts/src/index.ts +++ b/bubus-ts/src/index.ts @@ -8,12 +8,4 @@ export { EventHandlerResultSchemaError, } from './event_handler.js' export type { ConcurrencyMode, EventBusInterfaceForLockManager } from './lock_manager.js' -export type { - EventClass, - EventHandlerFunction as EventHandler, - EventKey, - HandlerOptions, - EventStatus, - FindOptions, - FindWindow, -} from './types.js' +export type { EventClass, EventHandlerFunction as EventHandler, EventKey, EventStatus, FindOptions, FindWindow } from './types.js' diff --git a/bubus-ts/src/lock_manager.ts b/bubus-ts/src/lock_manager.ts index 4613947..d814368 100644 --- a/bubus-ts/src/lock_manager.ts +++ b/bubus-ts/src/lock_manager.ts @@ -1,6 +1,6 @@ import type { BaseEvent } from './base_event.js' +import type { EventHandler } from './event_handler.js' import type { EventResult } from './event_result.js' -import type { HandlerOptions } from './types.js' // ─── Deferred / withResolvers ──────────────────────────────────────────────── @@ -26,10 +26,11 @@ export const withResolvers = (): Deferred => { // ─── Concurrency modes ────────────────────────────────────────────────────── export const CONCURRENCY_MODES = ['global-serial', 'bus-serial', 'parallel', 'auto'] as const -export type ConcurrencyMode = (typeof CONCURRENCY_MODES)[number] +export type ConcurrencyMode = (typeof CONCURRENCY_MODES)[number] // union type of the values in the CONCURRENCY_MODES array +export const DEFAULT_CONCURRENCY_MODE = 'bus-serial' export const resolveConcurrencyMode = (mode: ConcurrencyMode | undefined, fallback: ConcurrencyMode): ConcurrencyMode => { - const normalized_fallback = fallback === 'auto' ? 'bus-serial' : fallback + const normalized_fallback = fallback === 'auto' ? DEFAULT_CONCURRENCY_MODE : fallback if (!mode || mode === 'auto') { return normalized_fallback } @@ -120,6 +121,7 @@ export class HandlerLock { this.state = 'held' } + // used by EventBus.processEventImmediately to yield the parent handler's lock to the child event so it can be processed immediately yieldHandlerLockForChildRun(): boolean { if (!this.semaphore || this.state !== 'held') { return false @@ -129,6 +131,7 @@ export class HandlerLock { return true } + // used by EventBus.processEventImmediately to reacquire the handler lock after the child event has been processed async reclaimHandlerLockIfRunning(): Promise { if (!this.semaphore || this.state !== 'yielded') { return false @@ -143,6 +146,7 @@ export class HandlerLock { return true } + // used by EventBus.runEventHandler to exit the handler lock after the handler has finished executing exitHandlerRun(): void { if (this.state === 'closed') { return @@ -154,6 +158,7 @@ export class HandlerLock { } } + // used by EventBus.processEventImmediately to yield the handler lock and reacquire it after the child event has been processed async runQueueJump(fn: () => Promise): Promise { const yielded = this.yieldHandlerLockForChildRun() try { @@ -168,18 +173,17 @@ export class HandlerLock { // ─── LockManager ───────────────────────────────────────────────────────────── +// Interface that must be implemented by the EventBus class to be used by the LockManager export type EventBusInterfaceForLockManager = { - pending_event_queue: BaseEvent[] - in_flight_event_ids: Set - runloop_running: boolean - isIdle: () => boolean + isIdleAndQueueEmpty: () => boolean event_concurrency_default: ConcurrencyMode - handler_concurrency_default: ConcurrencyMode + event_handler_concurrency_default: ConcurrencyMode } +// The LockManager is responsible for managing the concurrency of events and handlers export class LockManager { - static global_event_semaphore = new AsyncSemaphore(1) - static global_handler_semaphore = new AsyncSemaphore(1) + static global_event_semaphore = new AsyncSemaphore(1) // used for the global-serial concurrency mode + static global_handler_semaphore = new AsyncSemaphore(1) // used for the global-serial concurrency mode private bus: EventBusInterfaceForLockManager // Live bus reference; used to read defaults and idle state. readonly bus_event_semaphore: AsyncSemaphore // Per-bus event semaphore; created with LockManager and never swapped. @@ -196,8 +200,8 @@ export class LockManager { constructor(bus: EventBusInterfaceForLockManager) { this.bus = bus - this.bus_event_semaphore = new AsyncSemaphore(1) - this.bus_handler_semaphore = new AsyncSemaphore(1) + this.bus_event_semaphore = new AsyncSemaphore(1) // used for the bus-serial concurrency mode + this.bus_handler_semaphore = new AsyncSemaphore(1) // used for the bus-serial concurrency mode this.pause_depth = 0 this.pause_waiters = [] @@ -209,9 +213,9 @@ export class LockManager { this.idle_check_streak = 0 } + // Low-level runloop pause: increments a re-entrant counter and returns a release + // function. Used for broad, bus-scoped pauses (e.g. runImmediatelyAcrossBuses). requestPause(): () => void { - // Low-level runloop pause: increments a re-entrant counter and returns a release - // function. Used for broad, bus-scoped pauses (e.g. runImmediatelyAcrossBuses). this.pause_depth += 1 let released = false return () => { @@ -267,10 +271,10 @@ export class LockManager { return this.active_handler_results.length > 0 } + // Queue-jump pause: wraps requestPause with per-handler deduping so repeated + // calls during the same handler run don't stack pauses. Released via + // releaseRunloopPauseForQueueJumpEvent when the handler finishes. requestRunloopPauseForQueueJumpEvent(result: EventResult): void { - // Queue-jump pause: wraps requestPause with per-handler deduping so repeated - // calls during the same handler run don't stack pauses. Released via - // releaseRunloopPauseForQueueJumpEvent when the handler finishes. if (this.queue_jump_pause_releases.has(result)) { return } @@ -289,7 +293,7 @@ export class LockManager { } waitForIdle(): Promise { - if (this.getIdleSnapshot()) { + if (this.bus.isIdleAndQueueEmpty()) { return Promise.resolve() } return new Promise((resolve) => { @@ -298,6 +302,8 @@ export class LockManager { }) } + // Called by EventBus.markEventCompleted and EventBus.markHandlerCompleted to notify + // waitUntilIdle() callers that the bus may now be idle. notifyIdleListeners(): void { // Fast-path: most completions have no waitUntilIdle() callers waiting, // so skip expensive idle snapshot scans in that common case. @@ -306,7 +312,7 @@ export class LockManager { return } - if (!this.getIdleSnapshot()) { + if (!this.bus.isIdleAndQueueEmpty()) { this.idle_check_streak = 0 if (this.idle_waiters.length > 0) { this.scheduleIdleCheck() @@ -335,25 +341,18 @@ export class LockManager { return semaphoreForMode(resolved, LockManager.global_event_semaphore, this.bus_event_semaphore) } - getSemaphoreForHandler(event: BaseEvent, options?: HandlerOptions): AsyncSemaphore | null { - const event_override = event.handler_concurrency && event.handler_concurrency !== 'auto' ? event.handler_concurrency : undefined + getSemaphoreForHandler(event: BaseEvent, handler?: Pick): AsyncSemaphore | null { + const event_override = + event.event_handler_concurrency && event.event_handler_concurrency !== 'auto' ? event.event_handler_concurrency : undefined const handler_override = - options?.handler_concurrency && options.handler_concurrency !== 'auto' ? options.handler_concurrency : undefined - const fallback = this.bus.handler_concurrency_default + handler?.event_handler_concurrency && handler.event_handler_concurrency !== 'auto' ? handler.event_handler_concurrency : undefined + const fallback = this.bus.event_handler_concurrency_default const resolved = resolveConcurrencyMode(event_override ?? handler_override ?? fallback, fallback) return semaphoreForMode(resolved, LockManager.global_handler_semaphore, this.bus_handler_semaphore) } - clear(): void { - this.pause_depth = 0 - this.pause_waiters = [] - this.queue_jump_pause_releases = new WeakMap() - this.active_handler_results = [] - this.idle_waiters = [] - this.idle_check_pending = false - this.idle_check_streak = 0 - } - + // Schedules a debounced idle check to run after a short delay. Used to gate + // waitUntilIdle() calls during handler execution and after event completion. private scheduleIdleCheck(): void { if (this.idle_check_pending) { return @@ -365,10 +364,14 @@ export class LockManager { }, 0) } - // Compute instantaneous idle snapshot from live bus state; used to gate waiters. - private getIdleSnapshot(): boolean { - return ( - this.bus.pending_event_queue.length === 0 && this.bus.in_flight_event_ids.size === 0 && this.bus.isIdle() && !this.bus.runloop_running - ) + // Reset all state to initial values + clear(): void { + this.pause_depth = 0 + this.pause_waiters = [] + this.queue_jump_pause_releases = new WeakMap() + this.active_handler_results = [] + this.idle_waiters = [] + this.idle_check_pending = false + this.idle_check_streak = 0 } } diff --git a/bubus-ts/src/type_inference.test.ts b/bubus-ts/src/type_inference.test.ts index 89a5d8c..87338db 100644 --- a/bubus-ts/src/type_inference.test.ts +++ b/bubus-ts/src/type_inference.test.ts @@ -1,11 +1,14 @@ +/* eslint-disable @typescript-eslint/no-unused-vars */ +// Do not remove the unused type/const names below; they are used to test type inference at compile time. + import { z } from 'zod' import { BaseEvent } from './base_event.js' import { EventBus } from './event_bus.js' +import type { EventResult } from './event_result.js' import type { EventResultType } from './types.js' -type IsEqual = - (() => T extends A ? 1 : 2) extends (() => T extends B ? 1 : 2) ? true : false +type IsEqual = (() => T extends A ? 1 : 2) extends () => T extends B ? 1 : 2 ? true : false type Assert = T const InferableResultEvent = BaseEvent.extend('InferableResultEvent', { @@ -15,6 +18,13 @@ const InferableResultEvent = BaseEvent.extend('InferableResultEvent', { type InferableResult = EventResultType> type _assert_inferable_result = Assert> +type InferableEventResultEntry = + InstanceType['event_results'] extends Map ? TResultEntry : never +type _assert_inferable_event_result_entry = Assert< + IsEqual>> +> +type InferableEventResultValue = InferableEventResultEntry extends { result?: infer TResultValue } ? TResultValue : never +type _assert_inferable_event_result_value = Assert> const NoSchemaEvent = BaseEvent.extend('NoSchemaEventForInference', {}) type NoSchemaResult = EventResultType> @@ -23,7 +33,7 @@ type _assert_no_schema_result = Assert> const bus = new EventBus('TypeInferenceBus') bus.on(InferableResultEvent, (event) => { - const _target: string = event.target_id + const target: string = event.target_id return { ok: true } }) @@ -35,4 +45,3 @@ bus.on(InferableResultEvent, () => 'not-ok') // String/wildcard keys remain best-effort and do not strongly enforce return shapes. bus.on('InferableResultEvent', () => 'anything') bus.on('*', () => 123) - diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts index 4e8a591..118c5ca 100644 --- a/bubus-ts/src/types.ts +++ b/bubus-ts/src/types.ts @@ -1,5 +1,5 @@ +import { z } from 'zod' import type { BaseEvent } from './base_event.js' -import type { ConcurrencyMode } from './lock_manager.js' export type EventStatus = 'pending' | 'started' | 'completed' @@ -9,8 +9,7 @@ export type EventKey = string | EventClass export type EventWithResult = BaseEvent & { __event_result_type__?: TResult } -export type EventResultType = - TEvent extends { __event_result_type__?: infer TResult } ? TResult : unknown +export type EventResultType = TEvent extends { __event_result_type__?: infer TResult } ? TResult : unknown export type EventHandlerFunction = ( event: T @@ -20,11 +19,6 @@ export type EventHandlerFunction = ( // type will arrive, so return type checking intentionally degrades to unknown. export type UntypedEventHandlerFunction = (event: T) => void | unknown | Promise -export type HandlerOptions = { - handler_concurrency?: ConcurrencyMode - handler_timeout?: number | null -} - export type FindWindow = boolean | number export type FindOptions = { @@ -32,3 +26,69 @@ export type FindOptions = { future?: FindWindow child_of?: BaseEvent | null } + +const WRAPPER_TYPES = new Set(['optional', 'nullable', 'default', 'catch', 'prefault', 'readonly', 'nonoptional', 'exact_optional']) + +const OBJECT_LIKE_TYPES = new Set(['object', 'record', 'map', 'set']) + +const TYPE_ALIASES: Record = { + enum: 'string', + tuple: 'array', + void: 'undefined', + lazy: 'unknown', +} + +export const isZodSchema = (value: unknown): value is z.ZodTypeAny => !!value && typeof (value as z.ZodTypeAny).safeParse === 'function' + +export const extractZodShape = (raw: Record): z.ZodRawShape => { + const shape: Record = {} + for (const [key, value] of Object.entries(raw)) { + if (key === 'event_result_schema' || key === 'event_result_type') continue + if (isZodSchema(value)) shape[key] = value + } + return shape as z.ZodRawShape +} + +export const toJsonSchema = (schema: unknown): unknown => { + if (!schema || !isZodSchema(schema)) return schema + const zod_any = z as unknown as { toJSONSchema?: (input: z.ZodTypeAny) => unknown } + return typeof zod_any.toJSONSchema === 'function' ? zod_any.toJSONSchema(schema) : undefined +} + +export const getStringTypeName = (schema?: z.ZodTypeAny): string | undefined => { + if (!schema) return undefined + + const visited = new Set() + const infer = (value: z.ZodTypeAny): string => { + if (visited.has(value)) return 'unknown' + visited.add(value) + + const def = (value as unknown as { _def?: Record })._def ?? {} + const kind = typeof def.type === 'string' ? def.type : '' + if (!kind) return 'unknown' + + if (WRAPPER_TYPES.has(kind)) { + return isZodSchema(def.innerType) ? infer(def.innerType) : 'unknown' + } + if (kind === 'pipe') { + return isZodSchema(def.out) ? infer(def.out) : 'unknown' + } + if (kind === 'union') { + const options = (Array.isArray(def.options) ? def.options : []).filter(isZodSchema) + if (options.length === 0) return 'unknown' + const inferred = new Set(options.map((option) => infer(option))) + return inferred.size === 1 ? [...inferred][0] : 'unknown' + } + if (kind === 'literal') { + const literal = Array.isArray(def.values) ? def.values[0] : undefined + if (literal === null) return 'null' + if (typeof literal === 'object') return 'object' + if (typeof literal === 'function') return 'function' + return typeof literal + } + if (OBJECT_LIKE_TYPES.has(kind)) return 'object' + return TYPE_ALIASES[kind] ?? kind + } + + return infer(schema) +} diff --git a/bubus-ts/tests/_perf_profile.ts b/bubus-ts/tests/_perf_profile.ts index 8ec7ce0..327f5bf 100644 --- a/bubus-ts/tests/_perf_profile.ts +++ b/bubus-ts/tests/_perf_profile.ts @@ -50,8 +50,8 @@ console.log(`Memory after GC: RSS=${(mem_gc.rss / 1024 / 1024).toFixed(1)}MB, He const total_ms = t3 - t0 console.log( `Per-event: time=${(total_ms / total_events).toFixed(4)}ms, ` + - `heap=${(((mem_after.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB, ` + - `heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB` + `heap=${((mem_after.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB, ` + + `heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB` ) console.log(`\nProcessed: ${processed_count}/${total_events}`) diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts index b843f05..571cc06 100644 --- a/bubus-ts/tests/comprehensive_patterns.test.ts +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -257,7 +257,7 @@ test('done() on non-proxied event keeps bus paused during queue-jump', async () // Dispatch child via the raw bus (not the proxied event.bus) const child = bus.dispatch(ChildEvent({})) // Get the raw (non-proxied) event - const raw_child = child._original_event ?? child + const raw_child = child._event_original ?? child // done() on raw event bypasses handler_result injection from proxy await raw_child.done() // After done() returns, bus should still be paused because @@ -753,11 +753,11 @@ test('BUG: queue-jump two-bus bus-serial handlers should serialize on each bus', const bus_a = new EventBus('QJ2BS_A', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const bus_b = new EventBus('QJ2BS_B', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const log: string[] = [] @@ -821,11 +821,11 @@ test('BUG: queue-jump two-bus global-serial handlers should serialize across bot // Global-serial means ONE handler at a time GLOBALLY, across all buses. const bus_a = new EventBus('QJ2GS_A', { event_concurrency: 'bus-serial', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', }) const bus_b = new EventBus('QJ2GS_B', { event_concurrency: 'bus-serial', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', }) const log: string[] = [] @@ -898,11 +898,11 @@ test('BUG: queue-jump two-bus mixed: bus_a bus-serial, bus_b parallel', async () const bus_a = new EventBus('QJ2Mix1_A', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const bus_b = new EventBus('QJ2Mix1_B', { event_concurrency: 'bus-serial', - handler_concurrency: 'parallel', // bus_b handlers should run in parallel + event_handler_concurrency: 'parallel', // bus_b handlers should run in parallel }) const log: string[] = [] @@ -961,11 +961,11 @@ test('BUG: queue-jump two-bus mixed: bus_a parallel, bus_b bus-serial', async () const bus_a = new EventBus('QJ2Mix2_A', { event_concurrency: 'bus-serial', - handler_concurrency: 'parallel', // bus_a handlers should run in parallel + event_handler_concurrency: 'parallel', // bus_a handlers should run in parallel }) const bus_b = new EventBus('QJ2Mix2_B', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const log: string[] = [] @@ -1037,11 +1037,11 @@ test('BUG: queue-jump should respect bus-serial event concurrency on forward bus const bus_a = new EventBus('QJEvt_A', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const bus_b = new EventBus('QJEvt_B', { event_concurrency: 'bus-serial', // only one event at a time on bus_b - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const log: string[] = [] @@ -1110,11 +1110,11 @@ test('queue-jump with fully-parallel forward bus starts immediately', async () = const bus_a = new EventBus('QJFullPar_A', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const bus_b = new EventBus('QJFullPar_B', { event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) const log: string[] = [] @@ -1162,11 +1162,11 @@ test('queue-jump with parallel events but bus-serial handlers on forward bus ser const bus_a = new EventBus('QJEvtParHSer_A', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const bus_b = new EventBus('QJEvtParHSer_B', { event_concurrency: 'parallel', // events can start concurrently - handler_concurrency: 'bus-serial', // but handlers serialize + event_handler_concurrency: 'bus-serial', // but handlers serialize }) const log: string[] = [] diff --git a/bubus-ts/tests/event_bus_proxy.test.ts b/bubus-ts/tests/event_bus_proxy.test.ts index 02e8159..0a910ad 100644 --- a/bubus-ts/tests/event_bus_proxy.test.ts +++ b/bubus-ts/tests/event_bus_proxy.test.ts @@ -154,6 +154,8 @@ test('event.bus.dispatch sets parent-child relationships through 3 levels', asyn // Parent-child relationships are set correctly assert.equal(child_ref!.event_parent_id, parent_event.event_id) assert.equal(grandchild_ref!.event_parent_id, child_ref!.event_id) + assert.equal(child_ref!.event_parent?.event_id, parent_event.event_id) + assert.equal(grandchild_ref!.event_parent?.event_id, child_ref!.event_id) }) test('event.bus with forwarding: child dispatched via event.bus goes to the correct bus', async () => { @@ -217,6 +219,7 @@ test('event.bus.dispatch from handler correctly attributes event_emitted_by_hand const child = Array.from(bus.event_history.values()).find((e) => e.event_type === 'ChildEvent') assert.ok(child, 'child event should be in history') assert.equal(child!.event_parent_id, parent.event_id) + assert.equal(child!.event_parent?.event_id, parent.event_id) // The child should have event_emitted_by_handler_id set to the handler that emitted it assert.ok(child!.event_emitted_by_handler_id, 'event_emitted_by_handler_id should be set on child events dispatched via event.bus') diff --git a/bubus-ts/tests/eventbus_basics.test.ts b/bubus-ts/tests/eventbus_basics.test.ts index abc3bff..ac3fbcc 100644 --- a/bubus-ts/tests/eventbus_basics.test.ts +++ b/bubus-ts/tests/eventbus_basics.test.ts @@ -18,7 +18,7 @@ test('EventBus initializes with correct defaults', async () => { assert.equal(bus.name, 'DefaultsBus') assert.equal(bus.max_history_size, 100) assert.equal(bus.event_concurrency_default, 'bus-serial') - assert.equal(bus.handler_concurrency_default, 'bus-serial') + assert.equal(bus.event_handler_concurrency_default, 'bus-serial') assert.equal(bus.event_timeout_default, 60) assert.equal(bus.event_history.size, 0) assert.ok(EventBus._all_instances.has(bus)) @@ -29,13 +29,13 @@ test('EventBus applies custom options', () => { const bus = new EventBus('CustomBus', { max_history_size: 500, event_concurrency: 'parallel', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', event_timeout: 30, }) assert.equal(bus.max_history_size, 500) assert.equal(bus.event_concurrency_default, 'parallel') - assert.equal(bus.handler_concurrency_default, 'global-serial') + assert.equal(bus.event_handler_concurrency_default, 'global-serial') assert.equal(bus.event_timeout_default, 30) }) @@ -70,7 +70,7 @@ test('EventBus exposes locks API surface', () => { test('EventBus locks methods are callable and preserve semaphore resolution behavior', async () => { const bus = new EventBus('GateInvocationBus', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const GateEvent = BaseEvent.extend('GateInvocationEvent', {}) @@ -90,20 +90,20 @@ test('EventBus locks methods are callable and preserve semaphore resolution beha const event_with_global = GateEvent({ event_concurrency: 'global-serial', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', }) assert.equal(bus.locks.getSemaphoreForEvent(event_with_global), LockManager.global_event_semaphore) assert.equal(bus.locks.getSemaphoreForHandler(event_with_global), LockManager.global_handler_semaphore) const event_with_parallel = GateEvent({ event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) assert.equal(bus.locks.getSemaphoreForEvent(event_with_parallel), null) assert.equal(bus.locks.getSemaphoreForHandler(event_with_parallel), null) const event_using_handler_options = GateEvent({}) - assert.equal(bus.locks.getSemaphoreForHandler(event_using_handler_options, { handler_concurrency: 'parallel' }), null) + assert.equal(bus.locks.getSemaphoreForHandler(event_using_handler_options, { event_handler_concurrency: 'parallel' }), null) bus.dispatch(GateEvent({})) bus.locks.notifyIdleListeners() @@ -126,6 +126,40 @@ test('BaseEvent lifecycle methods are callable and preserve lifecycle behavior', assert.equal(dispatched.event_status, 'completed') }) +test('BaseEvent toJSON/fromJSON roundtrips runtime fields and event_results', async () => { + const RuntimeEvent = BaseEvent.extend('RuntimeSerializationEvent', { + event_result_schema: z.string(), + }) + const bus = new EventBus('RuntimeSerializationBus') + + bus.on(RuntimeEvent, () => 'ok') + + const event = bus.dispatch(RuntimeEvent({})) + await event.done() + + const json = event.toJSON() as Record + assert.equal(json.event_status, 'completed') + assert.equal(typeof json.event_created_ts, 'number') + assert.equal(typeof json.event_started_ts, 'number') + assert.equal(typeof json.event_completed_ts, 'number') + assert.equal(json.event_pending_bus_count, 0) + assert.ok(Array.isArray(json.event_results)) + const json_results = json.event_results as Array> + assert.equal(json_results.length, 1) + assert.equal(json_results[0].status, 'completed') + assert.equal(json_results[0].result, 'ok') + assert.equal((json_results[0].handler as Record).id, Array.from(event.event_results.values())[0].handler_id) + + const restored = RuntimeEvent.fromJSON?.(json) ?? RuntimeEvent(json as never) + assert.equal(restored.event_status, 'completed') + assert.equal(restored.event_created_ts, event.event_created_ts) + assert.equal(restored.event_pending_bus_count, 0) + assert.equal(restored.event_results.size, 1) + const restored_result = Array.from(restored.event_results.values())[0] + assert.equal(restored_result.status, 'completed') + assert.equal(restored_result.result, 'ok') +}) + // ─── Event dispatch and status lifecycle ───────────────────────────────────── test('dispatch returns pending event with correct initial state', async () => { @@ -141,7 +175,7 @@ test('dispatch returns pending event with correct initial state', async () => { assert.equal((event as any).data, 'hello') // event_path should include the bus name - const original = event._original_event ?? event + const original = event._event_original ?? event assert.ok(original.event_path.includes('LifecycleBus')) await bus.waitUntilIdle() @@ -158,7 +192,7 @@ test('event transitions through pending -> started -> completed', async () => { }) const event = bus.dispatch(TestEvent({})) - const original = event._original_event ?? event + const original = event._event_original ?? event await event.done() @@ -175,7 +209,7 @@ test('event with no handlers completes immediately', async () => { const event = bus.dispatch(OrphanEvent({})) await event.done() - const original = event._original_event ?? event + const original = event._event_original ?? event assert.equal(original.event_status, 'completed') assert.equal(original.event_results.size, 0) }) @@ -305,7 +339,7 @@ test('handler error is captured without crashing the bus', async () => { const event = bus.dispatch(ErrorEvent({})) await event.done() - const original = event._original_event ?? event + const original = event._event_original ?? event assert.equal(original.event_status, 'completed') assert.ok(original.event_errors.length > 0, 'event should record the error') @@ -320,7 +354,7 @@ test('handler error is captured without crashing the bus', async () => { test('one handler error does not prevent other handlers from running', async () => { const bus = new EventBus('IsolationBus', { max_history_size: 100, - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) const MultiEvent = BaseEvent.extend('MultiEvent', {}) @@ -341,7 +375,7 @@ test('one handler error does not prevent other handlers from running', async () const event = bus.dispatch(MultiEvent({})) await event.done() - const original = event._original_event ?? event + const original = event._event_original ?? event assert.equal(original.event_status, 'completed') // Both non-erroring handlers should have run @@ -395,7 +429,7 @@ test('dispatch applies bus event_timeout_default when event has null timeout', a const TEvent = BaseEvent.extend('TEvent', {}) const event = bus.dispatch(TEvent({})) - const original = event._original_event ?? event + const original = event._event_original ?? event // The bus should have applied its default timeout assert.equal(original.event_timeout, 42) @@ -411,7 +445,7 @@ test('event with explicit timeout is not overridden by bus default', async () => const TEvent = BaseEvent.extend('TEvent', {}) const event = bus.dispatch(TEvent({ event_timeout: 10 })) - const original = event._original_event ?? event + const original = event._event_original ?? event assert.equal(original.event_timeout, 10) @@ -471,7 +505,7 @@ test('circular forwarding does not cause infinite loop', async () => { assert.equal(handler_calls.filter((h) => h === 'C').length, 1) // event_path should contain all three buses - const original = event._original_event ?? event + const original = event._event_original ?? event assert.ok(original.event_path.includes('CircA')) assert.ok(original.event_path.includes('CircB')) assert.ok(original.event_path.includes('CircC')) @@ -511,6 +545,51 @@ test('unreferenced EventBus can be garbage collected (not retained by _all_insta ) }) +test('unreferenced buses with event history are garbage collected without destroy()', async () => { + const gc = globalThis.gc as (() => void) | undefined + if (typeof gc !== 'function') { + return + } + + const GcEvent = BaseEvent.extend('GcNoDestroyEvent', {}) + const weak_refs: Array> = [] + + gc() + await delay(20) + gc() + const heap_before = process.memoryUsage().heapUsed + + const create_and_run_bus = async (index: number): Promise> => { + const bus = new EventBus(`GC-NoDestroy-${index}`, { max_history_size: 200 }) + bus.on(GcEvent, () => {}) + for (let i = 0; i < 200; i += 1) { + const event = bus.dispatch(GcEvent({})) + await event.done() + } + await bus.waitUntilIdle() + return new WeakRef(bus) + } + + for (let i = 0; i < 120; i += 1) { + weak_refs.push(await create_and_run_bus(i)) + } + + for (let i = 0; i < 30; i += 1) { + gc() + await delay(20) + } + + const alive_count = weak_refs.reduce((count, ref) => count + (ref.deref() ? 1 : 0), 0) + const heap_after = process.memoryUsage().heapUsed + + assert.equal(alive_count, 0, 'all unreferenced buses should be garbage collected without explicit destroy()') + assert.equal(EventBus._all_instances.size, 0, '_all_instances should not retain unreferenced buses') + assert.ok( + heap_after <= heap_before + 20 * 1024 * 1024, + `heap should return near baseline after GC, before=${(heap_before / 1024 / 1024).toFixed(1)}MB after=${(heap_after / 1024 / 1024).toFixed(1)}MB` + ) +}) + // ─── off() handler deregistration ──────────────────────────────────────────── test('off() removes a handler so it no longer fires', async () => { diff --git a/bubus-ts/tests/locking.test.ts b/bubus-ts/tests/locking.test.ts index f7ac09b..06e1022 100644 --- a/bubus-ts/tests/locking.test.ts +++ b/bubus-ts/tests/locking.test.ts @@ -74,7 +74,7 @@ M) Edge-cases - Multiple handlers for same event type with different options collide. - Handler throws synchronously before await (still counted, no leaks). - Handler returns a rejected promise (properly surfaced). -- Event emitted with event_concurrency/handler_concurrency invalid value (schema rejects). +- Event emitted with event_concurrency/event_handler_concurrency invalid value (schema rejects). - Event emitted with no bus set (done should reject). */ @@ -188,11 +188,11 @@ test('global-serial: handler semaphore serializes handlers across buses', async const bus_a = new EventBus('GlobalHandlerA', { event_concurrency: 'parallel', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', }) const bus_b = new EventBus('GlobalHandlerB', { event_concurrency: 'parallel', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', }) let in_flight = 0 @@ -355,7 +355,7 @@ test('parallel: events overlap on same bus when event_concurrency is parallel', const ParallelEvent = BaseEvent.extend('ParallelEvent', { order: z.number() }) const bus = new EventBus('ParallelEventBus', { event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) let in_flight = 0 @@ -378,11 +378,11 @@ test('parallel: events overlap on same bus when event_concurrency is parallel', assert.ok(max_in_flight >= 2) }) -test('parallel: handlers overlap for same event when handler_concurrency is parallel', async () => { +test('parallel: handlers overlap for same event when event_handler_concurrency is parallel', async () => { const ParallelHandlerEvent = BaseEvent.extend('ParallelHandlerEvent', {}) const bus = new EventBus('ParallelHandlerBus', { event_concurrency: 'bus-serial', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) let in_flight = 0 @@ -422,11 +422,11 @@ test('parallel: global-serial handler semaphore still serializes across buses', const bus_a = new EventBus('ParallelHandlerGlobalA', { event_concurrency: 'parallel', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', }) const bus_b = new EventBus('ParallelHandlerGlobalB', { event_concurrency: 'parallel', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', }) let in_flight = 0 @@ -453,11 +453,11 @@ test('parallel: global-serial handler semaphore still serializes across buses', assert.equal(max_in_flight, 1) }) -test('precedence: event handler_concurrency overrides handler options', async () => { +test('precedence: event event_handler_concurrency overrides handler options', async () => { const OverrideEvent = BaseEvent.extend('OverrideEvent', { - handler_concurrency: z.literal('bus-serial'), + event_handler_concurrency: z.literal('bus-serial'), }) - const bus = new EventBus('OverrideBus', { handler_concurrency: 'parallel' }) + const bus = new EventBus('OverrideBus', { event_handler_concurrency: 'parallel' }) let in_flight = 0 let max_in_flight = 0 @@ -470,10 +470,10 @@ test('precedence: event handler_concurrency overrides handler options', async () in_flight -= 1 } - bus.on(OverrideEvent, handler, { handler_concurrency: 'parallel' }) - bus.on(OverrideEvent, handler, { handler_concurrency: 'parallel' }) + bus.on(OverrideEvent, handler, { event_handler_concurrency: 'parallel' }) + bus.on(OverrideEvent, handler, { event_handler_concurrency: 'parallel' }) - const event = bus.dispatch(OverrideEvent({ handler_concurrency: 'bus-serial' })) + const event = bus.dispatch(OverrideEvent({ event_handler_concurrency: 'bus-serial' })) await sleep(0) resolve() await event.done() @@ -484,7 +484,7 @@ test('precedence: event handler_concurrency overrides handler options', async () test('precedence: handler options override bus defaults when event has no override', async () => { const OptionEvent = BaseEvent.extend('OptionEvent', {}) - const bus = new EventBus('OptionBus', { handler_concurrency: 'bus-serial' }) + const bus = new EventBus('OptionBus', { event_handler_concurrency: 'bus-serial' }) let in_flight = 0 let max_in_flight = 0 @@ -504,8 +504,8 @@ test('precedence: handler options override bus defaults when event has no overri in_flight -= 1 } - bus.on(OptionEvent, handler_a, { handler_concurrency: 'parallel' }) - bus.on(OptionEvent, handler_b, { handler_concurrency: 'parallel' }) + bus.on(OptionEvent, handler_a, { event_handler_concurrency: 'parallel' }) + bus.on(OptionEvent, handler_b, { event_handler_concurrency: 'parallel' }) const event = bus.dispatch(OptionEvent({})) await sleep(0) @@ -516,11 +516,11 @@ test('precedence: handler options override bus defaults when event has no overri assert.ok(max_in_flight >= 2) }) -test('precedence: event handler_concurrency overrides handler options to parallel', async () => { +test('precedence: event event_handler_concurrency overrides handler options to parallel', async () => { const OverrideEvent = BaseEvent.extend('OverrideEventParallelHandlers', { - handler_concurrency: z.literal('parallel'), + event_handler_concurrency: z.literal('parallel'), }) - const bus = new EventBus('OverrideParallelHandlersBus', { handler_concurrency: 'bus-serial' }) + const bus = new EventBus('OverrideParallelHandlersBus', { event_handler_concurrency: 'bus-serial' }) let in_flight = 0 let max_in_flight = 0 @@ -533,10 +533,10 @@ test('precedence: event handler_concurrency overrides handler options to paralle in_flight -= 1 } - bus.on(OverrideEvent, handler, { handler_concurrency: 'bus-serial' }) - bus.on(OverrideEvent, handler, { handler_concurrency: 'bus-serial' }) + bus.on(OverrideEvent, handler, { event_handler_concurrency: 'bus-serial' }) + bus.on(OverrideEvent, handler, { event_handler_concurrency: 'bus-serial' }) - const event = bus.dispatch(OverrideEvent({ handler_concurrency: 'parallel' })) + const event = bus.dispatch(OverrideEvent({ event_handler_concurrency: 'parallel' })) await sleep(0) resolve() await event.done() @@ -552,7 +552,7 @@ test('precedence: event event_concurrency overrides bus defaults to parallel', a }) const bus = new EventBus('OverrideParallelEventsBus', { event_concurrency: 'bus-serial', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) let in_flight = 0 @@ -583,7 +583,7 @@ test('precedence: event event_concurrency overrides bus defaults to bus-serial', }) const bus = new EventBus('OverrideBusSerialEventsBus', { event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) let in_flight = 0 @@ -611,11 +611,11 @@ test('global-serial + handler parallel: handlers overlap but events do not acros const bus_a = new EventBus('GlobalSerialParallelA', { event_concurrency: 'global-serial', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) const bus_b = new EventBus('GlobalSerialParallelB', { event_concurrency: 'global-serial', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) let in_flight = 0 @@ -647,7 +647,7 @@ test('event parallel + handler bus-serial: handlers serialize within a bus acros const ParallelEvent = BaseEvent.extend('ParallelEventsSerialHandlers', { order: z.number() }) const bus = new EventBus('ParallelEventsSerialHandlersBus', { event_concurrency: 'parallel', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) let in_flight = 0 @@ -675,11 +675,11 @@ test('event parallel + handler bus-serial: handlers overlap across buses', async const bus_a = new EventBus('ParallelBusHandlersA', { event_concurrency: 'parallel', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const bus_b = new EventBus('ParallelBusHandlersB', { event_concurrency: 'parallel', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) let in_flight = 0 @@ -710,11 +710,11 @@ test('handler options can enforce global-serial even when bus defaults to parall const bus_a = new EventBus('HandlerOptionsGlobalA', { event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) const bus_b = new EventBus('HandlerOptionsGlobalB', { event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) let in_flight = 0 @@ -728,8 +728,8 @@ test('handler options can enforce global-serial even when bus defaults to parall in_flight -= 1 } - bus_a.on(HandlerEvent, handler, { handler_concurrency: 'global-serial' }) - bus_b.on(HandlerEvent, handler, { handler_concurrency: 'global-serial' }) + bus_a.on(HandlerEvent, handler, { event_handler_concurrency: 'global-serial' }) + bus_b.on(HandlerEvent, handler, { event_handler_concurrency: 'global-serial' }) bus_a.dispatch(HandlerEvent({ source: 'a' })) bus_b.dispatch(HandlerEvent({ source: 'b' })) @@ -763,11 +763,11 @@ test('auto: event_concurrency auto resolves to bus defaults', async () => { assert.equal(max_in_flight, 1) }) -test('auto: handler_concurrency auto resolves to bus defaults', async () => { +test('auto: event_handler_concurrency auto resolves to bus defaults', async () => { const AutoHandlerEvent = BaseEvent.extend('AutoHandlerEvent', { - handler_concurrency: z.literal('auto'), + event_handler_concurrency: z.literal('auto'), }) - const bus = new EventBus('AutoHandlerBus', { handler_concurrency: 'bus-serial' }) + const bus = new EventBus('AutoHandlerBus', { event_handler_concurrency: 'bus-serial' }) let in_flight = 0 let max_in_flight = 0 @@ -783,7 +783,7 @@ test('auto: handler_concurrency auto resolves to bus defaults', async () => { bus.on(AutoHandlerEvent, handler) bus.on(AutoHandlerEvent, handler) - const event = bus.dispatch(AutoHandlerEvent({ handler_concurrency: 'auto' })) + const event = bus.dispatch(AutoHandlerEvent({ event_handler_concurrency: 'auto' })) await sleep(0) resolve() await event.done() @@ -898,7 +898,7 @@ test('queue-jump: awaiting in-flight event does not double-run handlers', async const InFlightEvent = BaseEvent.extend('InFlightEvent', {}) const bus = new EventBus('InFlightBus', { event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) let handler_runs = 0 diff --git a/bubus-ts/tests/parent_child.test.ts b/bubus-ts/tests/parent_child.test.ts index 698c5b4..8470772 100644 --- a/bubus-ts/tests/parent_child.test.ts +++ b/bubus-ts/tests/parent_child.test.ts @@ -22,6 +22,7 @@ test('eventIsChildOf and eventIsParentOf work for direct children', async () => assert.ok(child_event) assert.equal(child_event.event_parent_id, parent_event.event_id) + assert.equal(child_event.event_parent?.event_id, parent_event.event_id) assert.equal(bus.eventIsChildOf(child_event, parent_event), true) assert.equal(bus.eventIsParentOf(parent_event, child_event), true) }) @@ -48,6 +49,8 @@ test('eventIsChildOf works for grandchildren', async () => { assert.equal(bus.eventIsChildOf(child_event, parent_event), true) assert.equal(bus.eventIsChildOf(grandchild_event, parent_event), true) + assert.equal(child_event.event_parent?.event_id, parent_event.event_id) + assert.equal(grandchild_event.event_parent?.event_id, child_event.event_id) assert.equal(bus.eventIsParentOf(parent_event, grandchild_event), true) }) diff --git a/bubus-ts/tests/performance.test.ts b/bubus-ts/tests/performance.test.ts index 0d4d849..4e012e7 100644 --- a/bubus-ts/tests/performance.test.ts +++ b/bubus-ts/tests/performance.test.ts @@ -48,7 +48,7 @@ test('processes 50k events within reasonable time', { timeout: 30_000 }, async ( `\n perf: ${total_events} events in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + `\n dispatch: ${dispatch_ms}ms | await: ${await_ms}ms` + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ dispatch=${mb(mem_dispatch.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + - `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${(((mem_done.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB | heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB` + + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${((mem_done.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB | heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB` + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` ) @@ -103,7 +103,7 @@ test('500 ephemeral buses with 100 events each', { timeout: 30_000 }, async () = console.log( `\n perf: ${total_buses} buses Γ— ${events_per_bus} events = ${total_events} total in ${total_ms}ms (${Math.round(total_events / (total_ms / 1000))}/s)` + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + - `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${(((mem_done.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB | heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB` + + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${((mem_done.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB | heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB` + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + `\n live bus instances: ${EventBus._all_instances.size}` ) @@ -145,10 +145,8 @@ test('50k events with ephemeral on/off handler registration across 2 buses', { t const bus_b_any = bus_b as any const original_process_a = typeof bus_a_any.processEvent === 'function' ? bus_a_any.processEvent.bind(bus_a) : null const original_process_b = typeof bus_b_any.processEvent === 'function' ? bus_b_any.processEvent.bind(bus_b) : null - const original_run_handler_a = - typeof bus_a_any.runEventHandler === 'function' ? bus_a_any.runEventHandler.bind(bus_a) : null - const original_run_handler_b = - typeof bus_b_any.runEventHandler === 'function' ? bus_b_any.runEventHandler.bind(bus_b) : null + const original_run_handler_a = typeof bus_a_any.runEventHandler === 'function' ? bus_a_any.runEventHandler.bind(bus_a) : null + const original_run_handler_b = typeof bus_b_any.runEventHandler === 'function' ? bus_b_any.runEventHandler.bind(bus_b) : null if (original_process_a) { bus_a_any.processEvent = async (event: any) => { @@ -240,7 +238,7 @@ test('50k events with ephemeral on/off handler registration across 2 buses', { t `\n timings: on=${on_ms.toFixed(0)}ms | off=${off_ms.toFixed(0)}ms | dispatch_a=${dispatch_a_ms.toFixed(0)}ms | dispatch_b=${dispatch_b_ms.toFixed(0)}ms | done=${done_ms.toFixed(0)}ms` + `\n processing: bus_a=${process_a_ms.toFixed(0)}ms | bus_b=${process_b_ms.toFixed(0)}ms | handlers_a=${handler_a_ms.toFixed(0)}ms | handlers_b=${handler_b_ms.toFixed(0)}ms` + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB` + - `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${(((mem_done.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB | heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / total_events) / 1024).toFixed(2)}KB` + + `\n per-event: time=${(total_ms / total_events).toFixed(4)}ms | heap=${((mem_done.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB | heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB` + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + `\n bus_a handlers: ${bus_a.handlers.size} | bus_b handlers: ${bus_b.handlers.size}` ) @@ -390,7 +388,7 @@ test('worst-case: forwarding + queue-jump + timeouts + cancellation at scale', { `\n child: bus_c=${child_handled_c} | grandchild=${grandchild_handled}` + `\n timeouts=${timeout_count} cancellations=${cancel_count}` + `\n memory: before=${mb(mem_before.heapUsed)}MB β†’ done=${mb(mem_done.heapUsed)}MB β†’ gc=${mb(mem_gc.heapUsed)}MB (delta=${mem_delta_mb.toFixed(1)}MB)` + - `\n per-event (est): time=${(total_ms / estimated_events).toFixed(4)}ms | heap=${(((mem_done.heapUsed - mem_before.heapUsed) / estimated_events) / 1024).toFixed(2)}KB | heap_gc=${(((mem_gc.heapUsed - mem_before.heapUsed) / estimated_events) / 1024).toFixed(2)}KB` + + `\n per-event (est): time=${(total_ms / estimated_events).toFixed(4)}ms | heap=${((mem_done.heapUsed - mem_before.heapUsed) / estimated_events / 1024).toFixed(2)}KB | heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / estimated_events / 1024).toFixed(2)}KB` + `\n rss: before=${mb(mem_before.rss)}MB β†’ done=${mb(mem_done.rss)}MB β†’ gc=${mb(mem_gc.rss)}MB` + `\n history: a=${bus_a.event_history.size} b=${bus_b.event_history.size} c=${bus_c.event_history.size}` + `\n handlers: a=${bus_a.handlers.size} b=${bus_b.handlers.size} c=${bus_c.handlers.size}` + diff --git a/bubus-ts/tests/timeout.test.ts b/bubus-ts/tests/timeout.test.ts index 0a8bb4e..c584110 100644 --- a/bubus-ts/tests/timeout.test.ts +++ b/bubus-ts/tests/timeout.test.ts @@ -135,7 +135,7 @@ test('handler timeouts fire across concurrency modes', async () => { for (const handler_mode of modes) { const bus = new EventBus(`Timeout-${event_mode}-${handler_mode}`, { event_concurrency: event_mode, - handler_concurrency: handler_mode, + event_handler_concurrency: handler_mode, }) bus.on(TimeoutEvent, async () => { @@ -161,7 +161,7 @@ test('handler timeouts fire across concurrency modes', async () => { test('timeout still marks event failed when other handlers finish', async () => { const bus = new EventBus('TimeoutParallelHandlers', { event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) const results: string[] = [] @@ -292,7 +292,7 @@ test('slow handler and slow event warnings can both fire', async () => { test('event-level concurrency overrides do not bypass timeouts', async () => { const bus = new EventBus('TimeoutEventOverrideBus', { event_concurrency: 'global-serial', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', }) bus.on(TimeoutEvent, async () => { @@ -304,7 +304,7 @@ test('event-level concurrency overrides do not bypass timeouts', async () => { TimeoutEvent({ event_timeout: 0.01, event_concurrency: 'parallel', - handler_concurrency: 'parallel', + event_handler_concurrency: 'parallel', }) ) await event.done() @@ -317,7 +317,7 @@ test('event-level concurrency overrides do not bypass timeouts', async () => { test('handler-level concurrency overrides do not bypass timeouts', async () => { const bus = new EventBus('TimeoutHandlerOverrideBus', { event_concurrency: 'parallel', - handler_concurrency: 'global-serial', + event_handler_concurrency: 'global-serial', }) const order: string[] = [] @@ -330,7 +330,7 @@ test('handler-level concurrency overrides do not bypass timeouts', async () => { order.push('slow_end') return 'slow' }, - { handler_concurrency: 'bus-serial' } + { event_handler_concurrency: 'bus-serial' } ) bus.on( @@ -341,7 +341,7 @@ test('handler-level concurrency overrides do not bypass timeouts', async () => { order.push('fast_end') return 'fast' }, - { handler_concurrency: 'parallel' } + { event_handler_concurrency: 'parallel' } ) const event = bus.dispatch(TimeoutEvent({ event_timeout: 0.01 })) @@ -423,7 +423,7 @@ for (const handler_mode of STEP1_HANDLER_MODES) { const bus = new EventBus(`TimeoutLeakBus-${handler_mode}`, { event_concurrency: 'bus-serial', - handler_concurrency: handler_mode, + event_handler_concurrency: handler_mode, }) const semaphore = getHandlerSemaphore(bus, handler_mode) const baseline_in_use = semaphore.in_use @@ -480,7 +480,7 @@ for (const handler_mode of STEP1_HANDLER_MODES) { const bus = new EventBus(`TimeoutContentionBus-${handler_mode}`, { event_concurrency: 'bus-serial', - handler_concurrency: handler_mode, + event_handler_concurrency: handler_mode, }) const semaphore = getHandlerSemaphore(bus, handler_mode) const baseline_in_use = semaphore.in_use @@ -491,7 +491,7 @@ for (const handler_mode of STEP1_HANDLER_MODES) { }) bus.on(ParentEvent, async (event) => { - const child = event.bus?.emit(ChildEvent({ event_timeout: 0.2, handler_concurrency: 'parallel' }))! + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.2, event_handler_concurrency: 'parallel' }))! await child.done() return 'parent_main' }) @@ -522,7 +522,7 @@ for (const handler_mode of STEP1_HANDLER_MODES) { const bus = new EventBus(`TimeoutFollowupBus-${handler_mode}`, { event_concurrency: 'bus-serial', - handler_concurrency: handler_mode, + event_handler_concurrency: handler_mode, }) const semaphore = getHandlerSemaphore(bus, handler_mode) const baseline_in_use = semaphore.in_use @@ -586,7 +586,7 @@ for (const handler_mode of STEP1_HANDLER_MODES) { const bus = new EventBus(`NestedPermitBus-${handler_mode}`, { event_concurrency: 'bus-serial', - handler_concurrency: handler_mode, + event_handler_concurrency: handler_mode, }) const semaphore = getHandlerSemaphore(bus, handler_mode) const baseline_in_use = semaphore.in_use @@ -653,7 +653,7 @@ test('parent timeout cancels pending child handler results under serial handler const bus = new EventBus('TimeoutCancelBus', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) let child_runs = 0 @@ -729,7 +729,7 @@ test('multi-level timeout cascade with mixed cancellations', async () => { const bus = new EventBus('TimeoutCascadeBus', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) let queued_child: InstanceType | null = null @@ -890,7 +890,7 @@ test('three-level timeout cascade with per-level timeouts and cascading cancella const bus = new EventBus('Cascade3LevelBus', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) const execution_log: string[] = [] @@ -1187,7 +1187,7 @@ test('cancellation error chain preserves cause references through hierarchy', as const bus = new EventBus('ErrorChainBus', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', }) let inner_ref: InstanceType | null = null @@ -1270,7 +1270,7 @@ test('parent timeout cancels children that have no timeout of their own', async const bus = new EventBus('TimeoutBoundaryBus', { event_concurrency: 'bus-serial', - handler_concurrency: 'bus-serial', + event_handler_concurrency: 'bus-serial', event_timeout: null, // no bus-level default }) diff --git a/bubus-ts/tests/typed_results.test.ts b/bubus-ts/tests/typed_results.test.ts index f498349..acec6fa 100644 --- a/bubus-ts/tests/typed_results.test.ts +++ b/bubus-ts/tests/typed_results.test.ts @@ -34,6 +34,35 @@ const ComplexResultEvent = BaseEvent.extend('ComplexResultEvent', { const NoSchemaEvent = BaseEvent.extend('NoSchemaEvent', {}) +const AutoObjectResultEvent = BaseEvent.extend('AutoObjectResultEvent', { + event_result_schema: z.object({ ok: z.boolean() }), +}) + +const AutoRecordResultEvent = BaseEvent.extend('AutoRecordResultEvent', { + event_result_schema: z.record(z.string(), z.number()), +}) + +const AutoMapResultEvent = BaseEvent.extend('AutoMapResultEvent', { + event_result_schema: z.map(z.string(), z.number()), +}) + +const AutoStringResultEvent = BaseEvent.extend('AutoStringResultEvent', { + event_result_schema: z.string(), +}) + +const AutoNumberResultEvent = BaseEvent.extend('AutoNumberResultEvent', { + event_result_schema: z.number(), +}) + +const AutoBooleanResultEvent = BaseEvent.extend('AutoBooleanResultEvent', { + event_result_schema: z.boolean(), +}) + +const ExplicitTypeWinsEvent = BaseEvent.extend('ExplicitTypeWinsEvent', { + event_result_schema: z.string(), + event_result_type: 'CustomResultType', +}) + test('typed result schema validates and parses handler result', async () => { const bus = new EventBus('TypedResultBus') @@ -111,6 +140,27 @@ test('complex result schema validates nested data', async () => { assert.deepEqual(result.result, { items: ['a', 'b'], metadata: { a: 1, b: 2 } }) }) +test('event_result_type auto-infers from common event_result_schema types', () => { + assert.equal(AutoObjectResultEvent.event_result_type, 'object') + assert.equal(AutoRecordResultEvent.event_result_type, 'object') + assert.equal(AutoMapResultEvent.event_result_type, 'object') + assert.equal(AutoStringResultEvent.event_result_type, 'string') + assert.equal(AutoNumberResultEvent.event_result_type, 'number') + assert.equal(AutoBooleanResultEvent.event_result_type, 'boolean') + + assert.equal(AutoObjectResultEvent({}).event_result_type, 'object') + assert.equal(AutoRecordResultEvent({}).event_result_type, 'object') + assert.equal(AutoMapResultEvent({}).event_result_type, 'object') + assert.equal(AutoStringResultEvent({}).event_result_type, 'string') + assert.equal(AutoNumberResultEvent({}).event_result_type, 'number') + assert.equal(AutoBooleanResultEvent({}).event_result_type, 'boolean') +}) + +test('explicit event_result_type is not overridden by inference', () => { + assert.equal(ExplicitTypeWinsEvent.event_result_type, 'CustomResultType') + assert.equal(ExplicitTypeWinsEvent({}).event_result_type, 'CustomResultType') +}) + test('fromJSON converts event_result_schema into zod schema', async () => { const bus = new EventBus('FromJsonResultBus') From daa7dabcb00ad3811e051018f6a31d19363bb045 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sat, 7 Feb 2026 19:59:52 -0800 Subject: [PATCH 60/79] make awaiting event wait for processing on all busses, raise ExceptionGroup for multiple exceptions --- bubus/models.py | 97 +++++++++++-- bubus/service.py | 100 ++++++++++++- tests/test_comprehensive_patterns.py | 63 ++++++++- tests/test_eventbus.py | 91 ++++++++++++ tests/test_find.py | 125 +++++++++++++++++ tests/test_handler_timeout.py | 203 +++++++++++++++++++++++++++ tests/test_name_conflict_gc.py | 91 +++++++++++- tests/test_stress_20k_events.py | 189 +++++++++++++++++++++++++ 8 files changed, 940 insertions(+), 19 deletions(-) diff --git a/bubus/models.py b/bubus/models.py index 870fd79..b384506 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -300,6 +300,23 @@ def _remove_self_from_queue(self, bus: 'EventBus') -> bool: return True return False + def _is_queued_on_any_bus(self) -> bool: + """ + Check whether this event is currently queued on any live EventBus. + + This prevents premature completion when an event has been forwarded to + another bus but that bus hasn't processed it yet. + """ + from bubus.service import EventBus + + for bus in list(EventBus.all_instances): + if not bus or not bus.event_queue or not hasattr(bus.event_queue, '_queue'): + continue + queue = cast(deque[BaseEvent[Any]], bus.event_queue._queue) # type: ignore[attr-defined] + if self in queue: + return True + return False + async def _process_self_on_all_buses(self) -> None: """ Process this specific event on all buses where it's queued. @@ -576,18 +593,18 @@ async def event_results_filtered( } if raise_if_any and error_results: - failing_handler, failing_result = list(error_results.items())[0] # throw first error - original_error = failing_result.error or cast(Any, failing_result.result) - - # Log the handler context information instead of wrapping the exception - logger.debug(f'Event handler {failing_handler}({self}) returned an error -> {original_error}') - - # Re-raise the original exception to preserve its type and structured data - if isinstance(original_error, BaseException): - raise original_error - else: - # Fallback for non-exception errors (shouldn't happen in practice) - raise Exception(str(original_error)) + if len(error_results) == 1: + single_result = next(iter(error_results.values())) + single_error = single_result.error or cast(Any, single_result.result) + if isinstance(single_error, BaseException): + raise single_error + raise Exception(str(single_error)) + + collected_errors = self._collect_handler_errors(include_cancelled=True) + raise ExceptionGroup( + f'Event {self.event_type}#{self.event_id[-4:]} had {len(collected_errors)} handler error(s)', + collected_errors, + ) if raise_if_none and not included_results: raise ValueError( @@ -602,6 +619,54 @@ async def event_results_filtered( return event_results_by_handler_id + async def raise_if_errors( + self, + timeout: float | None = None, + include_cancelled: bool = False, + ) -> None: + """ + Raise an ExceptionGroup containing all handler errors for this event. + + This waits for event completion, then aggregates handler failures from + event_results. By default, asyncio.CancelledError entries are ignored. + """ + assert self.event_completed_signal is not None, 'Event cannot be awaited outside of an async context' + await asyncio.wait_for(self.event_completed_signal.wait(), timeout=timeout or self.event_timeout) + + collected_errors = self._collect_handler_errors(include_cancelled=include_cancelled) + + if collected_errors: + raise ExceptionGroup( + f'Event {self.event_type}#{self.event_id[-4:]} had {len(collected_errors)} handler error(s)', + collected_errors, + ) + + def _collect_handler_errors(self, include_cancelled: bool) -> list[Exception]: + """Collect handler errors as Exception instances for aggregation.""" + collected_errors: list[Exception] = [] + for event_result in self.event_results.values(): + original_error = event_result.error + if original_error is None and isinstance(event_result.result, BaseException): + original_error = event_result.result + + if original_error is None: + continue + + if isinstance(original_error, asyncio.CancelledError) and not include_cancelled: + continue + + if isinstance(original_error, Exception): + collected_errors.append(original_error) + continue + + wrapped = RuntimeError( + f'Non-Exception handler error from {event_result.eventbus_name}.{event_result.handler_name}: ' + f'{type(original_error).__name__}: {original_error}' + ) + wrapped.__cause__ = original_error + collected_errors.append(wrapped) + return collected_errors + async def event_results_by_handler_id( self, timeout: float | None = None, @@ -783,6 +848,11 @@ def event_mark_complete_if_all_handlers_completed(self) -> None: # ) return + # Forwarded events may still be waiting in another bus queue. + # Don't mark complete until all queue copies have been consumed. + if self._is_queued_on_any_bus(): + return + # Recursively check if all child events are also complete if not self.event_are_all_children_complete(): # incomplete_children = [c for c in self.event_children if c.event_status != 'completed'] @@ -871,7 +941,8 @@ def event_bus(self) -> 'EventBus': def attr_name_allowed(key: str) -> bool: - return key in pydantic_builtin_attrs or key in event_builtin_attrs or key.startswith('_') + allowed_unprefixed_attrs = {'raise_if_errors'} + return key in pydantic_builtin_attrs or key in event_builtin_attrs or key.startswith('_') or key in allowed_unprefixed_attrs # PSA: All BaseEvent buil-in attrs and methods must be prefixed with "event_" in order to avoid clashing with data contents (which share a namespace with the metadata) diff --git a/bubus/service.py b/bubus/service.py index 076124d..44c893e 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -1063,8 +1063,13 @@ def close_with_cleanup() -> None: self._on_idle = asyncio.Event() self._on_idle.clear() # Start in a busy state unless we confirm queue is empty by running step() at least once - # Create and start the run loop task - self._runloop_task = loop.create_task(self._run_loop(), name=f'{self}._run_loop') + # Create and start the run loop task. + # Use a weakref-based runner so an unreferenced EventBus can be GC'd + # without requiring explicit stop(clear=True) by callers. + self._runloop_task = loop.create_task( + EventBus._run_loop_weak(weakref.ref(self)), + name=f'{self}._run_loop', + ) self._is_running = True except RuntimeError: # No event loop - will start when one becomes available @@ -1229,6 +1234,97 @@ async def _run_loop(self) -> None: # Don't call stop() here as it might create new tasks self._is_running = False + @staticmethod + async def _run_loop_weak(bus_ref: 'weakref.ReferenceType[EventBus]') -> None: + """ + Weakref-based run loop. + + Unlike a bound coroutine (self._run_loop), this runner avoids holding a + strong EventBus reference while idle, allowing unreferenced buses to be + garbage-collected naturally without an explicit stop(). + """ + try: + while True: + bus = bus_ref() + if bus is None or not bus._is_running: + break + + queue = bus.event_queue + on_idle = bus._on_idle + del bus + + if queue is None or on_idle is None: + await asyncio.sleep(0.01) + continue + + event: BaseEvent[Any] | None = None + try: + get_next_queued_event = asyncio.create_task(queue.get()) + if hasattr(get_next_queued_event, '_log_destroy_pending'): + get_next_queued_event._log_destroy_pending = False # type: ignore[attr-defined] + has_next_event, _pending = await asyncio.wait({get_next_queued_event}, timeout=0.1) + if not has_next_event: + get_next_queued_event.cancel() + bus = bus_ref() + if bus is None: + break + if bus._on_idle and bus.event_queue: + if not (bus.events_pending or bus.events_started or bus.event_queue.qsize()): + bus._on_idle.set() + del bus + continue + + event = await get_next_queued_event + except QueueShutDown: + break + except asyncio.CancelledError: + break + except RuntimeError as e: + if 'Event loop is closed' in str(e) or 'no running event loop' in str(e): + break + logger.exception(f'❌ Weak run loop runtime error: {type(e).__name__} {e}', exc_info=True) + continue + except Exception as e: + logger.exception(f'❌ Weak run loop error: {type(e).__name__} {e}', exc_info=True) + continue + + bus = bus_ref() + if bus is None: + try: + queue.task_done() + except Exception: + pass + break + + try: + if bus._on_idle: + bus._on_idle.clear() + + async with _get_global_lock(): + if event is not None: + await bus.handle_event(event) + queue.task_done() + + if bus._on_idle and bus.event_queue: + if not (bus.events_pending or bus.events_started or bus.event_queue.qsize()): + bus._on_idle.set() + except QueueShutDown: + break + except asyncio.CancelledError: + break + except RuntimeError as e: + if 'Event loop is closed' in str(e) or 'no running event loop' in str(e): + break + logger.exception(f'❌ Weak run loop runtime error: {type(e).__name__} {e}', exc_info=True) + except Exception as e: + logger.exception(f'❌ Weak run loop error: {type(e).__name__} {e}', exc_info=True) + finally: + del bus + finally: + bus = bus_ref() + if bus is not None: + bus._is_running = False + async def _get_next_event(self, wait_for_timeout: float = 0.1) -> 'BaseEvent[Any] | None': """Get the next event from the queue""" diff --git a/tests/test_comprehensive_patterns.py b/tests/test_comprehensive_patterns.py index cd86ae9..e8a5784 100644 --- a/tests/test_comprehensive_patterns.py +++ b/tests/test_comprehensive_patterns.py @@ -81,9 +81,13 @@ async def parent_bus1_handler(event: ParentEvent) -> str: print(' Handlers that processed this event:') for result in child_event_sync.event_results.values(): print(f' - {result.handler_name} (bus: {result.eventbus_name})') - # The event was processed by bus1 using bus2.dispatch handler + # The event was forwarded from bus1 and processed by bus2. assert any( - 'bus2' in result.handler_name and 'dispatch' in result.handler_name + result.eventbus_name == 'bus1' and 'dispatch' in result.handler_name + for result in child_event_sync.event_results.values() + ) + assert any( + result.eventbus_name == 'bus2' and 'child_bus2_event_handler' in result.handler_name for result in child_event_sync.event_results.values() ) print(' Event was successfully forwarded to bus2') @@ -112,6 +116,9 @@ async def parent_bus1_handler(event: ParentEvent) -> str: await bus1.wait_until_idle() await bus2.wait_until_idle() + # This is a happy-path test: no handler should have errored. + assert all(result.error is None for result in parent_event.event_results.values()), parent_event.event_results + # Verify all child events have correct parent print('\n5. Verifying all events have correct parent...') all_events = list(bus1.event_history.values()) @@ -175,6 +182,47 @@ async def parent_bus1_handler(event: ParentEvent) -> str: await bus2.stop(clear=True) +async def test_await_forwarded_event_waits_for_target_bus_handlers(): + """ + Awaiting a dispatched event on source bus must wait for forwarded target-bus + handlers too, not only the source forwarding handler. + """ + bus_src = EventBus(name='ForwardWaitSrc') + bus_dst = EventBus(name='ForwardWaitDst') + + class ForwardedEvent(BaseEvent[str]): + pass + + target_started = asyncio.Event() + target_finished = asyncio.Event() + + async def target_handler(event: ForwardedEvent) -> str: + target_started.set() + await asyncio.sleep(0.05) + target_finished.set() + return 'target_done' + + bus_src.on('*', bus_dst.dispatch) + bus_dst.on(ForwardedEvent, target_handler) + + try: + t0 = asyncio.get_running_loop().time() + event = await bus_src.dispatch(ForwardedEvent()) + elapsed = asyncio.get_running_loop().time() - t0 + + assert target_started.is_set() + assert target_finished.is_set() + assert elapsed >= 0.04 + assert any( + result.eventbus_name == 'ForwardWaitDst' and result.handler_name.endswith('target_handler') + for result in event.event_results.values() + ), event.event_results + assert all(result.status in ('completed', 'error') for result in event.event_results.values()) + finally: + await bus_src.stop(clear=True) + await bus_dst.stop(clear=True) + + async def test_race_condition_stress(): """Stress test to ensure no race conditions.""" print('\n=== Test Race Condition Stress ===') @@ -727,6 +775,7 @@ async def test_multiple_awaits_same_event(): bus = EventBus(name='MultiAwaitBus', max_history_size=100) execution_order: list[str] = [] await_results: list[str] = [] + child_ref: BaseEvent[str] | None = None class Event1(BaseEvent[str]): pass @@ -738,10 +787,12 @@ class ChildEvent(BaseEvent[str]): pass async def event1_handler(event: Event1) -> str: + nonlocal child_ref execution_order.append('Event1_start') # Dispatch child child = bus.dispatch(ChildEvent()) + child_ref = child # Create multiple concurrent awaits on the same child async def await_child(name: str): @@ -788,13 +839,19 @@ async def child_handler(event: ChildEvent) -> str: assert 'await1_completed' in await_results assert 'await2_completed' in await_results - # Child should have executed before Event1 ended + # Child should have executed exactly once and before Event1 ended + assert execution_order.count('Child_start') == 1 + assert execution_order.count('Child_end') == 1 assert 'Child_start' in execution_order assert 'Child_end' in execution_order child_end_idx = execution_order.index('Child_end') event1_end_idx = execution_order.index('Event1_end') assert child_end_idx < event1_end_idx + # Child event should have exactly one handler result (no double-run). + assert child_ref is not None + assert len(child_ref.event_results) == 1 + # E2 should NOT have executed yet assert 'Event2_start' not in execution_order, \ f'E2 should NOT have started. Order: {execution_order}' diff --git a/tests/test_eventbus.py b/tests/test_eventbus.py index 10453e3..f90c0b1 100644 --- a/tests/test_eventbus.py +++ b/tests/test_eventbus.py @@ -483,6 +483,97 @@ async def working_handler(event: BaseEvent) -> str: assert working_result.result == 'worked' assert results == ['success'] + async def test_raise_if_errors_raises_exception_group_with_all_handler_errors(self, eventbus): + """raise_if_errors() should aggregate all handler failures into ExceptionGroup.""" + + async def failing_handler_one(event: BaseEvent) -> str: + raise ValueError('first failure') + + async def failing_handler_two(event: BaseEvent) -> str: + raise RuntimeError('second failure') + + async def working_handler(event: BaseEvent) -> str: + return 'worked' + + eventbus.on('UserActionEvent', failing_handler_one) + eventbus.on('UserActionEvent', failing_handler_two) + eventbus.on('UserActionEvent', working_handler) + + event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + + with pytest.raises(ExceptionGroup) as exc_info: + await event.raise_if_errors() + + grouped_errors = exc_info.value.exceptions + assert len(grouped_errors) == 2 + assert {type(err) for err in grouped_errors} == {ValueError, RuntimeError} + assert {'first failure', 'second failure'} == {str(err) for err in grouped_errors} + + async def test_raise_if_errors_waits_for_completion(self, eventbus): + """raise_if_errors() should wait for completion when called on pending events.""" + handler_started = asyncio.Event() + + async def delayed_failure(event: BaseEvent) -> str: + handler_started.set() + await asyncio.sleep(0.02) + raise ValueError('delayed failure') + + eventbus.on('UserActionEvent', delayed_failure) + + event = eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + await handler_started.wait() + + with pytest.raises(ExceptionGroup) as exc_info: + await event.raise_if_errors(timeout=1) + + assert len(exc_info.value.exceptions) == 1 + assert isinstance(exc_info.value.exceptions[0], ValueError) + + async def test_raise_if_errors_noop_when_no_errors(self, eventbus): + """raise_if_errors() should return normally when no handler failed.""" + + async def working_handler(event: BaseEvent) -> str: + return 'ok' + + eventbus.on('UserActionEvent', working_handler) + + event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + await event.raise_if_errors() + + async def test_event_result_raises_exception_group_when_multiple_handlers_fail(self, eventbus): + """event_result() should raise ExceptionGroup when multiple handler failures exist.""" + + async def failing_handler_one(event: BaseEvent) -> str: + raise ValueError('first failure') + + async def failing_handler_two(event: BaseEvent) -> str: + raise RuntimeError('second failure') + + eventbus.on('UserActionEvent', failing_handler_one) + eventbus.on('UserActionEvent', failing_handler_two) + + event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + + with pytest.raises(ExceptionGroup) as exc_info: + await event.event_result() + + grouped_errors = exc_info.value.exceptions + assert len(grouped_errors) == 2 + assert {type(err) for err in grouped_errors} == {ValueError, RuntimeError} + + async def test_event_result_single_handler_error_raises_original_exception(self, eventbus): + """event_result() should preserve original exception type when only one handler fails.""" + + async def failing_handler(event: BaseEvent) -> str: + raise ValueError('single failure') + + eventbus.on('UserActionEvent', failing_handler) + + event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + + with pytest.raises(ValueError, match='single failure'): + await event.event_result() + class TestBatchOperations: """Test batch event operations""" diff --git a/tests/test_find.py b/tests/test_find.py index 6c4e574..bce08d7 100644 --- a/tests/test_find.py +++ b/tests/test_find.py @@ -373,6 +373,36 @@ async def test_returns_most_recent_match(self): finally: await bus.stop(clear=True) + async def test_past_ignores_in_progress_until_event_completes(self): + """History search should only return completed events, never in-progress ones.""" + bus = EventBus() + + try: + release_handler = asyncio.Event() + + async def slow_handler(event: ParentEvent) -> str: + await release_handler.wait() + return 'done' + + bus.on(ParentEvent, slow_handler) + + dispatched = bus.dispatch(ParentEvent()) + await asyncio.sleep(0.02) # Let handler start. + + # In-progress event should not be returned by history search. + found_while_running = await bus.find(ParentEvent, past=True, future=False) + assert found_while_running is None + + release_handler.set() + await dispatched + await bus.wait_until_idle() + + found_after_completion = await bus.find(ParentEvent, past=True, future=False) + assert found_after_completion is not None + assert found_after_completion.event_id == dispatched.event_id + finally: + await bus.stop(clear=True) + class TestFindFutureOnly: """Tests for find(past=False, future=...) - equivalent to expect().""" @@ -435,6 +465,101 @@ async def test_ignores_past_events(self): finally: await bus.stop(clear=True) + async def test_future_works_with_string_event_type(self): + """find('EventName', ...) resolves using string keys, not just model classes.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.dispatch(ParentEvent()) + + find_task = asyncio.create_task(bus.find('ParentEvent', past=False, future=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + assert found.event_type == 'ParentEvent' + finally: + await bus.stop(clear=True) + + async def test_multiple_concurrent_find_waiters_resolve_correct_events(self): + """Concurrent find() waiters should each resolve to the correct event.""" + bus = EventBus() + + try: + # Keep one permanent handler so we can assert temporary find handlers are cleaned up. + bus.on(ScreenshotEvent, lambda e: 'done') + baseline_handler_count = len(bus.handlers.get('ScreenshotEvent', [])) + + wait_for_a = asyncio.create_task( + bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab-a', + past=False, + future=1, + ) + ) + wait_for_b = asyncio.create_task( + bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == 'tab-b', + past=False, + future=1, + ) + ) + + await asyncio.sleep(0.02) + event_a = await bus.dispatch(ScreenshotEvent(target_id='tab-a')) + event_b = await bus.dispatch(ScreenshotEvent(target_id='tab-b')) + + found_a, found_b = await asyncio.gather(wait_for_a, wait_for_b) + + assert found_a is not None + assert found_b is not None + assert found_a.event_id == event_a.event_id + assert found_b.event_id == event_b.event_id + + # All temporary find handlers should be removed. + assert len(bus.handlers.get('ScreenshotEvent', [])) == baseline_handler_count + finally: + await bus.stop(clear=True) + + async def test_find_future_resolves_before_handlers_complete(self): + """find(future=...) resolves on dispatch, before slow handlers complete.""" + bus = EventBus() + + try: + processing_complete = False + + async def slow_handler(event: ParentEvent) -> str: + nonlocal processing_complete + await asyncio.sleep(0.1) + processing_complete = True + return 'done' + + bus.on(ParentEvent, slow_handler) + + find_task = asyncio.create_task(bus.find(ParentEvent, past=False, future=1)) + await asyncio.sleep(0.01) + + dispatched = bus.dispatch(ParentEvent()) + found = await find_task + + assert found is not None + assert found.event_id == dispatched.event_id + assert processing_complete is False + assert found.event_status in ('pending', 'started') + + await bus.wait_until_idle() + assert processing_complete is True + finally: + await bus.stop(clear=True) + class TestFindNeitherPastNorFuture: """Tests for find(past=False, future=False) - should return None.""" diff --git a/tests/test_handler_timeout.py b/tests/test_handler_timeout.py index 3952429..8b7ba77 100644 --- a/tests/test_handler_timeout.py +++ b/tests/test_handler_timeout.py @@ -176,3 +176,206 @@ async def test_nested_timeout_scenario_from_issue(): # # assert 'ChildEvent' in str(exc_info.value) or 'ChildEvent' in str(exc_info.value) await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_handler_timeout_marks_error_and_other_handlers_still_complete(): + """Focused timeout behavior: one handler times out, another still completes.""" + bus = EventBus(name='TimeoutFocusedBus') + + class TimeoutFocusedEvent(BaseEvent[str]): + event_timeout: float | None = 0.01 + + execution_order: list[str] = [] + + async def slow_handler(event: TimeoutFocusedEvent) -> str: + execution_order.append('slow_start') + await asyncio.sleep(0.05) + execution_order.append('slow_end') + return 'slow' + + async def fast_handler(event: TimeoutFocusedEvent) -> str: + execution_order.append('fast_start') + return 'fast' + + bus.on(TimeoutFocusedEvent, slow_handler) + bus.on(TimeoutFocusedEvent, fast_handler) + + try: + event = await bus.dispatch(TimeoutFocusedEvent()) + await bus.wait_until_idle() + + slow_result = next((r for r in event.event_results.values() if r.handler_name.endswith('slow_handler')), None) + fast_result = next((r for r in event.event_results.values() if r.handler_name.endswith('fast_handler')), None) + + assert slow_result is not None + assert slow_result.status == 'error' + assert isinstance(slow_result.error, TimeoutError) + + assert fast_result is not None + assert fast_result.status == 'completed' + assert fast_result.result == 'fast' + assert 'fast_start' in execution_order + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_multi_bus_timeout_is_recorded_on_target_bus(): + """Closest Python equivalent: same event dispatched to two buses, timeout on target bus is captured.""" + bus_a = EventBus(name='MultiTimeoutA') + bus_b = EventBus(name='MultiTimeoutB') + + class MultiBusTimeoutEvent(BaseEvent[str]): + event_timeout: float | None = 0.01 + + async def slow_target_handler(event: MultiBusTimeoutEvent) -> str: + await asyncio.sleep(0.05) + return 'slow' + + bus_b.on(MultiBusTimeoutEvent, slow_target_handler) + + try: + event = MultiBusTimeoutEvent() + bus_a.dispatch(event) + bus_b.dispatch(event) + await bus_b.wait_until_idle() + + bus_b_result = next((r for r in event.event_results.values() if r.eventbus_name == bus_b.name), None) + assert bus_b_result is not None + assert bus_b_result.status == 'error' + assert isinstance(bus_b_result.error, TimeoutError) + assert event.event_path == ['MultiTimeoutA', 'MultiTimeoutB'] + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_followup_event_runs_after_parent_timeout_in_queue_jump_path(): + """ + Regression guard: timeout in a handler that awaited a child event should not + stall subsequent events on the same bus. + """ + bus = EventBus(name='TimeoutQueueJumpFollowupBus') + + class ParentEvent(BaseEvent[str]): + event_timeout: float | None = 0.02 + + class ChildEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + class TailEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + tail_runs = 0 + + async def child_handler(event: ChildEvent) -> str: + await asyncio.sleep(0.001) + return 'child_done' + + async def parent_handler(event: ParentEvent) -> str: + child = bus.dispatch(ChildEvent()) + await child + await asyncio.sleep(0.05) # Exceeds parent timeout + return 'parent_done' + + async def tail_handler(event: TailEvent) -> str: + nonlocal tail_runs + tail_runs += 1 + return 'tail_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(TailEvent, tail_handler) + + try: + parent = await bus.dispatch(ParentEvent()) + await bus.wait_until_idle() + + parent_result = next(iter(parent.event_results.values())) + assert parent_result.status == 'error' + assert isinstance(parent_result.error, TimeoutError) + + tail = bus.dispatch(TailEvent()) + completed_tail = await asyncio.wait_for(tail, timeout=1.0) + assert completed_tail.event_status == 'completed' + assert tail_runs == 1 + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_forwarded_timeout_path_does_not_stall_followup_events(): + """ + Regression guard: if a forwarded awaited child times out, subsequent events + should still run on both source and target buses. + """ + bus_a = EventBus(name='TimeoutForwardA') + bus_b = EventBus(name='TimeoutForwardB') + + class ParentEvent(BaseEvent[str]): + event_timeout: float | None = 0.02 + + class ChildEvent(BaseEvent[str]): + event_timeout: float | None = 0.01 + + class TailEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + bus_a_tail_runs = 0 + bus_b_tail_runs = 0 + child_ref: ChildEvent | None = None + + async def parent_handler(event: ParentEvent) -> str: + nonlocal child_ref + child = bus_a.dispatch(ChildEvent()) + child_ref = child + await child + return 'parent_done' + + async def slow_child_handler(event: ChildEvent) -> str: + await asyncio.sleep(0.05) # Guaranteed timeout on child. + return 'child_done' + + async def tail_handler_a(event: TailEvent) -> str: + nonlocal bus_a_tail_runs + bus_a_tail_runs += 1 + return 'tail_a' + + async def tail_handler_b(event: TailEvent) -> str: + nonlocal bus_b_tail_runs + bus_b_tail_runs += 1 + return 'tail_b' + + bus_a.on(ParentEvent, parent_handler) + bus_a.on(TailEvent, tail_handler_a) + bus_a.on('*', bus_b.dispatch) + bus_b.on(ChildEvent, slow_child_handler) + bus_b.on(TailEvent, tail_handler_b) + + try: + parent = await bus_a.dispatch(ParentEvent()) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + + parent_result = next(iter(parent.event_results.values())) + assert parent_result.status == 'completed' + + assert child_ref is not None + assert any( + isinstance(result.error, TimeoutError) for result in child_ref.event_results.values() + ), child_ref.event_results + + # Lock/queue state should remain healthy after timeout. + tail = bus_a.dispatch(TailEvent()) + completed_tail = await asyncio.wait_for(tail, timeout=1.0) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + + assert completed_tail.event_status == 'completed' + assert bus_a_tail_runs == 1 + assert bus_b_tail_runs == 1 + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) diff --git a/tests/test_name_conflict_gc.py b/tests/test_name_conflict_gc.py index d136623..0f1448f 100644 --- a/tests/test_name_conflict_gc.py +++ b/tests/test_name_conflict_gc.py @@ -6,11 +6,12 @@ name conflicts when creating new instances with the same name. """ +import asyncio import weakref import pytest -from bubus import EventBus +from bubus import BaseEvent, EventBus class TestNameConflictGC: @@ -174,3 +175,91 @@ def test_concurrent_name_creation(self): assert bus1.name == 'ConcurrentTest' assert bus2.name.startswith('ConcurrentTest_') assert bus2.name != bus1.name + + @pytest.mark.asyncio + async def test_unreferenced_buses_with_history_can_be_cleaned_without_instance_leak(self): + """ + Buses with populated history may outlive local scope while runloops are still active, + but they must be releasable via explicit cleanup without leaking all_instances. + """ + import gc + + class GcHistoryEvent(BaseEvent[str]): + pass + + baseline_instances = len(EventBus.all_instances) + refs: list[weakref.ReferenceType[EventBus]] = [] + + async def create_and_fill_bus(index: int) -> weakref.ReferenceType[EventBus]: + bus = EventBus(name=f'GCNoStopBus_{index}') + bus.on(GcHistoryEvent, lambda e: 'ok') + for _ in range(40): + await bus.dispatch(GcHistoryEvent()) + await bus.wait_until_idle() + return weakref.ref(bus) + + for i in range(30): + refs.append(await create_and_fill_bus(i)) + + # Encourage GC/finalization first (best effort without explicit stop()). + for _ in range(20): + gc.collect() + await asyncio.sleep(0.02) + + alive_buses = [ref() for ref in refs if ref() is not None] + still_live = [bus for bus in alive_buses if bus is not None] + + # Deterministically clean up anything still alive. + for bus in still_live: + await bus.stop(clear=True, timeout=0) + # Loop variable keeps a strong ref to the last bus in CPython. + if still_live: + del bus + del still_live + del alive_buses + + # Final GC and WeakSet purge. + for _ in range(10): + gc.collect() + await asyncio.sleep(0.01) + _ = list(EventBus.all_instances) + + assert all(ref() is None for ref in refs), 'all buses should be collectable after cleanup' + assert len(EventBus.all_instances) <= baseline_instances + + @pytest.mark.asyncio + async def test_unreferenced_buses_with_history_are_collected_without_stop(self): + """ + Unreferenced buses should be collectable without explicit stop(clear=True), + even after processing events and populating history. + """ + import gc + + class GcImplicitEvent(BaseEvent[str]): + pass + + baseline_instances = len(EventBus.all_instances) + refs: list[weakref.ReferenceType[EventBus]] = [] + + async def create_and_fill_bus(index: int) -> weakref.ReferenceType[EventBus]: + bus = EventBus(name=f'GCImplicitNoStop_{index}') + bus.on(GcImplicitEvent, lambda e: 'ok') + for _ in range(30): + await bus.dispatch(GcImplicitEvent()) + await bus.wait_until_idle() + return weakref.ref(bus) + + for i in range(20): + refs.append(await create_and_fill_bus(i)) + + for _ in range(80): + gc.collect() + await asyncio.sleep(0.02) + if all(ref() is None for ref in refs): + break + + # Force WeakSet iteration to purge any dead refs. + _ = list(EventBus.all_instances) + + assert all(ref() is None for ref in refs), 'all unreferenced buses should be collected without stop()' + assert len(EventBus.all_instances) <= baseline_instances diff --git a/tests/test_stress_20k_events.py b/tests/test_stress_20k_events.py index aea78c8..0f14cfe 100644 --- a/tests/test_stress_20k_events.py +++ b/tests/test_stress_20k_events.py @@ -241,3 +241,192 @@ async def slow_handler(event: BaseEvent) -> None: finally: # Properly stop the bus to clean up pending tasks await bus.stop(timeout=0, clear=True) # Don't wait, just force cleanup + + +@pytest.mark.asyncio +async def test_ephemeral_buses_with_forwarding_churn(): + """ + Closest Python equivalent to request-scoped bus churn: + create short-lived buses, forward between them, process events, then clear. + """ + total_bus_pairs = 60 + events_per_pair = 20 + total_events = total_bus_pairs * events_per_pair + initial_instances = len(EventBus.all_instances) + + handled_a = 0 + handled_b = 0 + + start = time.time() + + for idx in range(total_bus_pairs): + bus_a = EventBus(name=f'EphemeralA_{idx}_{os.getpid()}', middlewares=[]) + bus_b = EventBus(name=f'EphemeralB_{idx}_{os.getpid()}', middlewares=[]) + + async def handler_a(event: SimpleEvent) -> None: + nonlocal handled_a + handled_a += 1 + + async def handler_b(event: SimpleEvent) -> None: + nonlocal handled_b + handled_b += 1 + + bus_a.on(SimpleEvent, handler_a) + bus_b.on(SimpleEvent, handler_b) + bus_a.on('*', bus_b.dispatch) + + try: + pending = [bus_a.dispatch(SimpleEvent()) for _ in range(events_per_pair)] + await asyncio.gather(*pending) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + + assert bus_a.max_history_size is None or len(bus_a.event_history) <= bus_a.max_history_size + assert bus_b.max_history_size is None or len(bus_b.event_history) <= bus_b.max_history_size + finally: + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + + duration = time.time() - start + gc.collect() + + assert handled_a == total_events + assert handled_b == total_events + assert len(EventBus.all_instances) <= initial_instances + assert duration < 60, f'Ephemeral bus churn took too long: {duration:.2f}s' + + +@pytest.mark.asyncio +async def test_forwarding_queue_jump_timeout_mix_stays_stable(): + """ + Stress a mixed path in Python: + parent handler awaits forwarded child events, with intermittent child timeouts. + """ + class MixedParentEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.2 + + class MixedChildEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.05 + + history_limit = 500 + total_iterations = 300 + + bus_a = EventBus(name='MixedPathA', max_history_size=history_limit, middlewares=[]) + bus_b = EventBus(name='MixedPathB', max_history_size=history_limit, middlewares=[]) + + parent_handled = 0 + child_handled = 0 + child_events: list[MixedChildEvent] = [] + + async def child_handler(event: MixedChildEvent) -> str: + nonlocal child_handled + child_handled += 1 + if event.iteration % 7 == 0: + await asyncio.sleep(0.01) + else: + await asyncio.sleep(0.0005) + return 'child_done' + + async def parent_handler(event: MixedParentEvent) -> str: + nonlocal parent_handled + parent_handled += 1 + + child_timeout = 0.001 if event.iteration % 7 == 0 else 0.05 + child = bus_a.dispatch(MixedChildEvent(iteration=event.iteration, event_timeout=child_timeout)) + bus_b.dispatch(child) + child_events.append(child) + await child + return 'parent_done' + + bus_a.on(MixedParentEvent, parent_handler) + bus_b.on(MixedChildEvent, child_handler) + + start = time.time() + try: + for i in range(total_iterations): + await bus_a.dispatch(MixedParentEvent(iteration=i)) + + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + finally: + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + + duration = time.time() - start + + assert parent_handled == total_iterations + assert child_handled == total_iterations + timeout_count = sum( + 1 + for child in child_events + if any(isinstance(result.error, TimeoutError) for result in child.event_results.values()) + ) + assert timeout_count > 0 + assert len(bus_a.event_history) <= history_limit + assert len(bus_b.event_history) <= history_limit + assert duration < 60, f'Mixed forwarding/queue-jump/timeout path took too long: {duration:.2f}s' + + +@pytest.mark.asyncio +async def test_history_bound_is_strict_after_idle(): + """After steady-state processing, history should stay within max_history_size.""" + bus = EventBus(name='StrictHistoryBound', max_history_size=25, middlewares=[]) + + async def handler(event: SimpleEvent) -> None: + return None + + bus.on(SimpleEvent, handler) + + try: + for _ in range(200): + await bus.dispatch(SimpleEvent()) + + await bus.wait_until_idle() + assert len(bus.event_history) <= 25 + finally: + await bus.stop(timeout=0, clear=True) + + +@pytest.mark.asyncio +async def test_basic_throughput_floor_regression_guard(): + """ + Throughput regression guard (Python-specific floor). + Keeps threshold conservative to avoid CI flakiness while still catching + severe slowdowns. + """ + bus = EventBus(name='ThroughputFloor', middlewares=[]) + + processed = 0 + + async def handler(event: SimpleEvent) -> None: + nonlocal processed + processed += 1 + + bus.on(SimpleEvent, handler) + + total_events = 5_000 + batch_size = 50 + pending: list[BaseEvent[Any]] = [] + + start = time.time() + try: + for _ in range(total_events): + pending.append(bus.dispatch(SimpleEvent())) + if len(pending) >= batch_size: + await asyncio.gather(*pending) + pending.clear() + + if pending: + await asyncio.gather(*pending) + + await bus.wait_until_idle() + finally: + await bus.stop(timeout=0, clear=True) + + duration = time.time() - start + rate = total_events / duration + + assert processed == total_events + assert rate >= 600, f'Throughput regression: {rate:.0f} events/sec (expected >= 600 events/sec)' From 4d9d88cfcf87ee94bbd50ec29d4a0573b0e89c93 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sat, 7 Feb 2026 20:00:04 -0800 Subject: [PATCH 61/79] bump version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 17b5c48..1ed8f9f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "bubus" description = "Advanced Pydantic-powered event bus with async support" authors = [{ name = "Nick Sweeting" }] -version = "1.7.2" +version = "1.7.3" readme = "README.md" requires-python = ">=3.11,<4.0" classifiers = [ From 9b6d4bfb4b1fdb0fbe9112f122a431e78ce8e56f Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sun, 8 Feb 2026 23:02:23 -0500 Subject: [PATCH 62/79] Update README.md --- README.md | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 0de9965..69b3892 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,29 @@ -# `bubus`: πŸ“’ Production-ready event bus library for Python +# `bubus`: πŸ“’ Production-ready event bus library for Python AND JS -Bubus is a simple in-memory event bus library for async Python. +Bubus is an in-memory event bus library for async Python and Typescript (both node & browser). -It's designed for quickly building event-driven applications with Python in a way that "just works" with async support, proper support for nested events, and real concurrency control. It's very similar to `EventEmitter` or [`emittery`](https://github.com/sindresorhus/emittery) in JS. +It's designed for quickly building resilient, predictable, complex event-driven applications in Python and JS. -It provides a [pydantic](https://docs.pydantic.dev/latest/)-based API for implementing publish-subscribe patterns with type safety, async/sync handler support, and advanced features like event forwarding between buses, parent event tracking, multiple execution strategies, and more. +It "just works" with an intuitive, but powerful event JSON format + dispatch API that's consistent across both languages and scales consistently from one even up to millions: +```python +bus.on(SomeEvent, some_function) +bus.emit(SomeEvent({some_data: 132})) +``` + +It's async native, has proper automatic nested event tracking, and powerful concurrency control options. The API is inspired by `EventEmitter` or [`emittery`](https://github.com/sindresorhus/emittery) in JS, but it takes it a step further: + +- nice Pydantic / Zod schemas for events that can be exchanged between both languages +- automatic UUIDv7s and monotonic nanosecond timestamps for ordering events globally +- built in locking options to force strict global FIFO procesing or fully parallel processing + +--- + +♾️ It's inspired by the simplicity of async and events in `JS` but with baked-in features that allow to eliminate most of the tedious repetitive complexity in event-driven codebases: -♾️ It's inspired by the simplicity of async and events in `JS`, we aim to bring a fully type-checked [`EventTarget`](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget)-style API to Python. +- correct timeout enforcement across multiple levels of events, if a parent times out it correctly aborts all child event processing +- ability to strongly type hint and enforce the return type of event handlers at compile-time +- ability to queue events on the bus, or inline await them for immediate execution like a normal function call +- handles ~5,000 events/sec/core in both languages, with ~2kb/event RAM consumed per event during active processing
    @@ -15,7 +32,7 @@ It provides a [pydantic](https://docs.pydantic.dev/latest/)-based API for implem Install bubus and get started with a simple event-driven application: ```bash -pip install bubus +pip install bubus # see ./bubus-ts/README.md for JS instructions ``` ```python From fd7e6b5eacba3d57471b7c15aab8b76be8e82cfd Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sun, 8 Feb 2026 23:06:51 -0500 Subject: [PATCH 63/79] Revise README to emphasize multi-language support Updated project description to reflect multi-language support. --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 69b3892..573e05b 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ -# `bubus`: πŸ“’ Production-ready event bus library for Python AND JS +# `bubus`: πŸ“’ Production-ready multi-language event bus library + +image + Bubus is an in-memory event bus library for async Python and Typescript (both node & browser). From dc29a76b84e9a572b33d053cb12e8508508e420a Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sun, 8 Feb 2026 23:07:31 -0500 Subject: [PATCH 64/79] Update README for brevity and clarity Shortened references to Typescript and applications. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 573e05b..739a53b 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,9 @@ image -Bubus is an in-memory event bus library for async Python and Typescript (both node & browser). +Bubus is an in-memory event bus library for async Python and TS (node/browser). -It's designed for quickly building resilient, predictable, complex event-driven applications in Python and JS. +It's designed for quickly building resilient, predictable, complex event-driven apps. It "just works" with an intuitive, but powerful event JSON format + dispatch API that's consistent across both languages and scales consistently from one even up to millions: ```python From 4228337af2d9240a6141be575f1bef1839d5d3a7 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sun, 8 Feb 2026 23:07:58 -0500 Subject: [PATCH 65/79] Update README description for bubus Removed 'library' from the project description. --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 739a53b..e591d09 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,7 @@ -# `bubus`: πŸ“’ Production-ready multi-language event bus library +# `bubus`: πŸ“’ Production-ready multi-language event bus image - Bubus is an in-memory event bus library for async Python and TS (node/browser). It's designed for quickly building resilient, predictable, complex event-driven apps. From fd52a82c926e6641a5033a6d31d1a2504bb94463 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sun, 8 Feb 2026 23:18:19 -0500 Subject: [PATCH 66/79] Update README with implementation badges Added badges for Python and TypeScript implementations, as well as NPM version. --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index e591d09..1fc2644 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,11 @@ # `bubus`: πŸ“’ Production-ready multi-language event bus +[![DeepWiki: Python](https://img.shields.io/badge/DeepWiki-bbus%2FPython-yellow.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus) ![PyPI - Version](https://img.shields.io/pypi/v/bubus) ![GitHub License](https://img.shields.io/github/license/pirate/bbus) ![GitHub last commit](https://img.shields.io/github/last-commit/pirate/bbus) + +[![DeepWiki: TS](https://img.shields.io/badge/DeepWiki-bbus%2FTypescript-blue.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus/3-typescript-implementation) ![NPM Version](https://img.shields.io/npm/v/bubus) + + + image Bubus is an in-memory event bus library for async Python and TS (node/browser). From da51cfd58035da4474da3e401d72107a7e3814a7 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sun, 8 Feb 2026 23:18:47 -0500 Subject: [PATCH 67/79] Clean up README.md by removing duplicates Removed duplicate image and adjusted formatting. --- README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 1fc2644..66316e9 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,11 @@ # `bubus`: πŸ“’ Production-ready multi-language event bus +image + [![DeepWiki: Python](https://img.shields.io/badge/DeepWiki-bbus%2FPython-yellow.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus) ![PyPI - Version](https://img.shields.io/pypi/v/bubus) ![GitHub License](https://img.shields.io/github/license/pirate/bbus) ![GitHub last commit](https://img.shields.io/github/last-commit/pirate/bbus) [![DeepWiki: TS](https://img.shields.io/badge/DeepWiki-bbus%2FTypescript-blue.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus/3-typescript-implementation) ![NPM Version](https://img.shields.io/npm/v/bubus) - - -image - Bubus is an in-memory event bus library for async Python and TS (node/browser). It's designed for quickly building resilient, predictable, complex event-driven apps. From b190994cfb05cbae6e377afbdb65d0a31a026731 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sun, 8 Feb 2026 20:31:13 -0800 Subject: [PATCH 68/79] add bubus-ts publish flow --- .github/workflows/publish-npm.yml | 52 +++++++++++++++++++++++++++++++ bubus-ts/.prettierignore | 1 + bubus-ts/package.json | 35 +++++++++++++++++---- 3 files changed, 82 insertions(+), 6 deletions(-) create mode 100644 .github/workflows/publish-npm.yml create mode 100644 bubus-ts/.prettierignore diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml new file mode 100644 index 0000000..30bcfcb --- /dev/null +++ b/.github/workflows/publish-npm.yml @@ -0,0 +1,52 @@ +name: publish-npm + +on: + release: + types: [published] + workflow_dispatch: + inputs: + tag: + description: npm dist-tag to publish under + required: false + default: latest + +permissions: + contents: read + id-token: write + +jobs: + publish_to_npm: + runs-on: ubuntu-latest + defaults: + run: + working-directory: bubus-ts + steps: + - uses: actions/checkout@v4 + + - uses: pnpm/action-setup@v4 + with: + version: 10 + + - uses: actions/setup-node@v4 + with: + node-version: 22 + cache: pnpm + cache-dependency-path: bubus-ts/pnpm-lock.yaml + registry-url: https://registry.npmjs.org + + - run: pnpm install --frozen-lockfile + - run: pnpm run typecheck + - run: pnpm test + - run: pnpm run build + + - name: Publish release tag + if: github.event_name == 'release' + run: pnpm publish --access public --no-git-checks + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + + - name: Publish manual tag + if: github.event_name == 'workflow_dispatch' + run: pnpm publish --access public --tag "${{ inputs.tag }}" --no-git-checks + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/bubus-ts/.prettierignore b/bubus-ts/.prettierignore new file mode 100644 index 0000000..849ddff --- /dev/null +++ b/bubus-ts/.prettierignore @@ -0,0 +1 @@ +dist/ diff --git a/bubus-ts/package.json b/bubus-ts/package.json index 441aa89..67d5406 100644 --- a/bubus-ts/package.json +++ b/bubus-ts/package.json @@ -1,28 +1,38 @@ { - "name": "bubus-ts", - "version": "1.0.0", + "name": "bubus", + "version": "1.7.3", "description": "Event bus library for browsers and ESM Node.js", "type": "module", "main": "./dist/esm/index.js", "module": "./dist/esm/index.js", "types": "./dist/types/index.d.ts", + "exports": { + ".": { + "types": "./dist/types/index.d.ts", + "import": "./dist/esm/index.js", + "default": "./dist/esm/index.js" + } + }, "files": [ "dist/esm", "dist/types" ], "scripts": { "build": "pnpm run build:esm && pnpm run build:types", - "build:esm": "esbuild src/index.ts --bundle --format=esm --platform=neutral --target=es2022 --outdir=dist/esm", + "build:esm": "esbuild src/index.ts --bundle --format=esm --platform=neutral --target=es2022 --sourcemap --outdir=dist/esm", "build:types": "tsc -p tsconfig.json --emitDeclarationOnly", "typecheck": "tsc -p tsconfig.json --noEmit", - "lint": "eslint .", + "lint": "pnpm run format:check && eslint . && pnpm run typecheck", "format": "prettier --write .", "format:check": "prettier --check .", - "test": "NODE_OPTIONS='--expose-gc' node --expose-gc --test --import tsx tests/**/*.test.ts" + "test": "NODE_OPTIONS='--expose-gc' node --expose-gc --test --import tsx tests/**/*.test.ts", + "prepack": "pnpm run build", + "release:dry-run": "pnpm publish --access public --dry-run --no-git-checks", + "release:check": "pnpm run typecheck && pnpm test && pnpm run build" }, "keywords": [], "author": "", - "license": "ISC", + "license": "MIT", "packageManager": "pnpm@10.23.0", "dependencies": { "uuid": "^11.1.0", @@ -36,5 +46,18 @@ "prettier": "^3.8.1", "tsx": "^4.20.6", "typescript": "^5.9.3" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/pirate/bbus.git", + "directory": "bubus-ts" + }, + "bugs": { + "url": "https://github.com/pirate/bbus/issues" + }, + "homepage": "https://github.com/pirate/bbus/tree/main/bubus-ts", + "publishConfig": { + "access": "public", + "registry": "https://registry.npmjs.org/" } } From 46b59ab968aaf342b7c829bffcdfdee0acd6ac5d Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Sun, 8 Feb 2026 20:31:23 -0800 Subject: [PATCH 69/79] add perf tests --- tests/test_stress_20k_events.py | 338 ++++++++++++++++++++++++++++++-- 1 file changed, 319 insertions(+), 19 deletions(-) diff --git a/tests/test_stress_20k_events.py b/tests/test_stress_20k_events.py index 0f14cfe..0a07401 100644 --- a/tests/test_stress_20k_events.py +++ b/tests/test_stress_20k_events.py @@ -1,5 +1,6 @@ import asyncio import gc +import math import os import time from typing import Any @@ -16,6 +17,246 @@ def get_memory_usage_mb(): return process.memory_info().rss / 1024 / 1024 +def percentile(values: list[float], q: float) -> float: + """Simple percentile helper without numpy dependency.""" + if not values: + return 0.0 + sorted_values = sorted(values) + pos = (len(sorted_values) - 1) * q + low = math.floor(pos) + high = math.ceil(pos) + if low == high: + return sorted_values[int(pos)] + return sorted_values[low] + (sorted_values[high] - sorted_values[low]) * (pos - low) + + +async def dispatch_and_measure( + bus: EventBus, + event_factory: callable, + total_events: int, + batch_size: int = 40, +) -> tuple[float, float, float, float, float]: + """ + Dispatch many events and return: + (throughput_events_per_sec, dispatch_p50_ms, dispatch_p95_ms, done_p50_ms, done_p95_ms) + """ + dispatch_latencies_ms: list[float] = [] + done_latencies_ms: list[float] = [] + pending: list[tuple[BaseEvent[Any], float]] = [] + + start = time.perf_counter() + for _ in range(total_events): + t0 = time.perf_counter() + event = bus.dispatch(event_factory()) + dispatch_latencies_ms.append((time.perf_counter() - t0) * 1000) + pending.append((event, time.perf_counter())) + if len(pending) >= batch_size: + await asyncio.gather(*(wait_one(item) for item in pending)) + pending.clear() + + async def wait_one(item: tuple[BaseEvent[Any], float]) -> None: + event, t_dispatch_done = item + await event + done_latencies_ms.append((time.perf_counter() - t_dispatch_done) * 1000) + + if pending: + await asyncio.gather(*(wait_one(item) for item in pending)) + await bus.wait_until_idle() + + elapsed = time.perf_counter() - start + throughput = total_events / max(elapsed, 1e-9) + return ( + throughput, + percentile(dispatch_latencies_ms, 0.50), + percentile(dispatch_latencies_ms, 0.95), + percentile(done_latencies_ms, 0.50), + percentile(done_latencies_ms, 0.95), + ) + + +async def run_mode_throughput_benchmark( + *, + parallel_handlers: bool, + total_events: int = 5_000, + batch_size: int = 50, +) -> tuple[int, float]: + """Run a basic no-op throughput benchmark for one handler mode.""" + bus = EventBus( + name=f'ThroughputFloor_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + + processed = 0 + + async def handler(event: SimpleEvent) -> None: + nonlocal processed + processed += 1 + + bus.on(SimpleEvent, handler) + + pending: list[BaseEvent[Any]] = [] + start = time.time() + try: + for _ in range(total_events): + pending.append(bus.dispatch(SimpleEvent())) + if len(pending) >= batch_size: + await asyncio.gather(*pending) + pending.clear() + + if pending: + await asyncio.gather(*pending) + + await bus.wait_until_idle() + finally: + await bus.stop(timeout=0, clear=True) + + duration = time.time() - start + throughput = total_events / max(duration, 1e-9) + return processed, throughput + + +async def run_io_fanout_benchmark( + *, + parallel_handlers: bool, + total_events: int = 800, + handlers_per_event: int = 4, + sleep_seconds: float = 0.0015, + batch_size: int = 40, +) -> tuple[int, float]: + """Benchmark I/O-bound fanout to compare serial vs parallel handler mode.""" + bus = EventBus( + name=f'Fanout_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + + handled = 0 + + for index in range(handlers_per_event): + async def handler(event: SimpleEvent) -> None: + nonlocal handled + await asyncio.sleep(sleep_seconds) + handled += 1 + + handler.__name__ = f'fanout_handler_{index}' + bus.on(SimpleEvent, handler) + + pending: list[BaseEvent[Any]] = [] + start = time.time() + try: + for _ in range(total_events): + pending.append(bus.dispatch(SimpleEvent())) + if len(pending) >= batch_size: + await asyncio.gather(*pending) + pending.clear() + + if pending: + await asyncio.gather(*pending) + + await bus.wait_until_idle() + finally: + await bus.stop(timeout=0, clear=True) + + duration = time.time() - start + return handled, duration + + +def throughput_floor_for_mode(parallel_handlers: bool) -> int: + """ + Conservative per-mode floor to catch severe regressions while avoiding CI flakiness. + """ + if parallel_handlers: + return 500 + return 600 + + +def throughput_regression_floor( + first_run_throughput: float, + *, + min_fraction: float, + hard_floor: float, +) -> float: + """ + Scenario+mode regression threshold using same-run baseline + absolute safety floor. + """ + return max(hard_floor, first_run_throughput * min_fraction) + + +async def run_contention_round( + *, + parallel_handlers: bool, + bus_count: int = 10, + events_per_bus: int = 120, + batch_size: int = 20, +) -> dict[str, float]: + """ + Concurrently dispatch on many buses to stress global lock contention. + """ + buses = [ + EventBus( + name=f'LockContention_{i}_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + for i in range(bus_count) + ] + counters = [0 for _ in range(bus_count)] + dispatch_latencies_ms: list[float] = [] + done_latencies_ms: list[float] = [] + + for index, bus in enumerate(buses): + def make_handler(handler_index: int): + async def handler(event: SimpleEvent) -> None: + counters[handler_index] += 1 + + handler.__name__ = f'contention_handler_{handler_index}' + return handler + + bus.on(SimpleEvent, make_handler(index)) + + async def wait_batch(batch: list[tuple[BaseEvent[Any], float]]) -> None: + async def wait_one(item: tuple[BaseEvent[Any], float]) -> None: + event, dispatch_done_at = item + await event + done_latencies_ms.append((time.perf_counter() - dispatch_done_at) * 1000) + + await asyncio.gather(*(wait_one(item) for item in batch)) + + async def producer(bus: EventBus) -> None: + pending: list[tuple[BaseEvent[Any], float]] = [] + for _ in range(events_per_bus): + t0 = time.perf_counter() + event = bus.dispatch(SimpleEvent()) + dispatch_latencies_ms.append((time.perf_counter() - t0) * 1000) + pending.append((event, time.perf_counter())) + if len(pending) >= batch_size: + await wait_batch(pending) + pending.clear() + + if pending: + await wait_batch(pending) + await bus.wait_until_idle() + + total_events = bus_count * events_per_bus + start = time.perf_counter() + try: + await asyncio.gather(*(producer(bus) for bus in buses)) + finally: + await asyncio.gather(*(bus.stop(timeout=0, clear=True) for bus in buses)) + + duration = time.perf_counter() - start + return { + 'throughput': total_events / max(duration, 1e-9), + 'dispatch_p50_ms': percentile(dispatch_latencies_ms, 0.50), + 'dispatch_p95_ms': percentile(dispatch_latencies_ms, 0.95), + 'done_p50_ms': percentile(done_latencies_ms, 0.50), + 'done_p95_ms': percentile(done_latencies_ms, 0.95), + 'fairness_min': float(min(counters)), + 'fairness_max': float(max(counters)), + } + + class SimpleEvent(BaseEvent): """Simple event without Generic for performance testing""" @@ -390,43 +631,102 @@ async def handler(event: SimpleEvent) -> None: @pytest.mark.asyncio -async def test_basic_throughput_floor_regression_guard(): +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_basic_throughput_floor_regression_guard(parallel_handlers: bool): """ - Throughput regression guard (Python-specific floor). + Throughput regression guard across Python's handler concurrency modes. Keeps threshold conservative to avoid CI flakiness while still catching severe slowdowns. """ - bus = EventBus(name='ThroughputFloor', middlewares=[]) + processed, rate = await run_mode_throughput_benchmark(parallel_handlers=parallel_handlers) + + assert processed == 5_000 + minimum_rate = throughput_floor_for_mode(parallel_handlers) + mode = 'parallel' if parallel_handlers else 'serial' + assert rate >= minimum_rate, ( + f'{mode} throughput regression: {rate:.0f} events/sec ' + f'(expected >= {minimum_rate} events/sec)' + ) - processed = 0 - async def handler(event: SimpleEvent) -> None: - nonlocal processed - processed += 1 +@pytest.mark.asyncio +async def test_parallel_handlers_mode_improves_io_bound_fanout(): + """ + For I/O-bound workloads with multiple handlers per event, parallel mode should + provide a meaningful speedup versus serial mode. + """ + serial_handled, serial_duration = await run_io_fanout_benchmark(parallel_handlers=False) + parallel_handled, parallel_duration = await run_io_fanout_benchmark(parallel_handlers=True) + + expected_total = 800 * 4 + assert serial_handled == expected_total + assert parallel_handled == expected_total + assert parallel_duration < serial_duration * 0.8, ( + f'Expected parallel handler mode to be faster for I/O fanout; ' + f'serial={serial_duration:.2f}s parallel={parallel_duration:.2f}s' + ) - bus.on(SimpleEvent, handler) - total_events = 5_000 - batch_size = 50 - pending: list[BaseEvent[Any]] = [] +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_forwarding_throughput_floor_across_modes(parallel_handlers: bool): + """ + Regression guard for forwarding path in both handler execution modes. + """ + source_bus = EventBus( + name=f'ForwardSource_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + target_bus = EventBus( + name=f'ForwardTarget_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + + handled = 0 + async def sink_handler(event: SimpleEvent) -> None: + nonlocal handled + handled += 1 + + source_bus.on('*', target_bus.dispatch) + target_bus.on(SimpleEvent, sink_handler) + + total_events = 3_000 + pending: list[BaseEvent[Any]] = [] + batch_size = 40 start = time.time() try: for _ in range(total_events): - pending.append(bus.dispatch(SimpleEvent())) + pending.append(source_bus.dispatch(SimpleEvent())) if len(pending) >= batch_size: await asyncio.gather(*pending) pending.clear() if pending: await asyncio.gather(*pending) - - await bus.wait_until_idle() + await source_bus.wait_until_idle() + await target_bus.wait_until_idle() finally: - await bus.stop(timeout=0, clear=True) + await source_bus.stop(timeout=0, clear=True) + await target_bus.stop(timeout=0, clear=True) duration = time.time() - start - rate = total_events / duration - - assert processed == total_events - assert rate >= 600, f'Throughput regression: {rate:.0f} events/sec (expected >= 600 events/sec)' + throughput = total_events / max(duration, 1e-9) + floor = 200 + + assert handled == total_events + mode = 'parallel' if parallel_handlers else 'serial' + assert throughput >= floor, ( + f'{mode} forwarding throughput regression: {throughput:.0f} events/sec ' + f'(expected >= {floor} events/sec)' + ) From 875e331a73977cc87d4b2df92ac1776048dffcc3 Mon Sep 17 00:00:00 2001 From: Nick Sweeting Date: Mon, 9 Feb 2026 10:34:38 -0800 Subject: [PATCH 70/79] unverified python perf fixes --- bubus/models.py | 172 +++++--- bubus/service.py | 253 ++++++++---- tests/test_stress_20k_events.py | 671 +++++++++++++++++++++++++++++++- 3 files changed, 946 insertions(+), 150 deletions(-) diff --git a/bubus/models.py b/bubus/models.py index b384506..ec147dd 100644 --- a/bubus/models.py +++ b/bubus/models.py @@ -269,6 +269,7 @@ def event_result_type_serializer(self, value: Any) -> str | None: # Completion signal _event_completed_signal: asyncio.Event | None = PrivateAttr(default=None) + _event_is_complete_flag: bool = PrivateAttr(default=False) # Dispatch-time context for ContextVar propagation to handlers # Captured when dispatch() is called, used when executing handlers via ctx.run() @@ -279,16 +280,20 @@ def __hash__(self) -> int: return hash(self.event_id) def __str__(self) -> str: - """BaseEvent#ab12⏳""" - icon = ( - '⏳' - if self.event_status == 'pending' - else 'βœ…' - if self.event_status == 'completed' - else 'πŸƒ' + """Compact O(1) summary for hot-path logging.""" + completed_signal = self._event_completed_signal + is_complete = self._event_is_complete_flag or ( + completed_signal is not None and completed_signal.is_set() ) - # AuthBus≫DataBusβ–Ά AuthLoginEvent#ab12 ⏳ - return f'{"≫".join(self.event_path[1:] or "?")}β–Ά {self.event_type}#{self.event_id[-4:]} {icon}' + if is_complete: + icon = 'βœ…' + elif self.event_processed_at is not None: + icon = 'πŸƒ' + else: + icon = '⏳' + + bus_hint = self.event_path[-1] if self.event_path else '?' + return f'{bus_hint}β–Ά {self.event_type}#{self.event_id[-4:]} {icon}' def _remove_self_from_queue(self, bus: 'EventBus') -> bool: """Remove this event from the bus's queue if present. Returns True if removed.""" @@ -300,7 +305,7 @@ def _remove_self_from_queue(self, bus: 'EventBus') -> bool: return True return False - def _is_queued_on_any_bus(self) -> bool: + def _is_queued_on_any_bus(self, ignore_bus: 'EventBus | None' = None) -> bool: """ Check whether this event is currently queued on any live EventBus. @@ -310,7 +315,13 @@ def _is_queued_on_any_bus(self) -> bool: from bubus.service import EventBus for bus in list(EventBus.all_instances): - if not bus or not bus.event_queue or not hasattr(bus.event_queue, '_queue'): + if not bus: + continue + if self.event_id in getattr(bus, '_processing_event_ids', set()): + if ignore_bus is not None and bus is ignore_bus: + continue + return True + if not bus.event_queue or not hasattr(bus.event_queue, '_queue'): continue queue = cast(deque[BaseEvent[Any]], bus.event_queue._queue) # type: ignore[attr-defined] if self in queue: @@ -350,8 +361,12 @@ async def _process_self_on_all_buses(self) -> None: # Check if THIS event is in this bus's queue if self._remove_self_from_queue(bus): # Process only this event on this bus - await bus.handle_event(self) - bus.event_queue.task_done() + bus._processing_event_ids.add(self.event_id) + try: + await bus.handle_event(self) + bus.event_queue.task_done() + finally: + bus._processing_event_ids.discard(self.event_id) processed_any = True # Check if we're done after processing @@ -366,7 +381,8 @@ async def _process_self_on_all_buses(self) -> None: await asyncio.sleep(0) except asyncio.CancelledError: - logger.debug(f'Polling loop cancelled for {self}') + if logger.isEnabledFor(logging.DEBUG): + logger.debug('Polling loop cancelled for %s', self) raise async def _wait_for_completion_inside_handler(self) -> None: @@ -385,6 +401,8 @@ async def _wait_for_completion_outside_handler(self) -> None: Simply waits on the completion signal - the event loop's normal processing will handle the event. """ + if self._event_is_complete_flag: + return assert self.event_completed_signal is not None await self.event_completed_signal.wait() @@ -392,11 +410,13 @@ def __await__(self) -> Generator[Self, Any, Any]: """Wait for event to complete and return self""" async def wait_for_handlers_to_complete_then_return_event(): + if self._event_is_complete_flag: + return self assert self.event_completed_signal is not None from bubus.service import holds_global_lock, inside_handler_context is_inside_handler = inside_handler_context.get() and holds_global_lock.get() - is_not_yet_complete = not self.event_completed_signal.is_set() + is_not_yet_complete = not self._event_is_complete_flag and not self.event_completed_signal.is_set() if is_not_yet_complete and is_inside_handler: await self._wait_for_completion_inside_handler() @@ -478,7 +498,13 @@ def event_completed_signal(self) -> asyncio.Event | None: @property def event_status(self) -> EventStatus: """Current status of this event in the lifecycle.""" - return EventStatus.COMPLETED if self.event_completed_at else EventStatus.STARTED if self.event_started_at else EventStatus.PENDING + if self._event_is_complete_flag: + return EventStatus.COMPLETED + if self._event_completed_signal is not None and self._event_completed_signal.is_set(): + return EventStatus.COMPLETED + if self.event_started_at is not None: + return EventStatus.STARTED + return EventStatus.PENDING @property def event_children(self) -> list['BaseEvent[Any]']: @@ -491,27 +517,45 @@ def event_children(self) -> list['BaseEvent[Any]']: @property def event_started_at(self) -> datetime | None: """Timestamp when event first started being processed by any handler""" - started_times = [result.started_at for result in self.event_results.values() if result.started_at is not None] - # If no handlers but event was processed, use the processed timestamp - if not started_times and self.event_processed_at: + earliest_started: datetime | None = None + for result in self.event_results.values(): + started_at = result.started_at + if started_at is None: + continue + if earliest_started is None or started_at < earliest_started: + earliest_started = started_at + # If no handlers but event was processed, use the processed timestamp. + if earliest_started is None and self.event_processed_at: return self.event_processed_at - return min(started_times) if started_times else None + return earliest_started @property def event_completed_at(self) -> datetime | None: """Timestamp when event was completed by all handlers""" - # If no handlers at all but event was processed, use the processed timestamp + # If no handlers at all but event was processed, use the processed timestamp. + # This supports manually deserialized/updated events in tests and tooling. if not self.event_results and self.event_processed_at: return self.event_processed_at - # All handlers must be done (completed or error) - all_done = all(result.status in ('completed', 'error') for result in self.event_results.values()) - if not all_done: + if not self._event_is_complete_flag and not ( + self._event_completed_signal is not None and self._event_completed_signal.is_set() + ): + # Fast negative path for in-flight events return None - # Return the latest completion time - completed_times = [result.completed_at for result in self.event_results.values() if result.completed_at is not None] - return max(completed_times) if completed_times else self.event_processed_at + if not self.event_results: + return self.event_processed_at + + latest_completed: datetime | None = None + for result in self.event_results.values(): + if result.status not in ('completed', 'error'): + return None + completed_at = result.completed_at + if completed_at is None: + continue + if latest_completed is None or completed_at > latest_completed: + latest_completed = completed_at + return latest_completed or self.event_processed_at def event_create_pending_results( self, @@ -525,6 +569,7 @@ def event_create_pending_results( Any stale timing/error data from prior runs is cleared so consumers immediately see a fresh pending state. """ pending_results: dict[PythonIdStr, 'EventResult[T_EventResultType]'] = {} + self._event_is_complete_flag = False for handler_id, handler in handlers.items(): event_result = self.event_result_update( handler=handler, @@ -828,46 +873,50 @@ def event_result_update( # Don't mark complete here - let the EventBus do it after all handlers are done return self.event_results[handler_id] - def event_mark_complete_if_all_handlers_completed(self) -> None: + def event_mark_complete_if_all_handlers_completed(self, current_bus: 'EventBus | None' = None) -> None: """Check if all handlers are done and signal completion""" - if self.event_completed_signal and not self.event_completed_signal.is_set(): - # If there are no results at all, the event is complete - if not self.event_results: - if hasattr(self, 'event_processed_at'): - self.event_processed_at = datetime.now(UTC) - self.event_completed_signal.set() - # Clear dispatch context to avoid memory leaks - self._event_dispatch_context = None - return - - # Check if all handler results are done - all_handlers_done = all(result.status in ('completed', 'error') for result in self.event_results.values()) - if not all_handlers_done: - # logger.debug( - # f'Event {self} not complete - waiting for handlers: {[r for r in self.event_results.values() if r.status not in ("completed", "error")]}' - # ) - return - - # Forwarded events may still be waiting in another bus queue. - # Don't mark complete until all queue copies have been consumed. - if self._is_queued_on_any_bus(): + completed_signal = self._event_completed_signal + if completed_signal is not None and completed_signal.is_set(): + self._event_is_complete_flag = True + return + + # If there are no results at all, the event is complete. + if not self.event_results: + # Even with no local handlers, forwarded copies may still be queued elsewhere. + if self._is_queued_on_any_bus(ignore_bus=current_bus): return - - # Recursively check if all child events are also complete if not self.event_are_all_children_complete(): - # incomplete_children = [c for c in self.event_children if c.event_status != 'completed'] - # logger.debug( - # f'Event {self} not complete - waiting for {len(incomplete_children)} child events: {incomplete_children}' - # ) return - - # All handlers and all child events are done if hasattr(self, 'event_processed_at'): self.event_processed_at = datetime.now(UTC) - # logger.debug(f'Event {self} marking complete - all handlers and children done') - self.event_completed_signal.set() - # Clear dispatch context to avoid memory leaks (it holds references to ContextVars) + self._event_is_complete_flag = True + if completed_signal is not None: + completed_signal.set() self._event_dispatch_context = None + return + + # Check if all handler results are done. + for result in self.event_results.values(): + if result.status not in ('completed', 'error'): + return + + # Forwarded events may still be waiting in another bus queue. + # Don't mark complete until all queue copies have been consumed. + if self._is_queued_on_any_bus(ignore_bus=current_bus): + return + + # Recursively check if all child events are also complete + if not self.event_are_all_children_complete(): + return + + # All handlers and all child events are done. + if hasattr(self, 'event_processed_at'): + self.event_processed_at = datetime.now(UTC) + self._event_is_complete_flag = True + if completed_signal is not None: + completed_signal.set() + # Clear dispatch context to avoid memory leaks (it holds references to ContextVars) + self._event_dispatch_context = None def event_are_all_children_complete(self, _visited: set[str] | None = None) -> bool: """Recursively check if all child events and their descendants are complete""" @@ -881,7 +930,8 @@ def event_are_all_children_complete(self, _visited: set[str] | None = None) -> b for child_event in self.event_children: if child_event.event_status != 'completed': - logger.debug(f'Event {self} has incomplete child {child_event}') + if logger.isEnabledFor(logging.DEBUG): + logger.debug('Event %s has incomplete child %s', self, child_event) return False # Recursively check child's children if not child_event.event_are_all_children_complete(_visited): diff --git a/bubus/service.py b/bubus/service.py index 44c893e..e77079c 100644 --- a/bubus/service.py +++ b/bubus/service.py @@ -304,6 +304,8 @@ class EventBus: _is_running: bool = False _runloop_task: asyncio.Task[None] | None = None _on_idle: asyncio.Event | None = None + _active_event_ids: set[str] + _processing_event_ids: set[str] def __init__( self, @@ -351,6 +353,8 @@ def __init__( self.parallel_handlers = parallel_handlers self._on_idle = None self.middlewares: list[EventBusMiddleware] = list(middlewares or []) + self._active_event_ids = set() + self._processing_event_ids = set() # Memory leak prevention settings self.max_history_size = max_history_size @@ -378,28 +382,59 @@ def __del__(self): def __str__(self) -> str: icon = '🟒' if self._is_running else 'πŸ”΄' - return f'{self.name}{icon}(⏳ {len(self.events_pending or [])} | ▢️ {len(self.events_started or [])} | βœ… {len(self.events_completed or [])} ➑️ {len(self.handlers)} πŸ‘‚)' + queue_size = self.event_queue.qsize() if self.event_queue else 0 + return f'{self.name}{icon}(queue={queue_size} active={len(self._active_event_ids)} history={len(self.event_history)} handlers={len(self.handlers)})' def __repr__(self) -> str: return str(self) async def _on_event_change(self, event: BaseEvent[Any], status: EventStatus) -> None: + if not self.middlewares: + return for middleware in self.middlewares: await middleware.on_event_change(self, event, status) async def _on_event_result_change( self, event: BaseEvent[Any], event_result: EventResult[Any], status: EventStatus ) -> None: + if not self.middlewares: + return for middleware in self.middlewares: await middleware.on_event_result_change(self, event, event_result, status) + @staticmethod + def _is_event_complete_fast(event: BaseEvent[Any]) -> bool: + signal = event._event_completed_signal # pyright: ignore[reportPrivateUsage] + if signal is not None: + return signal.is_set() + if getattr(event, '_event_is_complete_flag', False): # pyright: ignore[reportPrivateUsage] + return True + return event.event_completed_at is not None + + @staticmethod + def _is_event_started_fast(event: BaseEvent[Any]) -> bool: + for result in event.event_results.values(): + if result.started_at is not None or result.status == 'started': + return True + return False + + def _has_inflight_events_fast(self) -> bool: + return bool(self._active_event_ids) + + @staticmethod + def _mark_event_complete_on_all_buses(event: BaseEvent[Any]) -> None: + event_id = event.event_id + for bus in list(EventBus.all_instances): + if bus: + bus._active_event_ids.discard(event_id) + @property def events_pending(self) -> list[BaseEvent[Any]]: """Get events that haven't started processing yet (does not include events that have not even finished dispatching yet in self.event_queue)""" return [ event for event in self.event_history.values() - if event.event_started_at is None and event.event_completed_at is None + if not self._is_event_complete_fast(event) and not self._is_event_started_fast(event) ] @property @@ -408,13 +443,13 @@ def events_started(self) -> list[BaseEvent[Any]]: return [ event for event in self.event_history.values() - if event.event_started_at is not None and event.event_completed_at is None + if not self._is_event_complete_fast(event) and self._is_event_started_fast(event) ] @property def events_completed(self) -> list[BaseEvent[Any]]: """Get events that have completed processing""" - return [event for event in self.event_history.values() if event.event_completed_at is not None] + return [event for event in self.event_history.values() if self._is_event_complete_fast(event)] # Overloads for typed event patterns with specific handler signatures # Order matters - more specific types must come before general ones @@ -508,7 +543,13 @@ def on( # Register handler self.handlers[event_key].append(handler) # type: ignore - logger.debug(f'πŸ‘‚ {self}.on({event_key}, {get_handler_name(handler)}) Registered event handler') + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + 'πŸ‘‚ %s.on(%s, %s) Registered event handler', + self, + event_key, + get_handler_name(handler), + ) def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: """ @@ -566,9 +607,13 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: # NOT: event = event.model_copy(update={'event_path': event.event_path + [self.name]}) event.event_path.append(self.name) else: - logger.debug( - f'⚠️ {self}.dispatch({event.event_type}) - Bus already in path, not adding again. Path: {event.event_path}' - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + '⚠️ %s.dispatch(%s) - Bus already in path, not adding again. Path: %s', + self, + event.event_type, + event.event_path, + ) assert event.event_path, 'Missing event.event_path: list[str] (with at least the origin function name recorded in it)' assert all(entry.isidentifier() for entry in event.event_path), ( @@ -579,9 +624,12 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: # Only enforce if we have memory limits set if self.max_history_size is not None: queue_size = self.event_queue.qsize() if self.event_queue else 0 - pending_in_history = sum( - 1 for event in self.event_history.values() if event.event_status in ('pending', 'started') - ) + pending_in_history = 0 + for existing_event in self.event_history.values(): + if not self._is_event_complete_fast(existing_event): + pending_in_history += 1 + if queue_size + pending_in_history >= 100: + break total_pending = queue_size + pending_in_history if total_pending >= 100: @@ -600,13 +648,20 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: self.event_queue.put_nowait(event) # Only add to history after successfully queuing self.event_history[event.event_id] = event - loop = asyncio.get_running_loop() - loop.create_task( - self._on_event_change(event, EventStatus.PENDING) - ) - logger.info( - f'πŸ—£οΈ {self}.dispatch({event.event_type}) ➑️ {event.event_type}#{event.event_id[-4:]} (#{self.event_queue.qsize()} {event.event_status})' - ) + self._active_event_ids.add(event.event_id) + if self.middlewares: + loop = asyncio.get_running_loop() + loop.create_task(self._on_event_change(event, EventStatus.PENDING)) + if logger.isEnabledFor(logging.INFO): + logger.info( + 'πŸ—£οΈ %s.dispatch(%s) ➑️ %s#%s (#%d %s)', + self, + event.event_type, + event.event_type, + event.event_id[-4:], + self.event_queue.qsize(), + event.event_status, + ) except asyncio.QueueFull: # Don't add to history if we can't queue it logger.error( @@ -614,15 +669,17 @@ def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: ) raise # could also block indefinitely until queue has space, but dont drop silently or delete events else: - logger.warning(f'⚠️ {self}.dispatch() called but event_queue is None! Event not queued: {event.event_type}') + logger.warning('⚠️ %s.dispatch() called but event_queue is None! Event not queued: %s', self, event.event_type) # Note: We do NOT pre-create EventResults here anymore. # EventResults are created only when handlers actually start executing. # This avoids "orphaned" pending results for handlers that get filtered out later. - # Clean up if over the limit - if self.max_history_size and len(self.event_history) > self.max_history_size: - self.cleanup_event_history() + # Soft cleanup during enqueue to prevent unbounded growth while keeping hot dispatch fast. + if self.max_history_size: + soft_limit = max(self.max_history_size, int(self.max_history_size * 1.2)) + if len(self.event_history) > soft_limit: + self.cleanup_event_history() return event @@ -1094,10 +1151,14 @@ async def stop(self, timeout: float | None = None, clear: bool = False) -> None: pass queue_size = self.event_queue.qsize() if self.event_queue else 0 - if queue_size or self.events_pending or self.events_started: + has_inflight = self._has_inflight_events_fast() + if queue_size or has_inflight: logger.debug( - f'⚠️ {self} stopping with pending events: Pending {len(self.events_pending) + queue_size} | Started {len(self.events_started)} | Completed {len(self.events_completed)}\n' - f'PENDING={str(self.events_pending)[:500]}\nSTARTED={str(self.events_started)[:500]}' + '⚠️ %s stopping with pending events: queue=%d inflight=%s history=%d', + self, + queue_size, + has_inflight, + len(self.event_history), ) # Signal shutdown @@ -1119,6 +1180,8 @@ async def stop(self, timeout: float | None = None, clear: bool = False) -> None: # Clear references self._runloop_task = None + self._active_event_ids.clear() + self._processing_event_ids.clear() if self._on_idle: self._on_idle.set() @@ -1131,6 +1194,7 @@ async def stop(self, timeout: float | None = None, clear: bool = False) -> None: if clear: self.event_history.clear() self.handlers.clear() + self._active_event_ids.clear() # Remove from global instance tracking if self in EventBus.all_instances: @@ -1145,9 +1209,9 @@ async def stop(self, timeout: float | None = None, clear: bool = False) -> None: # No running loop, that's fine pass - logger.debug(f'🧹 {self} cleared event history and removed from global tracking') + logger.debug('🧹 %s cleared event history and removed from global tracking', self) - logger.debug(f'πŸ›‘ {self} shut down gracefully' if timeout is not None else f'πŸ›‘ {self} killed') + logger.debug('πŸ›‘ %s shut down %s', self, 'gracefully' if timeout is not None else 'immediately') # Check total memory usage across all instances try: @@ -1185,7 +1249,7 @@ async def wait_until_idle(self, timeout: float | None = None) -> None: await asyncio.sleep(0) # Yield to event loop # Double-check we're truly idle - if new events came in, wait again - while not self._on_idle.is_set() or self.events_started or self.events_pending: + while not self._on_idle.is_set() or self._has_inflight_events_fast(): if timeout is not None: elapsed = asyncio.get_event_loop().time() - start_time remaining_timeout = max(0, timeout - elapsed) @@ -1200,7 +1264,10 @@ async def wait_until_idle(self, timeout: float | None = None) -> None: except TimeoutError: logger.warning( - f'βŒ›οΈ {self} Timeout waiting for event bus to be idle after {timeout}s (processing: {len(self.events_started)})' + 'βŒ›οΈ %s Timeout waiting for event bus to be idle after %ss (history=%d)', + self, + timeout, + len(self.event_history), ) async def _run_loop(self) -> None: @@ -1211,7 +1278,7 @@ async def _run_loop(self) -> None: _processed_event = await self.step() # Check if we should set idle state after processing if self._on_idle and self.event_queue: - if not (self.events_pending or self.events_started or self.event_queue.qsize()): + if not self._has_inflight_events_fast() and self.event_queue.qsize() == 0: self._on_idle.set() except QueueShutDown: # Queue was shut down, exit cleanly @@ -1221,10 +1288,10 @@ async def _run_loop(self) -> None: if 'Event loop is closed' in str(e) or 'no running event loop' in str(e): break else: - logger.exception(f'❌ {self} Runtime error in event loop: {type(e).__name__} {e}', exc_info=True) + logger.exception('❌ %s Runtime error in event loop: %s %s', self, type(e).__name__, e, exc_info=True) # Continue running even if there's an error except Exception as e: - logger.exception(f'❌ {self} Error in event loop: {type(e).__name__} {e}', exc_info=True) + logger.exception('❌ %s Error in event loop: %s %s', self, type(e).__name__, e, exc_info=True) # Continue running even if there's an error except asyncio.CancelledError: # Task was cancelled, clean exit @@ -1269,7 +1336,7 @@ async def _run_loop_weak(bus_ref: 'weakref.ReferenceType[EventBus]') -> None: if bus is None: break if bus._on_idle and bus.event_queue: - if not (bus.events_pending or bus.events_started or bus.event_queue.qsize()): + if not bus._has_inflight_events_fast() and bus.event_queue.qsize() == 0: bus._on_idle.set() del bus continue @@ -1300,13 +1367,15 @@ async def _run_loop_weak(bus_ref: 'weakref.ReferenceType[EventBus]') -> None: if bus._on_idle: bus._on_idle.clear() + if event is not None: + bus._processing_event_ids.add(event.event_id) async with _get_global_lock(): if event is not None: await bus.handle_event(event) queue.task_done() if bus._on_idle and bus.event_queue: - if not (bus.events_pending or bus.events_started or bus.event_queue.qsize()): + if not bus._has_inflight_events_fast() and bus.event_queue.qsize() == 0: bus._on_idle.set() except QueueShutDown: break @@ -1319,6 +1388,8 @@ async def _run_loop_weak(bus_ref: 'weakref.ReferenceType[EventBus]') -> None: except Exception as e: logger.exception(f'❌ Weak run loop error: {type(e).__name__} {e}', exc_info=True) finally: + if event is not None: + bus._processing_event_ids.discard(event.event_id) del bus finally: bus = bus_ref() @@ -1351,7 +1422,7 @@ async def _get_next_event(self, wait_for_timeout: float = 0.1) -> 'BaseEvent[Any get_next_queued_event.cancel() # Check if we're idle, if so, set the idle flag - if not (self.events_pending or self.events_started or self.event_queue.qsize()): + if not self._has_inflight_events_fast() and self.event_queue.qsize() == 0: self._on_idle.set() return None @@ -1408,21 +1479,27 @@ async def step( if event is None: return None - logger.debug(f'πŸƒ {self}.step({event}) STARTING') + if logger.isEnabledFor(logging.DEBUG): + logger.debug('πŸƒ %s.step(%s) STARTING', self, event) # Clear idle state when we get an event self._on_idle.clear() # Always acquire the global lock (it's re-entrant across tasks) - async with _get_global_lock(): - # Process the event - await self.handle_event(event, timeout=timeout) + self._processing_event_ids.add(event.event_id) + try: + async with _get_global_lock(): + # Process the event + await self.handle_event(event, timeout=timeout) - # Mark task as done only if we got it from the queue - if from_queue: - self.event_queue.task_done() + # Mark task as done only if we got it from the queue + if from_queue: + self.event_queue.task_done() + finally: + self._processing_event_ids.discard(event.event_id) - logger.debug(f'βœ… {self}.step({event}) COMPLETE') + if logger.isEnabledFor(logging.DEBUG): + logger.debug('βœ… %s.step(%s) COMPLETE', self, event) return event async def handle_event(self, event: BaseEvent[Any], timeout: float | None = None) -> None: @@ -1467,19 +1544,15 @@ async def handle_event(self, event: BaseEvent[Any], timeout: float | None = None # Get applicable handlers applicable_handlers = self._get_applicable_handlers(event) - # Prepare EventResult placeholders ahead of execution - event.event_create_pending_results( - applicable_handlers, eventbus=self, timeout=timeout or event.event_timeout - ) - # Execute handlers await self._execute_handlers(event, handlers=applicable_handlers, timeout=timeout) # Mark event as complete and emit change if it just completed - was_complete = event.event_completed_signal and event.event_completed_signal.is_set() - event.event_mark_complete_if_all_handlers_completed() - just_completed = not was_complete and event.event_completed_signal and event.event_completed_signal.is_set() + was_complete = self._is_event_complete_fast(event) + event.event_mark_complete_if_all_handlers_completed(current_bus=self) + just_completed = (not was_complete) and self._is_event_complete_fast(event) if just_completed: + self._mark_event_complete_on_all_buses(event) await self._on_event_change(event, EventStatus.COMPLETED) # After processing this event, check if any parent events can now be marked complete @@ -1504,18 +1577,19 @@ async def handle_event(self, event: BaseEvent[Any], timeout: float | None = None break # Check if parent can be marked complete - was_complete = parent_event.event_completed_signal and parent_event.event_completed_signal.is_set() + was_complete = self._is_event_complete_fast(parent_event) if not was_complete: - parent_event.event_mark_complete_if_all_handlers_completed() - just_completed = not was_complete and parent_event.event_completed_signal and parent_event.event_completed_signal.is_set() + parent_event.event_mark_complete_if_all_handlers_completed(current_bus=parent_bus) + just_completed = (not was_complete) and self._is_event_complete_fast(parent_event) if parent_bus and just_completed: + self._mark_event_complete_on_all_buses(parent_event) await parent_bus._on_event_change(parent_event, EventStatus.COMPLETED) # Move up the chain current = parent_event # Clean up excess events to prevent memory leaks - if self.max_history_size: + if self.max_history_size and len(self.event_history) > self.max_history_size: self.cleanup_event_history() def _get_applicable_handlers(self, event: BaseEvent[Any]) -> dict[str, EventHandler]: @@ -1572,26 +1646,18 @@ async def _execute_handlers( pending_results = event.event_create_pending_results( applicable_handlers, eventbus=self, timeout=timeout or event.event_timeout ) - for pending_result in pending_results.values(): - await self._on_event_result_change( - event, pending_result, EventStatus.PENDING - ) + if self.middlewares: + for pending_result in pending_results.values(): + await self._on_event_result_change(event, pending_result, EventStatus.PENDING) # Execute all handlers in parallel if self.parallel_handlers: - handler_tasks: dict[PythonIdStr, tuple[asyncio.Task[Any], EventHandler]] = {} - # Copy the current context to ensure context vars are propagated - context = contextvars.copy_context() - for handler_id, handler in applicable_handlers.items(): - task = asyncio.create_task( - self.execute_handler(event, handler, timeout=timeout), - name=f'{self}.execute_handler({event}, {get_handler_name(handler)})', - context=context, - ) - handler_tasks[handler_id] = (task, handler) + handler_tasks: list[asyncio.Task[Any]] = [] + for handler in applicable_handlers.values(): + handler_tasks.append(asyncio.create_task(self.execute_handler(event, handler, timeout=timeout))) - # Wait for all handlers to complete - for handler_id, (task, handler) in handler_tasks.items(): + # Wait for all handlers to complete. + for task in handler_tasks: try: await task except Exception: @@ -1604,9 +1670,16 @@ async def _execute_handlers( await self.execute_handler(event, handler, timeout=timeout) except Exception as e: # Error already logged and recorded in execute_handler - logger.debug( - f'❌ {self} Handler {get_handler_name(handler)}#{str(id(handler))[-4:]}({event}) failed with {type(e).__name__}: {e}' - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + '❌ %s Handler %s#%s(%s) failed with %s: %s', + self, + get_handler_name(handler), + str(id(handler))[-4:], + event, + type(e).__name__, + e, + ) pass # print('FINSIHED EXECUTING ALL HANDLERS') @@ -1620,7 +1693,14 @@ async def execute_handler( """Safely execute a single handler with middleware support and EventResult orchestration.""" handler_id = get_handler_id(handler, self) - logger.debug(f' ↳ {self}.execute_handler({event}, handler={get_handler_name(handler)}#{handler_id[-4:]})') + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + ' ↳ %s.execute_handler(%s, handler=%s#%s)', + self, + event, + get_handler_name(handler), + handler_id[-4:], + ) if handler_id not in event.event_results: new_results = event.event_create_pending_results( @@ -1655,9 +1735,13 @@ async def execute_handler( ) result_type_name = type(result_value).__name__ if result_value is not None else 'None' - logger.debug( - f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {result_type_name}' - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + ' ↳ Handler %s#%s returned: %s', + get_handler_name(handler), + handler_id[-4:], + result_type_name, + ) await self._on_event_result_change( event, event_result, EventStatus.COMPLETED @@ -1788,7 +1872,8 @@ def cleanup_excess_events(self) -> int: del self.event_history[event_id] if event_ids_to_remove: - logger.debug(f'🧹 {self} Cleaned up {len(event_ids_to_remove)} excess events from history') + if logger.isEnabledFor(logging.DEBUG): + logger.debug('🧹 %s Cleaned up %d excess events from history', self, len(event_ids_to_remove)) return len(event_ids_to_remove) @@ -1809,12 +1894,12 @@ def cleanup_event_history(self) -> int: completed_events: list[tuple[str, BaseEvent[Any]]] = [] for event_id, event in self.event_history.items(): - if event.event_status == 'pending': - pending_events.append((event_id, event)) - elif event.event_status == 'started': - started_events.append((event_id, event)) - else: # completed or error + if self._is_event_complete_fast(event): completed_events.append((event_id, event)) + elif self._is_event_started_fast(event): + started_events.append((event_id, event)) + else: + pending_events.append((event_id, event)) # Sort completed events by creation time (oldest first) completed_events.sort(key=lambda x: x[1].event_created_at.timestamp()) # pyright: ignore[reportUnknownMemberType, reportUnknownLambdaType] diff --git a/tests/test_stress_20k_events.py b/tests/test_stress_20k_events.py index 0a07401..942c85d 100644 --- a/tests/test_stress_20k_events.py +++ b/tests/test_stress_20k_events.py @@ -1,5 +1,7 @@ import asyncio +import functools import gc +import inspect import math import os import time @@ -9,6 +11,8 @@ import pytest from bubus import BaseEvent, EventBus +import bubus.models as models_module +import bubus.service as service_module def get_memory_usage_mb(): @@ -44,6 +48,11 @@ async def dispatch_and_measure( done_latencies_ms: list[float] = [] pending: list[tuple[BaseEvent[Any], float]] = [] + async def wait_one(item: tuple[BaseEvent[Any], float]) -> None: + event, t_dispatch_done = item + await event + done_latencies_ms.append((time.perf_counter() - t_dispatch_done) * 1000) + start = time.perf_counter() for _ in range(total_events): t0 = time.perf_counter() @@ -54,11 +63,6 @@ async def dispatch_and_measure( await asyncio.gather(*(wait_one(item) for item in pending)) pending.clear() - async def wait_one(item: tuple[BaseEvent[Any], float]) -> None: - event, t_dispatch_done = item - await event - done_latencies_ms.append((time.perf_counter() - t_dispatch_done) * 1000) - if pending: await asyncio.gather(*(wait_one(item) for item in pending)) await bus.wait_until_idle() @@ -183,6 +187,59 @@ def throughput_regression_floor( return max(hard_floor, first_run_throughput * min_fraction) +class MethodProfiler: + """Lightweight monkeypatch profiler for selected class methods.""" + + def __init__(self) -> None: + self.stats: dict[str, dict[str, float]] = {} + self._restore: list[tuple[type[Any], str, Any]] = [] + + def instrument(self, owner: type[Any], method_name: str, label: str | None = None) -> None: + original = getattr(owner, method_name) + metric_name = label or f'{owner.__name__}.{method_name}' + + if inspect.iscoroutinefunction(original): + @functools.wraps(original) + async def wrapped(*args: Any, **kwargs: Any) -> Any: + started = time.perf_counter() + try: + return await original(*args, **kwargs) + finally: + elapsed = time.perf_counter() - started + metric = self.stats.setdefault(metric_name, {'calls': 0.0, 'total_s': 0.0}) + metric['calls'] += 1.0 + metric['total_s'] += elapsed + else: + @functools.wraps(original) + def wrapped(*args: Any, **kwargs: Any) -> Any: + started = time.perf_counter() + try: + return original(*args, **kwargs) + finally: + elapsed = time.perf_counter() - started + metric = self.stats.setdefault(metric_name, {'calls': 0.0, 'total_s': 0.0}) + metric['calls'] += 1.0 + metric['total_s'] += elapsed + + self._restore.append((owner, method_name, original)) + setattr(owner, method_name, wrapped) + + def restore(self) -> None: + for owner, method_name, original in reversed(self._restore): + setattr(owner, method_name, original) + self._restore.clear() + + def top_lines(self, limit: int = 12) -> list[str]: + ranked = sorted(self.stats.items(), key=lambda item: item[1]['total_s'], reverse=True) + lines: list[str] = [] + for name, metric in ranked[:limit]: + calls = int(metric['calls']) + total_s = metric['total_s'] + avg_us = (total_s * 1_000_000.0) / max(calls, 1) + lines.append(f'{name}: calls={calls:,} total={total_s:.3f}s avg={avg_us:.1f}us') + return lines + + async def run_contention_round( *, parallel_handlers: bool, @@ -730,3 +787,607 @@ async def sink_handler(event: SimpleEvent) -> None: f'{mode} forwarding throughput regression: {throughput:.0f} events/sec ' f'(expected >= {floor} events/sec)' ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_global_lock_contention_multi_bus_matrix(parallel_handlers: bool): + """ + High-contention benchmark: many buses dispatching concurrently under global lock. + """ + phase1 = await run_contention_round(parallel_handlers=parallel_handlers) + phase2 = await run_contention_round(parallel_handlers=parallel_handlers) + + expected_per_bus = 120.0 + hard_floor = 120.0 + regression_floor = throughput_regression_floor( + phase1['throughput'], + min_fraction=0.55, + hard_floor=90.0, + ) + + assert phase1['fairness_min'] == expected_per_bus + assert phase1['fairness_max'] == expected_per_bus + assert phase2['fairness_min'] == expected_per_bus + assert phase2['fairness_max'] == expected_per_bus + assert phase1['throughput'] >= hard_floor, ( + f'lock-contention throughput too low: {phase1["throughput"]:.0f} events/sec ' + f'(expected >= {hard_floor:.0f})' + ) + assert phase2['throughput'] >= regression_floor, ( + f'lock-contention regression: phase1={phase1["throughput"]:.0f} ' + f'phase2={phase2["throughput"]:.0f} ' + f'(required >= {regression_floor:.0f})' + ) + assert phase2['dispatch_p95_ms'] < 25.0 + assert phase2['done_p95_ms'] < 250.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'handlers_per_event', + [10, 30], + ids=['fanout_10_handlers', 'fanout_30_handlers'], +) +async def test_parallel_handlers_mode_scales_with_high_fanout(handlers_per_event: int): + """ + High fanout benchmark to catch regressions in parallel handler scheduling. + """ + serial_handled, serial_duration = await run_io_fanout_benchmark( + parallel_handlers=False, + total_events=400, + handlers_per_event=handlers_per_event, + sleep_seconds=0.001, + batch_size=25, + ) + parallel_handled, parallel_duration = await run_io_fanout_benchmark( + parallel_handlers=True, + total_events=400, + handlers_per_event=handlers_per_event, + sleep_seconds=0.001, + batch_size=25, + ) + + expected_total = 400 * handlers_per_event + speedup = serial_duration / max(parallel_duration, 1e-9) + minimum_speedup = 1.2 if handlers_per_event == 10 else 1.5 + + assert serial_handled == expected_total + assert parallel_handled == expected_total + assert speedup >= minimum_speedup, ( + f'Parallel fanout speedup too small for {handlers_per_event} handlers/event: ' + f'{speedup:.2f}x (expected >= {minimum_speedup:.2f}x)' + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_queue_jump_perf_matrix_by_mode(parallel_handlers: bool): + """ + Queue-jump throughput/latency matrix (parent awaits child on same bus) by mode. + """ + class QueueJumpParentEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.2 + + class QueueJumpChildEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.2 + + bus = EventBus( + name=f'QueueJump_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + + parent_count = 0 + child_count = 0 + phase_counter = 0 + + async def child_handler(event: QueueJumpChildEvent) -> None: + nonlocal child_count + child_count += 1 + await asyncio.sleep(0.0005) + + async def parent_handler(event: QueueJumpParentEvent) -> None: + nonlocal parent_count + parent_count += 1 + child = bus.dispatch(QueueJumpChildEvent(iteration=event.iteration)) + await child + + bus.on(QueueJumpParentEvent, parent_handler) + bus.on(QueueJumpChildEvent, child_handler) + + def parent_factory() -> QueueJumpParentEvent: + nonlocal phase_counter + event = QueueJumpParentEvent(iteration=phase_counter) + phase_counter += 1 + return event + + try: + phase1 = await dispatch_and_measure(bus, parent_factory, total_events=500, batch_size=20) + phase2 = await dispatch_and_measure(bus, parent_factory, total_events=500, batch_size=20) + finally: + await bus.stop(timeout=0, clear=True) + + hard_floor = 60.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.50, hard_floor=50.0) + + assert parent_count == 1_000 + assert child_count == 1_000 + assert phase1[0] >= hard_floor, ( + f'queue-jump throughput too low: {phase1[0]:.0f} events/sec (expected >= {hard_floor:.0f})' + ) + assert phase2[0] >= regression_floor, ( + f'queue-jump regression: phase1={phase1[0]:.0f} phase2={phase2[0]:.0f} ' + f'(required >= {regression_floor:.0f})' + ) + assert phase2[2] < 15.0 + assert phase2[4] < 120.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_forwarding_chain_perf_matrix_by_mode(parallel_handlers: bool): + """ + Forwarding chain A -> B -> C throughput/latency matrix by mode. + """ + source_bus = EventBus( + name=f'ChainSource_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=120, + middlewares=[], + ) + middle_bus = EventBus( + name=f'ChainMiddle_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=120, + middlewares=[], + ) + sink_bus = EventBus( + name=f'ChainSink_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=120, + middlewares=[], + ) + + sink_count = 0 + + async def sink_handler(event: SimpleEvent) -> None: + nonlocal sink_count + sink_count += 1 + + async def forward_to_middle(event: BaseEvent[Any]) -> None: + while True: + try: + middle_bus.dispatch(event) + return + except asyncio.QueueFull: + await asyncio.sleep(0) + except RuntimeError as exc: + if 'EventBus at capacity' not in str(exc): + raise + await asyncio.sleep(0) + + async def forward_to_sink(event: BaseEvent[Any]) -> None: + while True: + try: + sink_bus.dispatch(event) + return + except asyncio.QueueFull: + await asyncio.sleep(0) + except RuntimeError as exc: + if 'EventBus at capacity' not in str(exc): + raise + await asyncio.sleep(0) + + source_bus.on('*', forward_to_middle) + middle_bus.on('*', forward_to_sink) + sink_bus.on(SimpleEvent, sink_handler) + + try: + phase1 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=500, batch_size=5) + phase2 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=500, batch_size=5) + await source_bus.wait_until_idle() + await middle_bus.wait_until_idle() + await sink_bus.wait_until_idle() + finally: + await source_bus.stop(timeout=0, clear=True) + await middle_bus.stop(timeout=0, clear=True) + await sink_bus.stop(timeout=0, clear=True) + + hard_floor = 35.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.45, hard_floor=20.0) + + assert sink_count == 1_000 + assert phase1[0] >= hard_floor + assert phase2[0] >= regression_floor + assert phase2[2] < 40.0 + assert phase2[4] < 350.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_timeout_churn_perf_matrix_by_mode(parallel_handlers: bool): + """ + Timeout-heavy phase followed by healthy phase should keep throughput healthy. + """ + class TimeoutChurnEvent(BaseEvent): + mode: str = 'slow' + iteration: int = 0 + event_timeout: float | None = 0.01 + + bus = EventBus( + name=f'TimeoutChurn_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + middlewares=[], + ) + + timeout_phase_events: list[TimeoutChurnEvent] = [] + recovery_phase_events: list[TimeoutChurnEvent] = [] + timeout_counter = 0 + recovery_counter = 0 + + async def handler(event: TimeoutChurnEvent) -> None: + if event.mode == 'slow': + await asyncio.sleep(0.006) + else: + await asyncio.sleep(0) + + bus.on(TimeoutChurnEvent, handler) + + def timeout_factory() -> TimeoutChurnEvent: + nonlocal timeout_counter + is_slow = (timeout_counter % 3) != 0 + event = TimeoutChurnEvent( + mode='slow' if is_slow else 'fast', + iteration=timeout_counter, + event_timeout=0.001 if is_slow else 0.02, + ) + timeout_phase_events.append(event) + timeout_counter += 1 + return event + + def recovery_factory() -> TimeoutChurnEvent: + nonlocal recovery_counter + event = TimeoutChurnEvent( + mode='fast', + iteration=10_000 + recovery_counter, + event_timeout=0.02, + ) + recovery_phase_events.append(event) + recovery_counter += 1 + return event + + try: + timeout_phase = await dispatch_and_measure(bus, timeout_factory, total_events=180, batch_size=20) + recovery_phase = await dispatch_and_measure(bus, recovery_factory, total_events=500, batch_size=25) + finally: + await bus.stop(timeout=0, clear=True) + + timeout_count = sum( + 1 + for event in timeout_phase_events + if event.mode == 'slow' + and any(isinstance(result.error, TimeoutError) for result in event.event_results.values()) + ) + recovery_errors = sum( + 1 + for event in recovery_phase_events + if any(result.error is not None for result in event.event_results.values()) + ) + hard_floor = 120.0 + regression_floor = throughput_regression_floor( + timeout_phase[0], + min_fraction=0.45, + hard_floor=100.0, + ) + + assert timeout_count > 0 + assert recovery_errors == 0 + assert recovery_phase[0] >= hard_floor + assert recovery_phase[0] >= regression_floor + assert recovery_phase[2] < 12.0 + assert recovery_phase[4] < 70.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_memory_envelope_by_mode_for_capped_history(parallel_handlers: bool): + """ + Mode-specific memory slope/envelope check with capped history. + """ + bus = EventBus( + name=f'MemoryEnvelope_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=60, + middlewares=[], + ) + + async def handler(event: SimpleEvent) -> None: + return None + + bus.on(SimpleEvent, handler) + + gc.collect() + before_mb = get_memory_usage_mb() + + try: + metrics = await dispatch_and_measure(bus, SimpleEvent, total_events=6_000, batch_size=40) + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + retained = len(bus.event_history) + finally: + await bus.stop(timeout=0, clear=True) + + done_delta = done_mb - before_mb + gc_delta = gc_mb - before_mb + per_dispatched_kb = (max(done_delta, 0.0) * 1024.0) / 6_000 + per_retained_mb = max(gc_delta, 0.0) / max(retained, 1) + done_budget = 130.0 if parallel_handlers else 110.0 + gc_budget = 70.0 if parallel_handlers else 60.0 + + assert retained <= 60 + assert metrics[0] >= 450.0 + assert metrics[2] < 10.0 + assert metrics[4] < 60.0 + assert done_delta < done_budget + assert gc_delta < gc_budget + assert per_dispatched_kb < 32.0 + assert per_retained_mb < 1.5 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_max_history_none_single_bus_stress_matrix(parallel_handlers: bool): + """ + Unlimited-history mode stress for single bus: throughput + memory envelope. + """ + bus = EventBus( + name=f'UnlimitedSingle_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=None, + middlewares=[], + ) + processed = 0 + + async def handler(event: SimpleEvent) -> None: + nonlocal processed + processed += 1 + + bus.on(SimpleEvent, handler) + + gc.collect() + before_mb = get_memory_usage_mb() + try: + phase1 = await dispatch_and_measure(bus, SimpleEvent, total_events=1_500, batch_size=120) + phase2 = await dispatch_and_measure(bus, SimpleEvent, total_events=1_500, batch_size=120) + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + history_size = len(bus.event_history) + finally: + await bus.stop(timeout=0, clear=True) + + done_delta = done_mb - before_mb + gc_delta = gc_mb - before_mb + per_event_mb = max(gc_delta, 0.0) / 3_000 + hard_floor = 220.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.55, hard_floor=170.0) + + assert processed == 3_000 + assert history_size == 3_000 + assert phase1[0] >= hard_floor + assert phase2[0] >= regression_floor + assert phase2[2] < 12.0 + assert phase2[4] < 80.0 + assert done_delta < 260.0 + assert gc_delta < 220.0 + assert per_event_mb < 0.08 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'parallel_handlers', + [False, True], + ids=['serial_handlers', 'parallel_handlers'], +) +async def test_max_history_none_forwarding_chain_stress_matrix(parallel_handlers: bool): + """ + Unlimited-history forwarding chain (A -> B -> C) stress by mode. + """ + source_bus = EventBus( + name=f'UnlimitedChainSource_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=None, + middlewares=[], + ) + middle_bus = EventBus( + name=f'UnlimitedChainMiddle_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=None, + middlewares=[], + ) + sink_bus = EventBus( + name=f'UnlimitedChainSink_{"parallel" if parallel_handlers else "serial"}', + parallel_handlers=parallel_handlers, + max_history_size=None, + middlewares=[], + ) + + sink_count = 0 + + async def sink_handler(event: SimpleEvent) -> None: + nonlocal sink_count + sink_count += 1 + + source_bus.on('*', middle_bus.dispatch) + middle_bus.on('*', sink_bus.dispatch) + sink_bus.on(SimpleEvent, sink_handler) + + gc.collect() + before_mb = get_memory_usage_mb() + try: + phase1 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=900, batch_size=100) + phase2 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=900, batch_size=100) + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + source_hist = len(source_bus.event_history) + middle_hist = len(middle_bus.event_history) + sink_hist = len(sink_bus.event_history) + finally: + await source_bus.stop(timeout=0, clear=True) + await middle_bus.stop(timeout=0, clear=True) + await sink_bus.stop(timeout=0, clear=True) + + gc_delta = gc_mb - before_mb + done_delta = done_mb - before_mb + hard_floor = 170.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.55, hard_floor=130.0) + + assert sink_count == 1_800 + assert source_hist == 1_800 + assert middle_hist == 1_800 + assert sink_hist == 1_800 + assert phase1[0] >= hard_floor + assert phase2[0] >= regression_floor + assert phase2[2] < 15.0 + assert phase2[4] < 100.0 + assert done_delta < 320.0 + assert gc_delta < 280.0 + + +@pytest.mark.asyncio +@pytest.mark.skipif( + os.getenv('BUBUS_PERF_DEBUG') != '1', + reason='Set BUBUS_PERF_DEBUG=1 to enable hot-path timing diagnostics', +) +async def test_perf_debug_hot_path_breakdown() -> None: + """ + Debug-only perf test: + profiles key hot-path methods to confirm where time is spent before optimizing. + """ + profiler = MethodProfiler() + instrumented = [ + (service_module.ReentrantLock, '__aenter__'), + (service_module.ReentrantLock, '__aexit__'), + (service_module.EventBus, '_get_applicable_handlers'), + (service_module.EventBus, '_would_create_loop'), + (service_module.EventBus, '_execute_handlers'), + (service_module.EventBus, 'execute_handler'), + (service_module.EventBus, 'cleanup_event_history'), + (models_module.BaseEvent, 'event_create_pending_results'), + (models_module.BaseEvent, '_is_queued_on_any_bus'), + (models_module.BaseEvent, '_remove_self_from_queue'), + (models_module.BaseEvent, '_process_self_on_all_buses'), + ] + for owner, method_name in instrumented: + profiler.instrument(owner, method_name) + + class DebugParentEvent(BaseEvent): + idx: int = 0 + event_timeout: float | None = 0.2 + + class DebugChildEvent(BaseEvent): + idx: int = 0 + event_timeout: float | None = 0.2 + + bus_a = EventBus(name='PerfDebugA', middlewares=[]) + bus_b = EventBus(name='PerfDebugB', middlewares=[]) + + forwarded_simple_count = 0 + child_count = 0 + parent_counter = 0 + + async def forwarded_simple_handler(event: SimpleEvent) -> None: + nonlocal forwarded_simple_count + forwarded_simple_count += 1 + + async def child_handler(event: DebugChildEvent) -> None: + nonlocal child_count + child_count += 1 + await asyncio.sleep(0) + + async def parent_handler(event: DebugParentEvent) -> None: + child = bus_a.dispatch(DebugChildEvent(idx=event.idx)) + bus_b.dispatch(child) + await child + + bus_a.on('*', bus_b.dispatch) + bus_b.on(SimpleEvent, forwarded_simple_handler) + bus_a.on(DebugParentEvent, parent_handler) + bus_b.on(DebugChildEvent, child_handler) + + def parent_factory() -> DebugParentEvent: + nonlocal parent_counter + event = DebugParentEvent(idx=parent_counter) + parent_counter += 1 + return event + + gc.collect() + before_mb = get_memory_usage_mb() + start = time.perf_counter() + try: + simple_metrics = await dispatch_and_measure(bus_a, SimpleEvent, total_events=2_000, batch_size=50) + parent_metrics = await dispatch_and_measure(bus_a, parent_factory, total_events=600, batch_size=20) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + finally: + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + profiler.restore() + elapsed = time.perf_counter() - start + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + + print('\n[perf-debug] scenario=global_fifo_forwarding_queue_jump') + print(f'[perf-debug] elapsed_s={elapsed:.3f}') + print( + '[perf-debug] simple throughput={:.0f}/s dispatch_p95={:.3f}ms done_p95={:.3f}ms'.format( + simple_metrics[0], simple_metrics[2], simple_metrics[4] + ) + ) + print( + '[perf-debug] queue_jump throughput={:.0f}/s dispatch_p95={:.3f}ms done_p95={:.3f}ms'.format( + parent_metrics[0], parent_metrics[2], parent_metrics[4] + ) + ) + print( + '[perf-debug] memory_mb before={:.1f} done={:.1f} gc={:.1f}'.format( + before_mb, done_mb, gc_mb + ) + ) + print(f'[perf-debug] forwarded_simple_count={forwarded_simple_count:,} child_count={child_count:,}') + print('[perf-debug] hot_path_top_total_time:') + for line in profiler.top_lines(limit=14): + print(f'[perf-debug] {line}') + + assert forwarded_simple_count == 2_000 + assert child_count == 600 From 45b5c6a085e812677868d1ff7efcd8cb9ce0964f Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 9 Feb 2026 18:59:56 +0000 Subject: [PATCH 71/79] Add retry() decorator with semaphore support and re-entrancy protection Standalone higher-order function / TC39 decorator that adds configurable retry logic and semaphore-based concurrency limiting to any async function. Works independently of the event bus (on plain functions, class methods, or event handlers). Features: - max_attempts, retry_after, retry_backoff_factor, retry_on_errors, timeout - Global semaphore registry (semaphore_limit, semaphore_name, semaphore_lax) - AsyncLocalStorage-based re-entrancy tracking to prevent deadlocks when nested/recursive calls share the same semaphore - 30 tests covering retry logic, backoff, error filtering, timeouts, semaphore concurrency, re-entrancy, and event bus integration https://claude.ai/code/session_01TyuqFQFwDXa4h5QzQDCUsv --- bubus-ts/README.md | 153 ++++++++++ bubus-ts/src/index.ts | 2 + bubus-ts/src/retry.ts | 323 ++++++++++++++++++++ bubus-ts/tests/retry.test.ts | 558 +++++++++++++++++++++++++++++++++++ 4 files changed, 1036 insertions(+) create mode 100644 bubus-ts/src/retry.ts create mode 100644 bubus-ts/tests/retry.test.ts diff --git a/bubus-ts/README.md b/bubus-ts/README.md index 006f384..dd44855 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -343,3 +343,156 @@ The core contract is preserved: But the **implementation details are different** because JS needs browser compatibility and lacks Python's contextvars + asyncio primitives. The `LockManager` (runloop pause + semaphore coordination), `HandlerLock` (yield-and-reacquire), and `BusScopedEvent` proxy are the key differences that make the behavior match in practice. + +--- + +## `retry()` Decorator + +`retry()` is a standalone higher-order function / decorator that adds retry logic and optional semaphore-based +concurrency limiting to any async function. It works independently of the event bus β€” you can use it on plain +functions, class methods, or event bus handlers. + +### Basic usage + +```ts +import { retry } from 'bubus' + +// Higher-order function wrapper (works on any function) +const fetchWithRetry = retry({ max_attempts: 3, retry_after: 1 })(async (url: string) => { + const res = await fetch(url) + if (!res.ok) throw new Error(`HTTP ${res.status}`) + return res.json() +}) + +// On an event bus handler +bus.on(MyEvent, retry({ max_attempts: 3, timeout: 10 })(async (event) => { + await riskyOperation(event.data) +})) + +// On a class method (manual wrapping pattern) +class ApiClient { + fetchData = retry({ max_attempts: 3, retry_after: 0.5 })(async function (this: ApiClient) { + return await this.doRequest() + }) +} +``` + +### Options + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `max_attempts` | `number` | `1` | Total attempts including the initial call. `1` = no retry, `3` = up to 2 retries. | +| `retry_after` | `number` | `0` | Seconds to wait between retries. | +| `retry_backoff_factor` | `number` | `1.0` | Multiplier applied to `retry_after` after each attempt. `2.0` = exponential backoff. | +| `retry_on_errors` | `ErrorClass[]` | `undefined` | Only retry when the error is an `instanceof` one of these classes. `undefined` = retry on any error. | +| `timeout` | `number \| null` | `undefined` | Per-attempt timeout in seconds. Throws `RetryTimeoutError` if exceeded. | +| `semaphore_limit` | `number \| null` | `undefined` | Max concurrent executions sharing this semaphore. | +| `semaphore_name` | `string \| null` | fn name | Semaphore identifier. Functions with the same name share the same slot pool. | +| `semaphore_lax` | `boolean` | `true` | If `true`, proceed without concurrency limit when semaphore acquisition times out. | +| `semaphore_timeout` | `number \| null` | `undefined` | Max seconds to wait for semaphore. Default: `timeout * max(1, limit - 1)`. | + +### Error types + +- **`RetryTimeoutError`** β€” thrown when a single attempt exceeds `timeout`. Has `.timeout_seconds` and `.attempt` fields. Retryable by default (treated like any other error in the retry loop). +- **`SemaphoreTimeoutError`** β€” thrown (when `semaphore_lax=false`) if the semaphore cannot be acquired within the timeout. Has `.semaphore_name`, `.semaphore_limit`, `.timeout_seconds` fields. + +### Semaphore concurrency control + +The semaphore is acquired **once** before the first attempt and held across all retries. This prevents other +callers from stealing the slot between retry attempts. + +```ts +// At most 3 concurrent calls to this function across the entire process +const limited = retry({ + max_attempts: 2, + semaphore_limit: 3, + semaphore_name: 'api_calls', +})(async () => { + await callExternalApi() +}) +``` + +Functions that share a `semaphore_name` share the same slot pool β€” this is how you limit concurrency across +different functions that access the same resource. + +### Re-entrancy and deadlock prevention + +The decorator uses `AsyncLocalStorage` (on Node.js) to track which semaphores are held in the current async +call stack. When a nested call encounters a semaphore it already holds, it **skips acquisition** and runs +directly within the parent's slot. This prevents deadlocks in recursive or nested scenarios: + +```ts +const inner = retry({ semaphore_limit: 1, semaphore_name: 'shared' })(async () => 'ok') + +const outer = retry({ semaphore_limit: 1, semaphore_name: 'shared' })(async () => { + // Without re-entrancy tracking, this would deadlock: + // outer holds the semaphore, inner tries to acquire the same one. + // With re-entrancy, inner detects 'shared' is already held and skips acquisition. + return await inner() +}) + +await outer() // works, no deadlock +``` + +This also works for recursive calls (a function calling itself) and deeply nested chains (A β†’ B β†’ C all sharing +a semaphore). + +In browsers (no `AsyncLocalStorage`), re-entrancy tracking is unavailable. Avoid recursive/nested calls through +the same semaphore in browser environments, or use different `semaphore_name` values. + +### Interaction with `event_concurrency` and `event_handler_concurrency` + +`retry()` and the bus's concurrency modes are **orthogonal** and compose together: + +- **`event_concurrency`** controls how many events the bus processes at once (via the runloop + event semaphore). +- **`event_handler_concurrency`** controls how many handlers run concurrently for a single event (via the handler semaphore). +- **`retry()` semaphores** control how many concurrent invocations of a specific function are allowed (via a global semaphore registry). + +When you wrap an event handler with `retry()`, both layers apply: + +```ts +// Bus enforces bus-serial handler ordering (default). +// retry() additionally limits this specific handler to 2 concurrent invocations +// and retries up to 3 times on failure. +bus.on( + MyEvent, + retry({ max_attempts: 3, semaphore_limit: 2, semaphore_name: 'my_handler' })( + async (event) => { await doWork(event) } + ) +) +``` + +The execution order is: +1. Bus acquires the **handler concurrency semaphore** (e.g. `bus-serial`) +2. `retry()` acquires its own **retry semaphore** (if `semaphore_limit` is set) +3. The handler function runs (with retries if it throws) +4. `retry()` releases its semaphore +5. Bus releases the handler concurrency semaphore + +The bus's `handler_timeout` and `retry()`'s `timeout` are independent: +- `handler_timeout` (set via `bus.on()` options or bus defaults) applies to the **entire** wrapped handler call, including all retry attempts. +- `retry({ timeout })` applies to **each individual attempt**. + +If you need per-attempt timeouts, use `retry({ timeout })`. If you need an overall deadline for the handler +(including all retries), rely on the bus's `handler_timeout`. + +### Differences from the Python `@retry` decorator + +| Aspect | Python | TypeScript | +|--------|--------|------------| +| **Naming** | `retries=3` (retry count after first attempt) | `max_attempts=1` (total attempts including first) | +| **Naming** | `wait=3` (seconds between retries) | `retry_after=0` (seconds between retries) | +| **Naming** | `retry_on` | `retry_on_errors` | +| **Default retries** | 3 retries (4 total attempts) | 1 attempt (no retries) | +| **Default delay** | 3 seconds | 0 seconds | +| **Default timeout** | 5 seconds per attempt | No timeout | +| **Semaphore scopes** | `'global'`, `'class'`, `'self'`, `'multiprocess'` | Global only (by `semaphore_name`) | +| **Multiprocess** | Supported via `portalocker` file locks | Not supported (single-process JS runtime) | +| **System overload** | Tracks active operations, checks CPU/memory via `psutil` | Not implemented | +| **Re-entrancy** | Not implemented (relies on Python's GIL + asyncio single-thread) | `AsyncLocalStorage`-based tracking to prevent deadlocks | +| **Syntax** | `@retry(...)` decorator on `async def` | `retry({...})(fn)` HOF or `@retry({...})` on class methods (TC39 Stage 3) | +| **Sync functions** | Not supported (async-only) | Supported (wrapper always returns a Promise) | + +The TS version intentionally starts with conservative defaults (1 attempt, no delay, no timeout) so that +`retry()` with no options is a no-op wrapper. The Python version defaults to 3 retries with 3s delay and 5s +timeout, which is more aggressive. diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts index 5021eaf..ed57151 100644 --- a/bubus-ts/src/index.ts +++ b/bubus-ts/src/index.ts @@ -9,3 +9,5 @@ export { } from './event_handler.js' export type { ConcurrencyMode, EventBusInterfaceForLockManager } from './lock_manager.js' export type { EventClass, EventHandlerFunction as EventHandler, EventKey, EventStatus, FindOptions, FindWindow } from './types.js' +export { retry, clearSemaphoreRegistry, RetryTimeoutError, SemaphoreTimeoutError } from './retry.js' +export type { RetryOptions } from './retry.js' diff --git a/bubus-ts/src/retry.ts b/bubus-ts/src/retry.ts new file mode 100644 index 0000000..44fdacc --- /dev/null +++ b/bubus-ts/src/retry.ts @@ -0,0 +1,323 @@ +import { AsyncSemaphore } from './lock_manager.js' + +// ─── Types ─────────────────────────────────────────────────────────────────── + +export interface RetryOptions { + /** Total number of attempts including the initial call (1 = no retry, 3 = up to 2 retries). Default: 1 */ + max_attempts?: number + + /** Seconds to wait between retries. Default: 0 */ + retry_after?: number + + /** Multiplier applied to retry_after after each attempt for exponential backoff. Default: 1.0 (constant delay) */ + retry_backoff_factor?: number + + /** Only retry when the thrown error is an instance of one of these classes. Default: undefined (retry on any error) */ + retry_on_errors?: Array Error> + + /** Per-attempt timeout in seconds. Default: undefined (no per-attempt timeout) */ + timeout?: number | null + + /** Maximum concurrent executions sharing this semaphore. Default: undefined (no concurrency limit) */ + semaphore_limit?: number | null + + /** Semaphore identifier. Functions with the same name share the same concurrency slot pool. Default: function name */ + semaphore_name?: string | null + + /** If true, proceed without concurrency limit when semaphore acquisition times out. Default: true */ + semaphore_lax?: boolean + + /** Maximum seconds to wait for semaphore acquisition. Default: undefined β†’ timeout * max(1, limit - 1) */ + semaphore_timeout?: number | null +} + +// ─── Errors ────────────────────────────────────────────────────────────────── + +/** Thrown when a single attempt exceeds the per-attempt timeout. */ +export class RetryTimeoutError extends Error { + timeout_seconds: number + attempt: number + + constructor(message: string, params: { timeout_seconds: number; attempt: number }) { + super(message) + this.name = 'RetryTimeoutError' + this.timeout_seconds = params.timeout_seconds + this.attempt = params.attempt + } +} + +/** Thrown (when semaphore_lax=false) if the semaphore cannot be acquired within the timeout. */ +export class SemaphoreTimeoutError extends Error { + semaphore_name: string + semaphore_limit: number + timeout_seconds: number + + constructor(message: string, params: { semaphore_name: string; semaphore_limit: number; timeout_seconds: number }) { + super(message) + this.name = 'SemaphoreTimeoutError' + this.semaphore_name = params.semaphore_name + this.semaphore_limit = params.semaphore_limit + this.timeout_seconds = params.timeout_seconds + } +} + +// ─── Re-entrancy tracking via AsyncLocalStorage ────────────────────────────── +// +// Prevents deadlocks when a retry()-wrapped function calls another retry()-wrapped +// function that shares the same semaphore (or calls itself recursively). +// +// Each async call stack tracks which semaphore names it currently holds. When a +// nested call encounters a semaphore it already holds, it skips acquisition and +// runs directly within the parent's slot. + +type ReentrantStore = Set + +type AsyncLocalStorageLike = { + getStore(): ReentrantStore | undefined + run(store: ReentrantStore, callback: () => T): T +} + +let retry_context_storage: AsyncLocalStorageLike | null = null + +declare const process: { versions?: { node?: string } } | undefined +const is_node = typeof process !== 'undefined' && typeof process.versions !== 'undefined' && typeof process.versions?.node === 'string' + +if (is_node) { + try { + const importer = new Function('specifier', 'return import(specifier)') as ( + specifier: string + ) => Promise<{ AsyncLocalStorage?: new () => AsyncLocalStorageLike }> + const mod = await importer('node:async_hooks') + if (mod?.AsyncLocalStorage) { + retry_context_storage = new mod.AsyncLocalStorage() + } + } catch { + retry_context_storage = null + } +} + +function getHeldSemaphores(): ReentrantStore { + return retry_context_storage?.getStore() ?? new Set() +} + +function runWithHeldSemaphores(held: ReentrantStore, fn: () => T): T { + if (!retry_context_storage) return fn() + return retry_context_storage.run(held, fn) +} + +// ─── Global semaphore registry ─────────────────────────────────────────────── + +const SEMAPHORE_REGISTRY = new Map() + +function getOrCreateSemaphore(name: string, limit: number): AsyncSemaphore { + const existing = SEMAPHORE_REGISTRY.get(name) + if (existing && existing.size === limit) return existing + const sem = new AsyncSemaphore(limit) + SEMAPHORE_REGISTRY.set(name, sem) + return sem +} + +/** Reset the global semaphore registry. Useful in tests. */ +export function clearSemaphoreRegistry(): void { + SEMAPHORE_REGISTRY.clear() +} + +// ─── retry() decorator / higher-order wrapper ──────────────────────────────── +// +// Usage as a higher-order function (works on any async function): +// +// const fetchWithRetry = retry({ max_attempts: 3, retry_after: 1 })(async (url: string) => { +// return await fetch(url) +// }) +// +// Usage as a TC39 Stage 3 decorator on class methods (TS 5.0+): +// +// class ApiClient { +// @retry({ max_attempts: 3, retry_after: 1 }) +// async fetchData(): Promise { ... } +// } +// +// Usage on event bus handlers: +// +// bus.on(MyEvent, retry({ max_attempts: 3 })(async (event) => { +// await riskyOperation(event.data) +// })) + +export function retry(options: RetryOptions = {}) { + const { + max_attempts = 1, + retry_after = 0, + retry_backoff_factor = 1.0, + retry_on_errors, + timeout, + semaphore_limit, + semaphore_name: semaphore_name_option, + semaphore_lax = true, + semaphore_timeout, + } = options + + return function decorator any>(target: T, _context?: ClassMethodDecoratorContext): T { + const fn_name = target.name || (_context?.name as string) || 'anonymous' + const sem_name = semaphore_name_option ?? fn_name + const effective_max_attempts = Math.max(1, max_attempts) + const effective_retry_after = Math.max(0, retry_after) + + async function retryWrapper(this: any, ...args: any[]): Promise { + // ── Check re-entrancy: skip semaphore if we already hold it in this async context ── + const held = getHeldSemaphores() + const needs_semaphore = semaphore_limit != null && semaphore_limit > 0 + const is_reentrant = needs_semaphore && held.has(sem_name) + + // ── Semaphore acquisition (held across all retry attempts, skipped if re-entrant) ── + let semaphore: AsyncSemaphore | null = null + let semaphore_acquired = false + + if (needs_semaphore && !is_reentrant) { + semaphore = getOrCreateSemaphore(sem_name, semaphore_limit!) + + const effective_sem_timeout = + semaphore_timeout != null + ? semaphore_timeout + : timeout != null + ? timeout * Math.max(1, semaphore_limit! - 1) + : null + + if (effective_sem_timeout != null && effective_sem_timeout > 0) { + semaphore_acquired = await acquireWithTimeout(semaphore, effective_sem_timeout * 1000) + if (!semaphore_acquired) { + if (!semaphore_lax) { + throw new SemaphoreTimeoutError( + `Failed to acquire semaphore "${sem_name}" within ${effective_sem_timeout}s (limit=${semaphore_limit})`, + { semaphore_name: sem_name, semaphore_limit: semaphore_limit!, timeout_seconds: effective_sem_timeout } + ) + } + // lax mode: proceed without concurrency limit + } + } else { + // No timeout configured: wait indefinitely for a slot + await semaphore.acquire() + semaphore_acquired = true + } + } + + // ── Build the set of held semaphores for nested calls ── + const new_held = new Set(held) + if (semaphore_acquired) { + new_held.add(sem_name) + } + + // ── Retry loop (runs inside the semaphore and re-entrancy context) ── + const run_retry_loop = async (): Promise => { + for (let attempt = 1; attempt <= effective_max_attempts; attempt++) { + try { + if (timeout != null && timeout > 0) { + return await withTimeout(() => Promise.resolve(target.apply(this, args)), timeout * 1000, attempt) + } else { + return await Promise.resolve(target.apply(this, args)) + } + } catch (error) { + // Check if this error type should trigger a retry + if (retry_on_errors && retry_on_errors.length > 0) { + const is_retryable = retry_on_errors.some((ErrorClass) => error instanceof ErrorClass) + if (!is_retryable) throw error + } + + // Last attempt: rethrow + if (attempt >= effective_max_attempts) throw error + + // Wait before next attempt with exponential backoff + const delay_seconds = effective_retry_after * Math.pow(retry_backoff_factor, attempt - 1) + if (delay_seconds > 0) { + await sleep(delay_seconds * 1000) + } + } + } + + // Unreachable, but satisfies the type checker + throw new Error(`retry(${fn_name}): unexpected end of retry loop`) + } + + try { + return await runWithHeldSemaphores(new_held, run_retry_loop) + } finally { + if (semaphore_acquired && semaphore) { + semaphore.release() + } + } + } + + Object.defineProperty(retryWrapper, 'name', { value: fn_name, configurable: true }) + return retryWrapper as unknown as T + } +} + +// ─── Internal helpers ──────────────────────────────────────────────────────── + +/** + * Try to acquire a semaphore within a timeout. Returns true if acquired, false if timed out. + * If the semaphore is acquired after the timeout (due to the waiter remaining queued), + * it is immediately released to avoid leaking slots. + */ +async function acquireWithTimeout(semaphore: AsyncSemaphore, timeout_ms: number): Promise { + return new Promise((resolve) => { + let settled = false + + const timer = setTimeout(() => { + if (!settled) { + settled = true + resolve(false) + } + }, timeout_ms) + + semaphore.acquire().then(() => { + if (!settled) { + settled = true + clearTimeout(timer) + resolve(true) + } else { + // Acquired after timeout fired β€” release immediately to avoid slot leak + semaphore.release() + } + }) + }) +} + +/** Run fn() with a timeout. Rejects with RetryTimeoutError if the timeout fires first. */ +async function withTimeout(fn: () => Promise, timeout_ms: number, attempt: number): Promise { + return new Promise((resolve, reject) => { + let settled = false + + const timer = setTimeout(() => { + if (!settled) { + settled = true + reject( + new RetryTimeoutError(`Timed out after ${timeout_ms / 1000}s (attempt ${attempt})`, { + timeout_seconds: timeout_ms / 1000, + attempt, + }) + ) + } + }, timeout_ms) + + fn().then( + (value) => { + if (!settled) { + settled = true + clearTimeout(timer) + resolve(value) + } + }, + (error) => { + if (!settled) { + settled = true + clearTimeout(timer) + reject(error) + } + } + ) + }) +} + +function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)) +} diff --git a/bubus-ts/tests/retry.test.ts b/bubus-ts/tests/retry.test.ts new file mode 100644 index 0000000..e0a249b --- /dev/null +++ b/bubus-ts/tests/retry.test.ts @@ -0,0 +1,558 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { + BaseEvent, + EventBus, + retry, + clearSemaphoreRegistry, + RetryTimeoutError, + SemaphoreTimeoutError, +} from '../src/index.js' + +const delay = (ms: number): Promise => new Promise((resolve) => setTimeout(resolve, ms)) + +// ─── Basic retry behavior ──────────────────────────────────────────────────── + +test('retry: function succeeds on first attempt with no retries needed', async () => { + const fn = retry({ max_attempts: 3 })(async () => 'ok') + assert.equal(await fn(), 'ok') +}) + +test('retry: function retries on failure and eventually succeeds', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3 })(async () => { + calls++ + if (calls < 3) throw new Error(`fail ${calls}`) + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: throws after exhausting all attempts', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3 })(async () => { + calls++ + throw new Error('always fails') + }) + await assert.rejects(fn, { message: 'always fails' }) + assert.equal(calls, 3) +}) + +test('retry: max_attempts=1 means no retries (single attempt)', async () => { + let calls = 0 + const fn = retry({ max_attempts: 1 })(async () => { + calls++ + throw new Error('fail') + }) + await assert.rejects(fn, { message: 'fail' }) + assert.equal(calls, 1) +}) + +test('retry: default max_attempts=1 means single attempt', async () => { + let calls = 0 + const fn = retry()(async () => { + calls++ + throw new Error('fail') + }) + await assert.rejects(fn, { message: 'fail' }) + assert.equal(calls, 1) +}) + +// ─── retry_after delay ─────────────────────────────────────────────────────── + +test('retry: retry_after introduces delay between attempts', async () => { + let calls = 0 + const timestamps: number[] = [] + const fn = retry({ max_attempts: 3, retry_after: 0.05 })(async () => { + calls++ + timestamps.push(performance.now()) + if (calls < 3) throw new Error('fail') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) + + // Check that delays were at least ~50ms between attempts + const gap1 = timestamps[1] - timestamps[0] + const gap2 = timestamps[2] - timestamps[1] + assert.ok(gap1 >= 40, `expected >=40ms gap, got ${gap1.toFixed(1)}ms`) + assert.ok(gap2 >= 40, `expected >=40ms gap, got ${gap2.toFixed(1)}ms`) +}) + +// ─── Exponential backoff ───────────────────────────────────────────────────── + +test('retry: retry_backoff_factor increases delay between attempts', async () => { + let calls = 0 + const timestamps: number[] = [] + const fn = retry({ max_attempts: 4, retry_after: 0.03, retry_backoff_factor: 2.0 })(async () => { + calls++ + timestamps.push(performance.now()) + if (calls < 4) throw new Error('fail') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 4) + + // Delays: 30ms, 60ms, 120ms (0.03 * 2^0, 0.03 * 2^1, 0.03 * 2^2) + const gap1 = timestamps[1] - timestamps[0] + const gap2 = timestamps[2] - timestamps[1] + const gap3 = timestamps[3] - timestamps[2] + + assert.ok(gap1 >= 20, `gap1=${gap1.toFixed(1)}ms, expected >=20ms`) + assert.ok(gap2 >= 45, `gap2=${gap2.toFixed(1)}ms, expected >=45ms (should be ~60ms)`) + assert.ok(gap3 >= 90, `gap3=${gap3.toFixed(1)}ms, expected >=90ms (should be ~120ms)`) + // Verify backoff is actually increasing + assert.ok(gap2 > gap1, 'gap2 should be larger than gap1') + assert.ok(gap3 > gap2, 'gap3 should be larger than gap2') +}) + +// ─── retry_on_errors filtering ─────────────────────────────────────────────── + +class NetworkError extends Error { + constructor(message: string = 'network error') { + super(message) + this.name = 'NetworkError' + } +} + +class ValidationError extends Error { + constructor(message: string = 'validation error') { + super(message) + this.name = 'ValidationError' + } +} + +test('retry: retry_on_errors retries only matching error types', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [NetworkError] })(async () => { + calls++ + if (calls < 3) throw new NetworkError() + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: retry_on_errors does not retry non-matching errors', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [NetworkError] })(async () => { + calls++ + throw new ValidationError() + }) + await assert.rejects(fn, { name: 'ValidationError' }) + // Should have thrown immediately without retrying + assert.equal(calls, 1) +}) + +test('retry: retry_on_errors with multiple error types', async () => { + let calls = 0 + const fn = retry({ max_attempts: 5, retry_on_errors: [NetworkError, TypeError] })(async () => { + calls++ + if (calls === 1) throw new NetworkError() + if (calls === 2) throw new TypeError('type error') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +// ─── Per-attempt timeout ───────────────────────────────────────────────────── + +test('retry: timeout triggers RetryTimeoutError on slow attempts', async () => { + let calls = 0 + const fn = retry({ max_attempts: 1, timeout: 0.05 })(async () => { + calls++ + await delay(200) + return 'ok' + }) + await assert.rejects(fn, (error: unknown) => { + assert.ok(error instanceof RetryTimeoutError) + assert.equal(error.attempt, 1) + return true + }) + assert.equal(calls, 1) +}) + +test('retry: timeout allows fast attempts to succeed', async () => { + const fn = retry({ max_attempts: 1, timeout: 1 })(async () => { + await delay(5) + return 'fast' + }) + assert.equal(await fn(), 'fast') +}) + +test('retry: timed-out attempts are retried when max_attempts > 1', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, timeout: 0.05 })(async () => { + calls++ + if (calls < 3) { + await delay(200) // will timeout + return 'slow' + } + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +// ─── Semaphore concurrency control ────────────────────────────────────────── + +test('retry: semaphore_limit controls max concurrent executions', async (t) => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const fn = retry({ max_attempts: 1, semaphore_limit: 2, semaphore_name: 'test_sem_limit' })(async () => { + active++ + max_active = Math.max(max_active, active) + await delay(50) + active-- + }) + + // Launch 6 concurrent calls β€” should only run 2 at a time + await Promise.all([fn(), fn(), fn(), fn(), fn(), fn()]) + assert.equal(max_active, 2, 'should never exceed semaphore_limit=2') +}) + +test('retry: semaphore_lax=false throws SemaphoreTimeoutError when slots are full', async () => { + clearSemaphoreRegistry() + + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'test_sem_lax_false', + semaphore_lax: false, + semaphore_timeout: 0.05, + })(async () => { + await delay(200) // hold the semaphore for a while + return 'ok' + }) + + // Start one call to grab the semaphore + const first = fn() + + // Give the first call time to acquire the semaphore + await delay(10) + + // Second call should timeout trying to acquire semaphore + await assert.rejects( + fn(), + (error: unknown) => { + assert.ok(error instanceof SemaphoreTimeoutError) + assert.equal(error.semaphore_name, 'test_sem_lax_false') + return true + } + ) + + // Let the first call finish + assert.equal(await first, 'ok') +}) + +test('retry: semaphore_lax=true (default) proceeds without semaphore on timeout', async () => { + clearSemaphoreRegistry() + + let calls = 0 + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'test_sem_lax_true', + semaphore_lax: true, + semaphore_timeout: 0.05, + })(async () => { + calls++ + await delay(200) + return 'ok' + }) + + // Start first call to grab the semaphore + const first = fn() + await delay(10) + + // Second call should proceed anyway (lax mode) + const second = fn() + const results = await Promise.all([first, second]) + assert.deepEqual(results, ['ok', 'ok']) + assert.equal(calls, 2) +}) + +// ─── Preserves function metadata ───────────────────────────────────────────── + +test('retry: preserves function name', () => { + async function myNamedFunction(): Promise { + return 'ok' + } + const wrapped = retry()(myNamedFunction) + assert.equal(wrapped.name, 'myNamedFunction') +}) + +// ─── Preserves `this` context ──────────────────────────────────────────────── + +test('retry: preserves this context for methods', async () => { + class MyService { + value = 42 + fetch = retry({ max_attempts: 2 })(async function (this: MyService) { + return this.value + }) + } + + const svc = new MyService() + assert.equal(await svc.fetch(), 42) +}) + +// ─── Works with synchronous functions ──────────────────────────────────────── + +test('retry: wraps sync functions (result becomes a promise)', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3 })(() => { + calls++ + if (calls < 2) throw new Error('sync fail') + return 'sync ok' + }) + assert.equal(await fn(), 'sync ok') + assert.equal(calls, 2) +}) + +// ─── Integration with EventBus ─────────────────────────────────────────────── + +test('retry: works as event bus handler wrapper', async () => { + const bus = new EventBus('RetryBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + + let calls = 0 + bus.on( + TestEvent, + retry({ max_attempts: 3 })(async (_event) => { + calls++ + if (calls < 3) throw new Error(`handler fail ${calls}`) + return 'handler ok' + }) + ) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + assert.equal(calls, 3) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'handler ok') +}) + +test('retry: bus handler with retry_on_errors only retries matching errors', async () => { + const bus = new EventBus('RetryFilterBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + + let calls = 0 + bus.on( + TestEvent, + retry({ max_attempts: 3, retry_on_errors: [NetworkError] })(async (_event) => { + calls++ + throw new ValidationError() + }) + ) + + const event = bus.dispatch(TestEvent({})) + await event.done() + + // Should have failed immediately without retrying + assert.equal(calls, 1) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') +}) + +// ─── Edge cases ────────────────────────────────────────────────────────────── + +test('retry: max_attempts=0 is treated as 1 (minimum)', async () => { + let calls = 0 + const fn = retry({ max_attempts: 0 })(async () => { + calls++ + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 1) +}) + +test('retry: passes arguments through to wrapped function', async () => { + const fn = retry({ max_attempts: 1 })(async (a: number, b: string) => `${a}-${b}`) + assert.equal(await fn(1, 'hello'), '1-hello') +}) + +test('retry: semaphore is held across all retry attempts', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + let total_calls = 0 + + const fn = retry({ + max_attempts: 3, + semaphore_limit: 1, + semaphore_name: 'test_sem_across_retries', + })(async () => { + active++ + max_active = Math.max(max_active, active) + total_calls++ + await delay(10) + active-- + // Odd calls fail, even calls succeed β€” each invocation needs 2 attempts + if (total_calls % 2 === 1) throw new Error('fail') + return 'ok' + }) + + // Run 3 calls concurrently β€” they should run serially because semaphore_limit=1 + // The semaphore should be held across retries, so only 1 active at a time + const results = await Promise.all([fn(), fn(), fn()]) + assert.equal(max_active, 1, 'semaphore should enforce serial execution even during retries') + assert.deepEqual(results, ['ok', 'ok', 'ok']) + assert.equal(total_calls, 6, 'each of 3 calls should have taken 2 attempts') +}) + +test('retry: semaphore released even when all attempts fail', async () => { + clearSemaphoreRegistry() + + const fn = retry({ + max_attempts: 2, + semaphore_limit: 1, + semaphore_name: 'test_sem_release_on_fail', + })(async () => { + throw new Error('always fails') + }) + + // First call fails, should release semaphore + await assert.rejects(fn) + + // Second call should be able to acquire the semaphore (not deadlocked) + await assert.rejects(fn) +}) + +// ─── TC39 decorator syntax on class methods ────────────────────────────────── + +test('retry: works on class method via manual wrapping pattern', async () => { + // Since TC39 Stage 3 decorators require experimentalDecorators or TS 5.0+ native support, + // we test the equivalent pattern: applying retry() to a method post-definition. + class ApiClient { + base_url = 'https://example.com' + calls = 0 + + fetchData = retry({ max_attempts: 3 })(async function (this: ApiClient) { + this.calls++ + if (this.calls < 3) throw new Error('api error') + return `data from ${this.base_url}` + }) + } + + const client = new ApiClient() + assert.equal(await client.fetchData(), 'data from https://example.com') + assert.equal(client.calls, 3) +}) + +// ─── Re-entrancy / deadlock prevention ─────────────────────────────────────── + +test('retry: re-entrant call on same semaphore does not deadlock', async () => { + clearSemaphoreRegistry() + + const inner = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'shared_sem', + })(async () => { + return 'inner ok' + }) + + const outer = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'shared_sem', + })(async () => { + // This would deadlock without re-entrancy tracking: + // outer holds the semaphore, inner tries to acquire the same one + const result = await inner() + return `outer got: ${result}` + }) + + assert.equal(await outer(), 'outer got: inner ok') +}) + +test('retry: recursive function with semaphore does not deadlock', async () => { + clearSemaphoreRegistry() + + let depth = 0 + const recurse: (n: number) => Promise = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'recursive_sem', + })(async (n: number): Promise => { + depth++ + if (n <= 1) return 1 + return n + (await recurse(n - 1)) + }) + + const result = await recurse(5) + assert.equal(result, 15) // 5 + 4 + 3 + 2 + 1 + assert.equal(depth, 5) +}) + +test('retry: different semaphore names do not interfere with re-entrancy', async () => { + clearSemaphoreRegistry() + + let inner_active = 0 + let inner_max_active = 0 + + const inner = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'inner_sem', + })(async () => { + inner_active++ + inner_max_active = Math.max(inner_max_active, inner_active) + await delay(20) + inner_active-- + return 'inner ok' + }) + + const outer = retry({ + max_attempts: 1, + semaphore_limit: 2, + semaphore_name: 'outer_sem', + })(async () => { + return await inner() + }) + + // Run 3 outer calls concurrently + // outer_sem allows 2 concurrent, but inner_sem only allows 1 + const results = await Promise.all([outer(), outer(), outer()]) + assert.deepEqual(results, ['inner ok', 'inner ok', 'inner ok']) + assert.equal(inner_max_active, 1, 'inner semaphore should still enforce limit=1') +}) + +test('retry: three-level nested re-entrancy does not deadlock', async () => { + clearSemaphoreRegistry() + + const level3 = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'nested_sem', + })(async () => 'level3') + + const level2 = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'nested_sem', + })(async () => { + const r = await level3() + return `level2>${r}` + }) + + const level1 = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'nested_sem', + })(async () => { + const r = await level2() + return `level1>${r}` + }) + + assert.equal(await level1(), 'level1>level2>level3') +}) From 8be74231942c5ab51fa3aff0fb8541a3b14e1655 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 9 Feb 2026 19:04:26 +0000 Subject: [PATCH 72/79] Use shared async_context.ts polyfill for retry re-entrancy tracking Switch from a separate node:async_hooks import to the existing createAsyncLocalStorage() factory from async_context.ts. This ensures browser compatibility by gracefully degrading to a no-op when AsyncLocalStorage is unavailable. https://claude.ai/code/session_01TyuqFQFwDXa4h5QzQDCUsv --- bubus-ts/README.md | 5 +++-- bubus-ts/src/async_context.ts | 18 +++++++++++++++--- bubus-ts/src/retry.ts | 32 ++++++++------------------------ 3 files changed, 26 insertions(+), 29 deletions(-) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index dd44855..6d04985 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -437,8 +437,9 @@ await outer() // works, no deadlock This also works for recursive calls (a function calling itself) and deeply nested chains (A β†’ B β†’ C all sharing a semaphore). -In browsers (no `AsyncLocalStorage`), re-entrancy tracking is unavailable. Avoid recursive/nested calls through -the same semaphore in browser environments, or use different `semaphore_name` values. +In browsers (no `AsyncLocalStorage`), re-entrancy tracking is unavailable and the decorator gracefully degrades +to a no-op (no deadlock detection). Avoid recursive/nested calls through the same semaphore in browser +environments, or use different `semaphore_name` values. ### Interaction with `event_concurrency` and `event_handler_concurrency` diff --git a/bubus-ts/src/async_context.ts b/bubus-ts/src/async_context.ts index 117ab2d..c2ed50a 100644 --- a/bubus-ts/src/async_context.ts +++ b/bubus-ts/src/async_context.ts @@ -6,7 +6,10 @@ type AsyncLocalStorageLike = { enterWith?(store: unknown): void } -export let async_local_storage: AsyncLocalStorageLike | null = null +export type { AsyncLocalStorageLike } + +// Cache the AsyncLocalStorage constructor so multiple modules can create separate instances. +let _AsyncLocalStorageClass: (new () => AsyncLocalStorageLike) | null = null const is_node = typeof process !== 'undefined' && typeof process.versions !== 'undefined' && typeof process.versions.node === 'string' @@ -17,13 +20,22 @@ if (is_node) { ) => Promise<{ AsyncLocalStorage?: new () => AsyncLocalStorageLike }> const mod = await importer('node:async_hooks') if (mod?.AsyncLocalStorage) { - async_local_storage = new mod.AsyncLocalStorage() + _AsyncLocalStorageClass = mod.AsyncLocalStorage } } catch { - async_local_storage = null + _AsyncLocalStorageClass = null } } +/** Create a new AsyncLocalStorage instance, or null if unavailable (e.g. in browsers). */ +export const createAsyncLocalStorage = (): AsyncLocalStorageLike | null => { + if (!_AsyncLocalStorageClass) return null + return new _AsyncLocalStorageClass() +} + +// The primary AsyncLocalStorage instance used for event dispatch context propagation. +export let async_local_storage: AsyncLocalStorageLike | null = _AsyncLocalStorageClass ? new _AsyncLocalStorageClass() : null + export const captureAsyncContext = (): unknown | null => { if (!async_local_storage) { return null diff --git a/bubus-ts/src/retry.ts b/bubus-ts/src/retry.ts index 44fdacc..d06935c 100644 --- a/bubus-ts/src/retry.ts +++ b/bubus-ts/src/retry.ts @@ -1,4 +1,5 @@ import { AsyncSemaphore } from './lock_manager.js' +import { createAsyncLocalStorage, type AsyncLocalStorageLike } from './async_context.js' // ─── Types ─────────────────────────────────────────────────────────────────── @@ -69,35 +70,18 @@ export class SemaphoreTimeoutError extends Error { // Each async call stack tracks which semaphore names it currently holds. When a // nested call encounters a semaphore it already holds, it skips acquisition and // runs directly within the parent's slot. +// +// Uses the same AsyncLocalStorage polyfill as the rest of bubus (see async_context.ts) +// so it works in Node.js and gracefully degrades to a no-op in browsers. type ReentrantStore = Set -type AsyncLocalStorageLike = { - getStore(): ReentrantStore | undefined - run(store: ReentrantStore, callback: () => T): T -} - -let retry_context_storage: AsyncLocalStorageLike | null = null - -declare const process: { versions?: { node?: string } } | undefined -const is_node = typeof process !== 'undefined' && typeof process.versions !== 'undefined' && typeof process.versions?.node === 'string' - -if (is_node) { - try { - const importer = new Function('specifier', 'return import(specifier)') as ( - specifier: string - ) => Promise<{ AsyncLocalStorage?: new () => AsyncLocalStorageLike }> - const mod = await importer('node:async_hooks') - if (mod?.AsyncLocalStorage) { - retry_context_storage = new mod.AsyncLocalStorage() - } - } catch { - retry_context_storage = null - } -} +// Separate AsyncLocalStorage instance for retry re-entrancy tracking. +// Created via the shared factory in async_context.ts (returns null in browsers). +const retry_context_storage: AsyncLocalStorageLike | null = createAsyncLocalStorage() function getHeldSemaphores(): ReentrantStore { - return retry_context_storage?.getStore() ?? new Set() + return (retry_context_storage?.getStore() as ReentrantStore | undefined) ?? new Set() } function runWithHeldSemaphores(held: ReentrantStore, fn: () => T): T { From 57884c10bb8e8ac039f5ea5c35e8997affd42b44 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 9 Feb 2026 20:06:01 +0000 Subject: [PATCH 73/79] Support string error names and RegExp patterns in retry_on_errors retry_on_errors now accepts a mix of: - Error class constructors (instanceof check) - String error names (matched against error.name) - RegExp patterns (tested against String(error)) https://claude.ai/code/session_01TyuqFQFwDXa4h5QzQDCUsv --- bubus-ts/src/retry.ts | 14 +++++++-- bubus-ts/tests/retry.test.ts | 55 ++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 3 deletions(-) diff --git a/bubus-ts/src/retry.ts b/bubus-ts/src/retry.ts index d06935c..be47b78 100644 --- a/bubus-ts/src/retry.ts +++ b/bubus-ts/src/retry.ts @@ -13,8 +13,10 @@ export interface RetryOptions { /** Multiplier applied to retry_after after each attempt for exponential backoff. Default: 1.0 (constant delay) */ retry_backoff_factor?: number - /** Only retry when the thrown error is an instance of one of these classes. Default: undefined (retry on any error) */ - retry_on_errors?: Array Error> + /** Only retry when the thrown error matches one of these matchers. Accepts error class constructors, + * string error names (matched against error.name), or RegExp patterns (tested against String(error)). + * Default: undefined (retry on any error) */ + retry_on_errors?: Array<(new (...args: any[]) => Error) | string | RegExp> /** Per-attempt timeout in seconds. Default: undefined (no per-attempt timeout) */ timeout?: number | null @@ -202,7 +204,13 @@ export function retry(options: RetryOptions = {}) { } catch (error) { // Check if this error type should trigger a retry if (retry_on_errors && retry_on_errors.length > 0) { - const is_retryable = retry_on_errors.some((ErrorClass) => error instanceof ErrorClass) + const is_retryable = retry_on_errors.some((matcher) => + typeof matcher === 'string' + ? (error as Error)?.name === matcher + : matcher instanceof RegExp + ? matcher.test(String(error)) + : error instanceof matcher + ) if (!is_retryable) throw error } diff --git a/bubus-ts/tests/retry.test.ts b/bubus-ts/tests/retry.test.ts index e0a249b..ab8af79 100644 --- a/bubus-ts/tests/retry.test.ts +++ b/bubus-ts/tests/retry.test.ts @@ -146,6 +146,61 @@ test('retry: retry_on_errors does not retry non-matching errors', async () => { assert.equal(calls, 1) }) +test('retry: retry_on_errors accepts string error name', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: ['NetworkError'] })(async () => { + calls++ + if (calls < 3) throw new NetworkError() + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: retry_on_errors string matcher does not retry non-matching names', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: ['NetworkError'] })(async () => { + calls++ + throw new ValidationError() + }) + await assert.rejects(fn, { name: 'ValidationError' }) + assert.equal(calls, 1) +}) + +test('retry: retry_on_errors accepts RegExp pattern', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [/network/i] })(async () => { + calls++ + if (calls < 3) throw new NetworkError('Network timeout occurred') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: retry_on_errors RegExp does not retry non-matching errors', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [/network/i] })(async () => { + calls++ + throw new ValidationError('bad input') + }) + await assert.rejects(fn, { name: 'ValidationError' }) + assert.equal(calls, 1) +}) + +test('retry: retry_on_errors mixes class, string, and RegExp matchers', async () => { + let calls = 0 + const fn = retry({ max_attempts: 5, retry_on_errors: [TypeError, 'NetworkError', /timeout/i] })(async () => { + calls++ + if (calls === 1) throw new TypeError('type error') + if (calls === 2) throw new NetworkError() + if (calls === 3) throw new Error('Connection timeout') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 4) +}) + test('retry: retry_on_errors with multiple error types', async () => { let calls = 0 const fn = retry({ max_attempts: 5, retry_on_errors: [NetworkError, TypeError] })(async () => { From f9b2ce6e6c869b7d35c1d5bd3602eb0b05b932da Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 9 Feb 2026 20:07:03 +0000 Subject: [PATCH 74/79] Update README retry_on_errors docs for string/RegExp matchers https://claude.ai/code/session_01TyuqFQFwDXa4h5QzQDCUsv --- bubus-ts/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index 6d04985..c907ecb 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -384,7 +384,7 @@ class ApiClient { | `max_attempts` | `number` | `1` | Total attempts including the initial call. `1` = no retry, `3` = up to 2 retries. | | `retry_after` | `number` | `0` | Seconds to wait between retries. | | `retry_backoff_factor` | `number` | `1.0` | Multiplier applied to `retry_after` after each attempt. `2.0` = exponential backoff. | -| `retry_on_errors` | `ErrorClass[]` | `undefined` | Only retry when the error is an `instanceof` one of these classes. `undefined` = retry on any error. | +| `retry_on_errors` | `(ErrorClass \| string \| RegExp)[]` | `undefined` | Only retry when the error matches a matcher. Accepts class constructors (`instanceof`), strings (matched against `error.name`), or RegExp (tested against `String(error)`). Can be mixed: `[TypeError, 'NetworkError', /timeout/i]`. `undefined` = retry on any error. | | `timeout` | `number \| null` | `undefined` | Per-attempt timeout in seconds. Throws `RetryTimeoutError` if exceeded. | | `semaphore_limit` | `number \| null` | `undefined` | Max concurrent executions sharing this semaphore. | | `semaphore_name` | `string \| null` | fn name | Semaphore identifier. Functions with the same name share the same slot pool. | From 792b72506d4508c7a48580945e88c52a77920ba5 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 9 Feb 2026 20:10:43 +0000 Subject: [PATCH 75/79] Add semaphore_scope option: 'global', 'class', 'instance' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 'global': all calls share one semaphore (default, existing behavior) - 'class': keyed by constructor.name β€” all instances of a class share one - 'instance': keyed by WeakMap identity β€” each object gets its own Falls back to 'global' when `this` is not an object (standalone calls). Multiprocess scope is not supported (single-process JS runtime). https://claude.ai/code/session_01TyuqFQFwDXa4h5QzQDCUsv --- bubus-ts/README.md | 4 +- bubus-ts/src/retry.ts | 41 ++++++++++-- bubus-ts/tests/retry.test.ts | 125 +++++++++++++++++++++++++++++++++++ 3 files changed, 163 insertions(+), 7 deletions(-) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index c907ecb..ccde113 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -389,6 +389,7 @@ class ApiClient { | `semaphore_limit` | `number \| null` | `undefined` | Max concurrent executions sharing this semaphore. | | `semaphore_name` | `string \| null` | fn name | Semaphore identifier. Functions with the same name share the same slot pool. | | `semaphore_lax` | `boolean` | `true` | If `true`, proceed without concurrency limit when semaphore acquisition times out. | +| `semaphore_scope` | `'global' \| 'class' \| 'instance'` | `'global'` | `'global'`: one semaphore for all calls. `'class'`: one per class (keyed by `constructor.name`). `'instance'`: one per object instance (keyed by WeakMap identity). `'class'`/`'instance'` require `this` to be an object; they fall back to `'global'` for standalone calls. | | `semaphore_timeout` | `number \| null` | `undefined` | Max seconds to wait for semaphore. Default: `timeout * max(1, limit - 1)`. | ### Error types @@ -487,8 +488,7 @@ If you need per-attempt timeouts, use `retry({ timeout })`. If you need an overa | **Default retries** | 3 retries (4 total attempts) | 1 attempt (no retries) | | **Default delay** | 3 seconds | 0 seconds | | **Default timeout** | 5 seconds per attempt | No timeout | -| **Semaphore scopes** | `'global'`, `'class'`, `'self'`, `'multiprocess'` | Global only (by `semaphore_name`) | -| **Multiprocess** | Supported via `portalocker` file locks | Not supported (single-process JS runtime) | +| **Semaphore scopes** | `'global'`, `'class'`, `'self'`, `'multiprocess'` | `'global'`, `'class'`, `'instance'` (no multiprocess β€” single-process JS runtime) | | **System overload** | Tracks active operations, checks CPU/memory via `psutil` | Not implemented | | **Re-entrancy** | Not implemented (relies on Python's GIL + asyncio single-thread) | `AsyncLocalStorage`-based tracking to prevent deadlocks | | **Syntax** | `@retry(...)` decorator on `async def` | `retry({...})(fn)` HOF or `@retry({...})` on class methods (TC39 Stage 3) | diff --git a/bubus-ts/src/retry.ts b/bubus-ts/src/retry.ts index be47b78..8ef1542 100644 --- a/bubus-ts/src/retry.ts +++ b/bubus-ts/src/retry.ts @@ -30,6 +30,13 @@ export interface RetryOptions { /** If true, proceed without concurrency limit when semaphore acquisition times out. Default: true */ semaphore_lax?: boolean + /** Semaphore scoping strategy. Default: 'global' + * - 'global': all calls share one semaphore (keyed by semaphore_name) + * - 'class': all instances of the same class share one semaphore (keyed by className.semaphore_name) + * - 'instance': each object instance gets its own semaphore (keyed by instanceId.semaphore_name) + * 'class' and 'instance' require `this` to be an object; they fall back to 'global' for standalone calls. */ + semaphore_scope?: 'global' | 'class' | 'instance' + /** Maximum seconds to wait for semaphore acquisition. Default: undefined β†’ timeout * max(1, limit - 1) */ semaphore_timeout?: number | null } @@ -91,6 +98,26 @@ function runWithHeldSemaphores(held: ReentrantStore, fn: () => T): T { return retry_context_storage.run(held, fn) } +// ─── Semaphore scope helpers ───────────────────────────────────────────────── + +let _next_instance_id = 1 +const _instance_ids = new WeakMap() + +function scopedSemaphoreKey(base_name: string, scope: 'global' | 'class' | 'instance', context: unknown): string { + if (scope === 'class' && context && typeof context === 'object') { + return `${(context as object).constructor?.name ?? 'Object'}.${base_name}` + } + if (scope === 'instance' && context && typeof context === 'object') { + let id = _instance_ids.get(context as object) + if (id === undefined) { + id = _next_instance_id++ + _instance_ids.set(context as object, id) + } + return `${id}.${base_name}` + } + return base_name +} + // ─── Global semaphore registry ─────────────────────────────────────────────── const SEMAPHORE_REGISTRY = new Map() @@ -139,6 +166,7 @@ export function retry(options: RetryOptions = {}) { semaphore_limit, semaphore_name: semaphore_name_option, semaphore_lax = true, + semaphore_scope = 'global', semaphore_timeout, } = options @@ -149,17 +177,20 @@ export function retry(options: RetryOptions = {}) { const effective_retry_after = Math.max(0, retry_after) async function retryWrapper(this: any, ...args: any[]): Promise { + // ── Resolve scoped semaphore key at call time (uses `this` for class/instance scopes) ── + const scoped_key = scopedSemaphoreKey(sem_name, semaphore_scope, this) + // ── Check re-entrancy: skip semaphore if we already hold it in this async context ── const held = getHeldSemaphores() const needs_semaphore = semaphore_limit != null && semaphore_limit > 0 - const is_reentrant = needs_semaphore && held.has(sem_name) + const is_reentrant = needs_semaphore && held.has(scoped_key) // ── Semaphore acquisition (held across all retry attempts, skipped if re-entrant) ── let semaphore: AsyncSemaphore | null = null let semaphore_acquired = false if (needs_semaphore && !is_reentrant) { - semaphore = getOrCreateSemaphore(sem_name, semaphore_limit!) + semaphore = getOrCreateSemaphore(scoped_key, semaphore_limit!) const effective_sem_timeout = semaphore_timeout != null @@ -173,8 +204,8 @@ export function retry(options: RetryOptions = {}) { if (!semaphore_acquired) { if (!semaphore_lax) { throw new SemaphoreTimeoutError( - `Failed to acquire semaphore "${sem_name}" within ${effective_sem_timeout}s (limit=${semaphore_limit})`, - { semaphore_name: sem_name, semaphore_limit: semaphore_limit!, timeout_seconds: effective_sem_timeout } + `Failed to acquire semaphore "${scoped_key}" within ${effective_sem_timeout}s (limit=${semaphore_limit})`, + { semaphore_name: scoped_key, semaphore_limit: semaphore_limit!, timeout_seconds: effective_sem_timeout } ) } // lax mode: proceed without concurrency limit @@ -189,7 +220,7 @@ export function retry(options: RetryOptions = {}) { // ── Build the set of held semaphores for nested calls ── const new_held = new Set(held) if (semaphore_acquired) { - new_held.add(sem_name) + new_held.add(scoped_key) } // ── Retry loop (runs inside the semaphore and re-entrancy context) ── diff --git a/bubus-ts/tests/retry.test.ts b/bubus-ts/tests/retry.test.ts index ab8af79..ae66edb 100644 --- a/bubus-ts/tests/retry.test.ts +++ b/bubus-ts/tests/retry.test.ts @@ -611,3 +611,128 @@ test('retry: three-level nested re-entrancy does not deadlock', async () => { assert.equal(await level1(), 'level1>level2>level3') }) + +// ─── Semaphore scope ───────────────────────────────────────────────────────── + +test('retry: semaphore_scope=class shares semaphore across instances of same class', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Worker { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'work', + })(async function (this: Worker) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'done' + }) + } + + const a = new Worker() + const b = new Worker() + const c = new Worker() + + await Promise.all([a.run(), b.run(), c.run()]) + assert.equal(max_active, 1, 'class scope: all instances should share one semaphore') +}) + +test('retry: semaphore_scope=instance gives each instance its own semaphore', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Worker { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'work', + })(async function (this: Worker) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'done' + }) + } + + const a = new Worker() + const b = new Worker() + + // Same instance: serialized (limit=1 per instance) + // Different instances: can run in parallel (separate semaphores) + await Promise.all([a.run(), b.run()]) + assert.equal(max_active, 2, 'instance scope: different instances should get separate semaphores') +}) + +test('retry: semaphore_scope=instance serializes calls on same instance', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Worker { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'work', + })(async function (this: Worker) { + active++ + max_active = Math.max(max_active, active) + await delay(20) + active-- + return 'done' + }) + } + + const a = new Worker() + await Promise.all([a.run(), a.run(), a.run()]) + assert.equal(max_active, 1, 'instance scope: same instance calls should serialize') +}) + +test('retry: semaphore_scope=class isolates different classes', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Alpha { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'run', + })(async function (this: Alpha) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + }) + } + + class Beta { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'run', + })(async function (this: Beta) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + }) + } + + await Promise.all([new Alpha().run(), new Beta().run()]) + assert.equal(max_active, 2, 'class scope: different classes should get separate semaphores') +}) From b577a5f901f44b60e5a5154e18982512d559fb27 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 9 Feb 2026 20:18:29 +0000 Subject: [PATCH 76/79] Add TC39 @retry() decorator syntax tests and scope fallback tests Tests verify: @retry() works with native TC39 Stage 3 decorator syntax on class methods, preserves `this` context, composes with semaphore_scope (class/instance), works with bus.on() via .bind(), and class/instance scopes correctly fall back to global for standalone functions. https://claude.ai/code/session_01TyuqFQFwDXa4h5QzQDCUsv --- bubus-ts/tests/retry.test.ts | 168 +++++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) diff --git a/bubus-ts/tests/retry.test.ts b/bubus-ts/tests/retry.test.ts index ae66edb..2af8647 100644 --- a/bubus-ts/tests/retry.test.ts +++ b/bubus-ts/tests/retry.test.ts @@ -736,3 +736,171 @@ test('retry: semaphore_scope=class isolates different classes', async () => { await Promise.all([new Alpha().run(), new Beta().run()]) assert.equal(max_active, 2, 'class scope: different classes should get separate semaphores') }) + +// ─── TC39 Stage 3 decorator syntax ────────────────────────────────────────── + +test('retry: @retry() TC39 decorator on class method retries on failure', async () => { + clearSemaphoreRegistry() + + class ApiService { + calls = 0 + + @retry({ max_attempts: 3 }) + async fetchData(): Promise { + this.calls++ + if (this.calls < 3) throw new Error('api error') + return 'data' + } + } + + const svc = new ApiService() + assert.equal(await svc.fetchData(), 'data') + assert.equal(svc.calls, 3) +}) + +test('retry: @retry() TC39 decorator preserves this context', async () => { + class Config { + endpoint = 'https://api.example.com' + + @retry({ max_attempts: 2 }) + async getEndpoint(): Promise { + return this.endpoint + } + } + + const cfg = new Config() + assert.equal(await cfg.getEndpoint(), 'https://api.example.com') +}) + +test('retry: @retry() TC39 decorator with semaphore_scope=class', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Service { + @retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'handle', + }) + async handle(): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + const a = new Service() + const b = new Service() + await Promise.all([a.handle(), b.handle()]) + assert.equal(max_active, 1, '@retry class scope: all instances share one semaphore') +}) + +test('retry: @retry() TC39 decorator with semaphore_scope=instance', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Service { + @retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'handle', + }) + async handle(): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + const a = new Service() + const b = new Service() + await Promise.all([a.handle(), b.handle()]) + assert.equal(max_active, 2, '@retry instance scope: different instances get separate semaphores') +}) + +test('retry: @retry() decorated method works with bus.on via bind', async () => { + const bus = new EventBus('DecoratorBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + + class Handler { + calls = 0 + + @retry({ max_attempts: 3 }) + async onTest(_event: InstanceType): Promise { + this.calls++ + if (this.calls < 3) throw new Error('handler fail') + return 'handler ok' + } + } + + const handler = new Handler() + bus.on(TestEvent, handler.onTest.bind(handler)) + + const event = bus.dispatch(TestEvent({})) + await event.done() + assert.equal(handler.calls, 3) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.result, 'handler ok') +}) + +// ─── Scope fallback to global ─────────────────────────────────────────────── + +test('retry: semaphore_scope=class falls back to global for standalone functions', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'standalone_class', + })(async () => { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }) + + // Two concurrent calls should serialize since they share the same global-fallback semaphore + const results = await Promise.all([fn(), fn()]) + assert.deepEqual(results, ['ok', 'ok']) + assert.equal(max_active, 1, 'class scope on standalone fn should fall back to global and serialize') +}) + +test('retry: semaphore_scope=instance falls back to global for standalone functions', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'standalone_instance', + })(async () => { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }) + + // Two concurrent calls should serialize since they share the same global-fallback semaphore + const results = await Promise.all([fn(), fn()]) + assert.deepEqual(results, ['ok', 'ok']) + assert.equal(max_active, 1, 'instance scope on standalone fn should fall back to global and serialize') +}) From 182e98d4265cf30f2ac608dbf395bf6cff0c7288 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 9 Feb 2026 20:48:41 +0000 Subject: [PATCH 77/79] Add full usage pattern tests and README docs for @retry decorator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tests added (12 new, 51 total retry tests): - TC39 @retry() decorator on class methods with all 3 scopes - @retry + bus.on via .bind(this) for class/instance/global scopes - HOF retry()(fn).bind(instance) pattern (bind after wrap) - HOF retry()(fn.bind(instance)) β†’ verifies scope falls back to global - Standalone functions with class/instance scope β†’ fall back to global README updated: - TC39 decorator syntax examples with bus.on + .bind(this) - HOF .bind() ordering requirement documented - Note on scope fallback for standalone/unbound functions Also fixed flaky bus tests caused by handler ID collision (bus uses ms-precision timestamps in handler ID hash β€” added 2ms delay between same-millisecond handler registrations). https://claude.ai/code/session_01TyuqFQFwDXa4h5QzQDCUsv --- bubus-ts/README.md | 37 +++++-- bubus-ts/tests/retry.test.ts | 191 +++++++++++++++++++++++++++++++++++ 2 files changed, 221 insertions(+), 7 deletions(-) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index ccde113..d3597d5 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -364,19 +364,42 @@ const fetchWithRetry = retry({ max_attempts: 3, retry_after: 1 })(async (url: st return res.json() }) -// On an event bus handler +// TC39 Stage 3 decorator on class methods (TS 5.0+, no experimentalDecorators needed) +class SomeService { + constructor(bus: EventBus) { + // IMPORTANT: use .bind(this) when passing decorated methods as callbacks, + // otherwise `this` is lost and semaphore_scope won't work correctly. + bus.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 3, semaphore_scope: 'class', semaphore_limit: 3 }) + async on_SomeEvent(event: SomeEvent) { + // Across all instances of SomeService, at most 3 running at any given time + await riskyOperation(event.data) + } +} + +// On a plain event bus handler bus.on(MyEvent, retry({ max_attempts: 3, timeout: 10 })(async (event) => { await riskyOperation(event.data) })) -// On a class method (manual wrapping pattern) -class ApiClient { - fetchData = retry({ max_attempts: 3, retry_after: 0.5 })(async function (this: ApiClient) { - return await this.doRequest() - }) -} +// HOF pattern with instance scoping via .bind() +const handler = retry({ + max_attempts: 3, + semaphore_scope: 'instance', + semaphore_limit: 3, +})(async function (this: any, event: SomeEvent) { + await processEvent(event) +}) +// bind AFTER wrapping β€” the wrapper needs `this` for scoping +bus.on(SomeEvent, handler.bind(some_instance)) ``` +**`.bind()` ordering matters for semaphore scoping:** +- `retry({...})(fn).bind(instance)` β€” correct: wrapper receives `this` for scope resolution +- `retry({...})(fn.bind(instance))` β€” the inner bind works for `this` inside the handler, but the wrapper's `this` is unset, so `semaphore_scope` falls back to `'global'` + ### Options | Option | Type | Default | Description | diff --git a/bubus-ts/tests/retry.test.ts b/bubus-ts/tests/retry.test.ts index 2af8647..05162cc 100644 --- a/bubus-ts/tests/retry.test.ts +++ b/bubus-ts/tests/retry.test.ts @@ -904,3 +904,194 @@ test('retry: semaphore_scope=instance falls back to global for standalone functi assert.deepEqual(results, ['ok', 'ok']) assert.equal(max_active, 1, 'instance scope on standalone fn should fall back to global and serialize') }) + +// ─── Full usage patterns: @retry() decorator + bus.on via .bind(this) ─────── + +test('retry: @retry(scope=class) + bus.on via .bind β€” serializes across instances', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('ScopeClassBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('ScopeClassEvent', {}) + + let active = 0 + let max_active = 0 + + class SomeService { + constructor(b: InstanceType) { + b.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 1, semaphore_scope: 'class', semaphore_limit: 1, semaphore_name: 'on_SomeEvent' }) + async on_SomeEvent(_event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + // Two instances register handlers on the same bus + // Small delay between registrations to ensure unique handler IDs (bus uses ms-precision timestamps in handler ID hash) + new SomeService(bus) + await delay(2) + new SomeService(bus) + + const event = bus.dispatch(SomeEvent({})) + await event.done() + + // class scope + limit=1: only 1 handler should run at a time across both instances + assert.equal(max_active, 1, 'class scope should serialize across instances') +}) + +test('retry: @retry(scope=instance) + bus.on via .bind β€” isolates per instance', async () => { + const bus = new EventBus('ScopeInstanceBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('ScopeInstanceEvent', {}) + + let active = 0 + let max_active = 0 + + class SomeService { + constructor(b: InstanceType) { + b.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 1, semaphore_scope: 'instance', semaphore_limit: 1, semaphore_name: 'on_SomeEvent_inst' }) + async on_SomeEvent(_event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + total_calls++ + await delay(200) + active-- + return 'ok' + } + } + + let total_calls = 0 + + // Two instances register handlers β€” each gets its own semaphore + // Small delay between registrations to ensure unique handler IDs (bus uses ms-precision timestamps in handler ID hash) + new SomeService(bus) + await delay(2) + new SomeService(bus) + + const event = bus.dispatch(SomeEvent({})) + await event.done() + + // instance scope: 2 different instances can run in parallel + assert.equal(total_calls, 2, 'both handlers should have run') + assert.equal(max_active, 2, `instance scope should allow different instances to run in parallel (got max_active=${max_active}, total_calls=${total_calls})`) +}) + +test('retry: @retry(scope=global) + bus.on via .bind β€” all calls share one semaphore', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('ScopeGlobalBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('ScopeGlobalEvent', {}) + + let active = 0 + let max_active = 0 + + class SomeService { + constructor(b: InstanceType) { + b.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 1, semaphore_scope: 'global', semaphore_limit: 1, semaphore_name: 'on_SomeEvent' }) + async on_SomeEvent(_event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + // Small delay between registrations to ensure unique handler IDs + new SomeService(bus) + await delay(2) + new SomeService(bus) + + const event = bus.dispatch(SomeEvent({})) + await event.done() + + // global scope: all calls serialized + assert.equal(max_active, 1, 'global scope should serialize all calls') +}) + +// ─── HOF pattern: retry({...})(fn).bind(instance) β€” bind AFTER wrapping ───── + +test('retry: HOF retry()(fn).bind(instance) β€” instance scope works when bind is after wrap', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('HOFBindBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('HOFBindEvent', {}) + + let active = 0 + let max_active = 0 + + const some_instance_a = { name: 'a' } + const some_instance_b = { name: 'b' } + + const handler = retry({ + max_attempts: 1, + semaphore_scope: 'instance', + semaphore_limit: 1, + semaphore_name: 'handler', + })(async function (this: any, _event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }) + + // bind AFTER wrapping β†’ wrapper receives correct `this` for scoping + bus.on(SomeEvent, handler.bind(some_instance_a)) + bus.on(SomeEvent, handler.bind(some_instance_b)) + + const event = bus.dispatch(SomeEvent({})) + await event.done() + + // Two different instances β†’ separate semaphores β†’ can run in parallel + assert.equal(max_active, 2, 'bind-after-wrap: different instances should run in parallel') +}) + +// ─── HOF pattern: retry({...})(fn.bind(instance)) β€” bind BEFORE wrapping ──── +// NOTE: This falls back to global scope because JS cannot extract [[BoundThis]] +// from a bound function. The handler works correctly (this is preserved inside +// the handler), but the semaphore scoping cannot see the bound instance. +// Recommendation: use retry({...})(fn).bind(instance) instead. + +test('retry: HOF retry()(fn.bind(instance)) β€” scope falls back to global (bind before wrap)', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const instance_a = { name: 'a' } + const instance_b = { name: 'b' } + + const make_handler = (inst: object) => + retry({ + max_attempts: 1, + semaphore_scope: 'instance', + semaphore_limit: 1, + semaphore_name: 'handler_bind_before', + })( + (async function (this: any, _event: any): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }).bind(inst) + ) + + const handler_a = make_handler(instance_a) + const handler_b = make_handler(instance_b) + + // Both handlers fall back to global scope (same semaphore), so they serialize + await Promise.all([handler_a('event1'), handler_b('event2')]) + assert.equal(max_active, 1, 'bind-before-wrap: scoping falls back to global (serialized)') +}) From 7a82629403b7377bcf3957709cf5271567f23b04 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 9 Feb 2026 21:28:37 +0000 Subject: [PATCH 78/79] =?UTF-8?q?Add=20test=20for=20retry=20wrapping=20emi?= =?UTF-8?q?t=E2=86=92done=20cycle=20in=20parallel=20with=20other=20events?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Verifies the pattern where retry() wraps the full bus.emitβ†’event.done() cycle so each retry dispatches a fresh event, while other events race in parallel via Promise.all. https://claude.ai/code/session_01TyuqFQFwDXa4h5QzQDCUsv --- bubus-ts/tests/retry.test.ts | 54 ++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/bubus-ts/tests/retry.test.ts b/bubus-ts/tests/retry.test.ts index 05162cc..3f8df68 100644 --- a/bubus-ts/tests/retry.test.ts +++ b/bubus-ts/tests/retry.test.ts @@ -1095,3 +1095,57 @@ test('retry: HOF retry()(fn.bind(instance)) β€” scope falls back to global (bind await Promise.all([handler_a('event1'), handler_b('event2')]) assert.equal(max_active, 1, 'bind-before-wrap: scoping falls back to global (serialized)') }) + +// ─── retry wrapping an emitβ†’done cycle (retrying entire event dispatch) ───── + +test('retry: retry wrapping emitβ†’done retries the full dispatch cycle in parallel with other events', async () => { + const bus = new EventBus('RetryEmitBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + + const TabsEvent = BaseEvent.extend('TabsEvent', {}) + const DOMEvent = BaseEvent.extend('DOMEvent', {}) + const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', {}) + + let tabs_attempts = 0 + let dom_calls = 0 + let screenshot_calls = 0 + + bus.on(TabsEvent, async (_event) => { + tabs_attempts++ + if (tabs_attempts < 3) throw new Error(`tabs fail attempt ${tabs_attempts}`) + return 'tabs ok' + }) + + bus.on(DOMEvent, async (_event) => { + dom_calls++ + return 'dom ok' + }) + + bus.on(ScreenshotEvent, async (_event) => { + screenshot_calls++ + return 'screenshot ok' + }) + + const [tabs_event, dom_event, screenshot_event] = await Promise.all([ + // retry wraps the full emitβ†’done cycle β€” each retry dispatches a fresh event + retry({ max_attempts: 4 })(async () => { + const event = bus.emit(TabsEvent({})) + await event.done() + if (event.event_errors.length) throw event.event_errors[0] + return event + })(), + + // these two race in parallel alongside the retrying tabs event + bus.emit(DOMEvent({})).done(), + bus.emit(ScreenshotEvent({})).done(), + ]) + + // tabs needed 3 attempts (2 failures + 1 success) + assert.equal(tabs_attempts, 3) + assert.equal(tabs_event.event_status, 'completed') + + // dom and screenshot ran once each, in parallel with the tabs retries + assert.equal(dom_calls, 1) + assert.equal(screenshot_calls, 1) + assert.equal(dom_event.event_status, 'completed') + assert.equal(screenshot_event.event_status, 'completed') +}) From df34d9d15d773696dcdb681241982bad8aec2939 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 9 Feb 2026 21:55:58 +0000 Subject: [PATCH 79/79] Document @retry as handler-level concept; discourage emit-level retry wrapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rewrite README retry section to establish @retry() on class methods as the primary recommended pattern. Explain why retry/timeout is a handler-level concern (handlers fail, events don't), why emit-level retry hurts replayability/determinism, and how retry semaphores are orthogonal to bus concurrency options. Mark the emitβ†’done wrapping pattern as technically supported but not recommended, with clear rationale. Reorganize test section headers to reflect the recommended pattern hierarchy. https://claude.ai/code/session_01TyuqFQFwDXa4h5QzQDCUsv --- bubus-ts/README.md | 162 +++++++++++++++++++++-------------- bubus-ts/tests/retry.test.ts | 51 +++++++++-- 2 files changed, 142 insertions(+), 71 deletions(-) diff --git a/bubus-ts/README.md b/bubus-ts/README.md index d3597d5..95cc54b 100644 --- a/bubus-ts/README.md +++ b/bubus-ts/README.md @@ -348,58 +348,72 @@ contextvars + asyncio primitives. The `LockManager` (runloop pause + semaphore c ## `retry()` Decorator -`retry()` is a standalone higher-order function / decorator that adds retry logic and optional semaphore-based -concurrency limiting to any async function. It works independently of the event bus β€” you can use it on plain -functions, class methods, or event bus handlers. +`retry()` adds retry logic and optional semaphore-based concurrency limiting to any async function. -### Basic usage +### Why retry is a handler-level concept -```ts -import { retry } from 'bubus' +Retry and timeout belong on the **handler**, not on `emit()` or `done()`: -// Higher-order function wrapper (works on any function) -const fetchWithRetry = retry({ max_attempts: 3, retry_after: 1 })(async (url: string) => { - const res = await fetch(url) - if (!res.ok) throw new Error(`HTTP ${res.status}`) - return res.json() -}) +- **Handlers fail, events don't.** An event has no error state β€” it's a message. Individual handlers + produce errors, timeouts, and exceptions that may need retrying. The handler knows *why* it failed + and whether retrying makes sense. + +- **Replayability.** When you replay an event log, each emit should produce exactly one event. If retry + lives on the handler, the log records one emit β†’ one handler invocation β†’ one result. The retry + attempts are invisible implementation details. If retry lives on `emit()`, the log contains multiple + separate events for the same logical operation, making replays non-deterministic. + +- **Separation of concerns.** Event-level concurrency (`event_concurrency`) and handler-level concurrency + (`event_handler_concurrency`) are bus-level scheduling concerns. Retry/timeout/semaphore limiting are + handler-level resilience concerns. They compose orthogonally β€” don't mix them. -// TC39 Stage 3 decorator on class methods (TS 5.0+, no experimentalDecorators needed) -class SomeService { - constructor(bus: EventBus) { - // IMPORTANT: use .bind(this) when passing decorated methods as callbacks, - // otherwise `this` is lost and semaphore_scope won't work correctly. - bus.on(SomeEvent, this.on_SomeEvent.bind(this)) +### Recommended pattern: `@retry()` on class methods + +```ts +import { retry, EventBus, BaseEvent } from 'bubus' + +class ScreenshotService { + constructor(private bus: InstanceType) { + bus.on(ScreenshotRequestEvent, this.on_ScreenshotRequest.bind(this)) } - @retry({ max_attempts: 3, semaphore_scope: 'class', semaphore_limit: 3 }) - async on_SomeEvent(event: SomeEvent) { - // Across all instances of SomeService, at most 3 running at any given time - await riskyOperation(event.data) + @retry({ + max_attempts: 4, + retry_on_errors: [/timeout/i], + timeout: 5, + semaphore_scope: 'global', + semaphore_name: 'Screenshots', + semaphore_limit: 2, + }) + async on_ScreenshotRequest(event: InstanceType): Promise { + // At most 2 concurrent screenshot operations globally. + // Each attempt times out after 5s. Up to 4 total attempts. + // Only retries on timeout-related errors. + return await takeScreenshot(event.data.url) } } -// On a plain event bus handler +// Emit side stays clean β€” no retry/timeout concerns +const event = bus.emit(ScreenshotRequestEvent({ url: 'https://example.com' })) +await event.done() +``` + +This is the primary supported pattern. The `@retry()` decorator handles: +- **Retry logic**: max attempts, backoff, error filtering +- **Per-attempt timeout**: each attempt gets its own deadline +- **Concurrency limiting**: semaphore-based, with global/class/instance scoping + +The emit site just dispatches events and awaits completion β€” it doesn't know or care about retries. + +### Also works: inline HOF for simple handlers + +```ts +// For one-off handlers that don't need a class bus.on(MyEvent, retry({ max_attempts: 3, timeout: 10 })(async (event) => { await riskyOperation(event.data) })) - -// HOF pattern with instance scoping via .bind() -const handler = retry({ - max_attempts: 3, - semaphore_scope: 'instance', - semaphore_limit: 3, -})(async function (this: any, event: SomeEvent) { - await processEvent(event) -}) -// bind AFTER wrapping β€” the wrapper needs `this` for scoping -bus.on(SomeEvent, handler.bind(some_instance)) ``` -**`.bind()` ordering matters for semaphore scoping:** -- `retry({...})(fn).bind(instance)` β€” correct: wrapper receives `this` for scope resolution -- `retry({...})(fn.bind(instance))` β€” the inner bind works for `this` inside the handler, but the wrapper's `this` is unset, so `semaphore_scope` falls back to `'global'` - ### Options | Option | Type | Default | Description | @@ -426,14 +440,17 @@ The semaphore is acquired **once** before the first attempt and held across all callers from stealing the slot between retry attempts. ```ts -// At most 3 concurrent calls to this function across the entire process -const limited = retry({ - max_attempts: 2, - semaphore_limit: 3, - semaphore_name: 'api_calls', -})(async () => { - await callExternalApi() -}) +class ApiService { + @retry({ + max_attempts: 2, + semaphore_limit: 3, + semaphore_name: 'api_calls', + }) + async callExternalApi(): Promise { + // At most 3 concurrent calls across all instances of ApiService + return await fetch('https://api.example.com') + } +} ``` Functions that share a `semaphore_name` share the same slot pool β€” this is how you limit concurrency across @@ -465,29 +482,19 @@ In browsers (no `AsyncLocalStorage`), re-entrancy tracking is unavailable and th to a no-op (no deadlock detection). Avoid recursive/nested calls through the same semaphore in browser environments, or use different `semaphore_name` values. -### Interaction with `event_concurrency` and `event_handler_concurrency` +### Interaction with bus concurrency options `retry()` and the bus's concurrency modes are **orthogonal** and compose together: - **`event_concurrency`** controls how many events the bus processes at once (via the runloop + event semaphore). - **`event_handler_concurrency`** controls how many handlers run concurrently for a single event (via the handler semaphore). -- **`retry()` semaphores** control how many concurrent invocations of a specific function are allowed (via a global semaphore registry). - -When you wrap an event handler with `retry()`, both layers apply: +- **`retry()` semaphores** control how many concurrent invocations of a specific handler are allowed (via a global semaphore registry). -```ts -// Bus enforces bus-serial handler ordering (default). -// retry() additionally limits this specific handler to 2 concurrent invocations -// and retries up to 3 times on failure. -bus.on( - MyEvent, - retry({ max_attempts: 3, semaphore_limit: 2, semaphore_name: 'my_handler' })( - async (event) => { await doWork(event) } - ) -) -``` +These are separate concerns: +- Bus concurrency = scheduling (how the bus orders event/handler execution) +- Retry semaphores = resilience (how individual handlers manage concurrency and failure recovery) -The execution order is: +When you use `@retry()` on a bus handler, both layers apply. The execution order is: 1. Bus acquires the **handler concurrency semaphore** (e.g. `bus-serial`) 2. `retry()` acquires its own **retry semaphore** (if `semaphore_limit` is set) 3. The handler function runs (with retries if it throws) @@ -501,6 +508,35 @@ The bus's `handler_timeout` and `retry()`'s `timeout` are independent: If you need per-attempt timeouts, use `retry({ timeout })`. If you need an overall deadline for the handler (including all retries), rely on the bus's `handler_timeout`. +### Discouraged: wrapping `emit()` β†’ `done()` in `retry()` + +This pattern is technically supported but **not recommended**: + +```ts +// DON'T DO THIS β€” retry belongs on the handler, not the emit site. +const event = await retry({ max_attempts: 4 })(async () => { + const ev = bus.emit(ScreenshotRequestEvent({ full_page: false })) + await ev.done() + if (ev.event_errors.length) throw ev.event_errors[0] + return ev +})() +``` + +Why this is worse: + +1. **Architecture**: the emit site doesn't know which handler failed or why. The handler is the right + place for retry logic because it has the context to decide whether retrying makes sense. + +2. **Replayability**: each retry dispatches a **new event**, producing multiple events in the log for + one logical operation. On replay, if the handler succeeds on the first attempt, you get a different + event topology than the original run. With handler-level retry, the log always shows one emit β†’ one + handler result, regardless of how many retry attempts were needed internally. + +3. **Determinism**: the same emit may fan out to multiple handlers. Retrying the whole dispatch because + one handler failed also re-runs handlers that succeeded β€” wasteful and potentially side-effectful. + +Use the `@retry()` decorator on the handler method instead. + ### Differences from the Python `@retry` decorator | Aspect | Python | TypeScript | @@ -514,7 +550,7 @@ If you need per-attempt timeouts, use `retry({ timeout })`. If you need an overa | **Semaphore scopes** | `'global'`, `'class'`, `'self'`, `'multiprocess'` | `'global'`, `'class'`, `'instance'` (no multiprocess β€” single-process JS runtime) | | **System overload** | Tracks active operations, checks CPU/memory via `psutil` | Not implemented | | **Re-entrancy** | Not implemented (relies on Python's GIL + asyncio single-thread) | `AsyncLocalStorage`-based tracking to prevent deadlocks | -| **Syntax** | `@retry(...)` decorator on `async def` | `retry({...})(fn)` HOF or `@retry({...})` on class methods (TC39 Stage 3) | +| **Syntax** | `@retry(...)` decorator on `async def` | `@retry({...})` on class methods (TC39 Stage 3), or `retry({...})(fn)` HOF | | **Sync functions** | Not supported (async-only) | Supported (wrapper always returns a Promise) | The TS version intentionally starts with conservative defaults (1 attempt, no delay, no timeout) so that diff --git a/bubus-ts/tests/retry.test.ts b/bubus-ts/tests/retry.test.ts index 3f8df68..711889f 100644 --- a/bubus-ts/tests/retry.test.ts +++ b/bubus-ts/tests/retry.test.ts @@ -371,8 +371,11 @@ test('retry: wraps sync functions (result becomes a promise)', async () => { }) // ─── Integration with EventBus ─────────────────────────────────────────────── +// +// The recommended pattern is @retry() on the handler method + bus.on(Event, this.handler.bind(this)) +// These tests demonstrate the inline HOF form for simpler cases; the decorator form is tested below. -test('retry: works as event bus handler wrapper', async () => { +test('retry: works as event bus handler wrapper (inline HOF)', async () => { const bus = new EventBus('RetryBus', { event_timeout: null }) const TestEvent = BaseEvent.extend('TestEvent', {}) @@ -395,7 +398,7 @@ test('retry: works as event bus handler wrapper', async () => { assert.equal(result.result, 'handler ok') }) -test('retry: bus handler with retry_on_errors only retries matching errors', async () => { +test('retry: bus handler with retry_on_errors only retries matching errors (inline HOF)', async () => { const bus = new EventBus('RetryFilterBus', { event_timeout: null }) const TestEvent = BaseEvent.extend('TestEvent', {}) @@ -737,7 +740,23 @@ test('retry: semaphore_scope=class isolates different classes', async () => { assert.equal(max_active, 2, 'class scope: different classes should get separate semaphores') }) -// ─── TC39 Stage 3 decorator syntax ────────────────────────────────────────── +// ─── TC39 Stage 3 decorator syntax (RECOMMENDED PATTERN) ──────────────────── +// +// The primary supported pattern for event bus handlers is: +// +// class Service { +// constructor(bus) { +// bus.on(Event, this.on_Event.bind(this)) +// } +// +// @retry({ max_attempts: 3, ... }) +// async on_Event(event) { ... } +// } +// +// Retry/timeout is a handler-level concern. Event processing itself has no error +// state β€” only individual handlers produce errors/timeouts that need retrying. +// Event-level and handler-level concurrency on the bus is still controllable via +// event_concurrency / event_handler_concurrency options (those are separate). test('retry: @retry() TC39 decorator on class method retries on failure', async () => { clearSemaphoreRegistry() @@ -905,7 +924,7 @@ test('retry: semaphore_scope=instance falls back to global for standalone functi assert.equal(max_active, 1, 'instance scope on standalone fn should fall back to global and serialize') }) -// ─── Full usage patterns: @retry() decorator + bus.on via .bind(this) ─────── +// ─── @retry() decorator + bus.on via .bind(this) β€” all three scopes ───────── test('retry: @retry(scope=class) + bus.on via .bind β€” serializes across instances', async () => { clearSemaphoreRegistry() @@ -1019,7 +1038,7 @@ test('retry: @retry(scope=global) + bus.on via .bind β€” all calls share one sem assert.equal(max_active, 1, 'global scope should serialize all calls') }) -// ─── HOF pattern: retry({...})(fn).bind(instance) β€” bind AFTER wrapping ───── +// ─── HOF pattern: retry({...})(fn).bind(instance) β€” alternative to decorator ─ test('retry: HOF retry()(fn).bind(instance) β€” instance scope works when bind is after wrap', async () => { clearSemaphoreRegistry() @@ -1096,9 +1115,25 @@ test('retry: HOF retry()(fn.bind(instance)) β€” scope falls back to global (bind assert.equal(max_active, 1, 'bind-before-wrap: scoping falls back to global (serialized)') }) -// ─── retry wrapping an emitβ†’done cycle (retrying entire event dispatch) ───── - -test('retry: retry wrapping emitβ†’done retries the full dispatch cycle in parallel with other events', async () => { +// ─── retry wrapping emitβ†’done (TECHNICALLY SUPPORTED, NOT RECOMMENDED) ────── +// +// This pattern wraps an entire emitβ†’done cycle in retry(), so each retry +// dispatches a brand new event. It works, but is discouraged because: +// +// 1. Architecture: retry/timeout belongs on the handler, not the emit site. +// The emitter doesn't know which handler failed or why β€” the handler does. +// +// 2. Replayability: each retry produces a separate event in the log, making +// replays non-deterministic. If the original run needed 3 attempts, a replay +// that succeeds on attempt 1 produces a different event topology. +// +// 3. Determinism: the same emit may reach different handlers with different +// failure modes; retrying the whole dispatch is a blunt instrument. +// +// Prefer: @retry() on the handler method, so retries are transparent to the +// event log and controlled by the code that understands the failure. + +test('retry: retry wrapping emitβ†’done retries the full dispatch cycle (discouraged pattern)', async () => { const bus = new EventBus('RetryEmitBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) const TabsEvent = BaseEvent.extend('TabsEvent', {})