From d8818be69539f52b86ad08411111f69fb7bd1198 Mon Sep 17 00:00:00 2001 From: thesayyn Date: Fri, 27 Feb 2026 15:05:12 -0800 Subject: [PATCH 1/6] feat: workflows + axl --- .aspect/config.axl | 15 +- .aspect/user-task.axl | 23 +- .buildkite/hooks/pre-command | 48 + .buildkite/pipeline.yaml | 180 ++-- Cargo.lock | 1 + crates/aspect-cli/.aspect/config.axl | 1 - crates/aspect-cli/BUILD.bazel | 5 + crates/aspect-cli/Cargo.toml | 1 + .../src/builtins/aspect/MODULE.aspect | 22 + .../src/builtins/aspect/axl_add.axl | 0 .../aspect-cli/src/builtins/aspect/bazel.axl | 41 + .../aspect-cli/src/builtins/aspect/build.axl | 103 ++ .../src/builtins/aspect/config/artifacts.axl | 299 ++++++ .../src/builtins/aspect/config/builtins.axl | 67 ++ .../src/builtins/aspect/config/delivery.axl | 12 + .../src/builtins/aspect/config/lint.axl | 55 ++ .../src/builtins/aspect/config/nolint.axl | 8 + .../src/builtins/aspect/fragments.axl | 28 + .../src/builtins/aspect/lib/artifacts.axl | 294 ++++++ .../builtins/aspect/lib/build_metadata.axl | 271 +++++ .../src/builtins/aspect/lib/deliveryd.axl | 123 +++ .../src/builtins/aspect/lib/environment.axl | 105 ++ .../src/builtins/aspect/lib/github.axl | 727 ++++++++++++++ .../src/builtins/aspect/lib/health_check.axl | 176 ++++ .../src/builtins/aspect/lib/linting.axl | 393 ++++++++ .../src/builtins/aspect/lib/platform.axl | 234 +++++ .../src/builtins/aspect/lib/sarif.axl | 228 +++++ .../src/builtins/aspect/lib/tar.axl | 101 ++ .../src/builtins/aspect/tasks/delivery.axl | 237 +++++ .../builtins/aspect/tasks/dummy_format.axl | 22 + .../src/builtins/aspect/tasks/dummy_lint.axl | 22 + .../aspect-cli/src/builtins/aspect/test.axl | 103 ++ crates/aspect-cli/src/builtins/mod.rs | 136 +++ crates/aspect-cli/src/main.rs | 121 ++- crates/axl-runtime/BUILD.bazel | 5 +- crates/axl-runtime/Cargo.toml | 3 +- .../src/builtins/aspect/MODULE.aspect | 3 - .../axl-runtime/src/builtins/aspect/build.axl | 52 - .../axl-runtime/src/builtins/aspect/test.axl | 52 - 
crates/axl-runtime/src/builtins/mod.rs | 45 - crates/axl-runtime/src/engine/bazel/build.rs | 93 +- .../src/engine/bazel/execlog_sink.rs | 63 ++ .../src/engine/bazel/health_check.rs | 251 +++++ .../axl-runtime/src/engine/bazel/helpers.rs | 37 - .../src/engine/bazel/iter/build_event.rs | 4 +- crates/axl-runtime/src/engine/bazel/mod.rs | 97 +- .../src/engine/bazel/stream/build_event.rs | 35 +- .../src/engine/bazel/stream/execlog.rs | 237 ++++- .../src/engine/bazel/stream/util.rs | 37 + .../axl-runtime/src/engine/config/context.rs | 43 +- .../src/engine/config/fragment_map.rs | 271 +++++ crates/axl-runtime/src/engine/config/mod.rs | 2 + .../engine/config/tasks/configured_task.rs | 71 +- .../src/engine/config/tasks/value.rs | 60 +- crates/axl-runtime/src/engine/mod.rs | 3 +- crates/axl-runtime/src/engine/task.rs | 50 +- crates/axl-runtime/src/engine/task_context.rs | 24 +- .../axl-runtime/src/engine/types/fragment.rs | 928 ++++++++++++++++++ crates/axl-runtime/src/engine/types/mod.rs | 1 + crates/axl-runtime/src/eval/config.rs | 93 +- crates/axl-runtime/src/eval/load.rs | 43 +- crates/axl-runtime/src/eval/mod.rs | 1 - crates/axl-runtime/src/eval/task.rs | 94 +- crates/axl-runtime/src/lib.rs | 1 - crates/axl-runtime/src/module/disk_store.rs | 48 +- crates/axl-runtime/src/module/eval.rs | 59 +- crates/axl-runtime/src/module/mod.rs | 2 +- crates/axl-runtime/src/module/store.rs | 20 + 68 files changed, 6460 insertions(+), 570 deletions(-) create mode 100755 .buildkite/hooks/pre-command create mode 100644 crates/aspect-cli/src/builtins/aspect/MODULE.aspect rename crates/{axl-runtime => aspect-cli}/src/builtins/aspect/axl_add.axl (100%) create mode 100644 crates/aspect-cli/src/builtins/aspect/bazel.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/build.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/config/artifacts.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/config/builtins.axl create mode 100644 
crates/aspect-cli/src/builtins/aspect/config/delivery.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/config/lint.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/config/nolint.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/fragments.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/artifacts.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/build_metadata.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/deliveryd.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/environment.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/github.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/health_check.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/linting.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/platform.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/sarif.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/lib/tar.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/tasks/delivery.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/tasks/dummy_format.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/tasks/dummy_lint.axl create mode 100644 crates/aspect-cli/src/builtins/aspect/test.axl create mode 100644 crates/aspect-cli/src/builtins/mod.rs delete mode 100644 crates/axl-runtime/src/builtins/aspect/MODULE.aspect delete mode 100644 crates/axl-runtime/src/builtins/aspect/build.axl delete mode 100644 crates/axl-runtime/src/builtins/aspect/test.axl delete mode 100644 crates/axl-runtime/src/builtins/mod.rs create mode 100644 crates/axl-runtime/src/engine/bazel/execlog_sink.rs create mode 100644 crates/axl-runtime/src/engine/bazel/health_check.rs delete mode 100644 crates/axl-runtime/src/engine/bazel/helpers.rs create mode 100644 crates/axl-runtime/src/engine/config/fragment_map.rs create mode 100644 crates/axl-runtime/src/engine/types/fragment.rs diff 
--git a/.aspect/config.axl b/.aspect/config.axl index 809994865..2944c7312 100644 --- a/.aspect/config.axl +++ b/.aspect/config.axl @@ -19,14 +19,11 @@ def config(ctx: ConfigContext): # assert we can call a lambda with a global bind from another module in a config script lambda_with_global_bind()("assert") + # Configure the UserTaskConfig fragment globally + user_config = ctx.fragments[UserTaskConfig] + user_config.message = "hello axl" + user_config.count = 2 + user_config.customize_message = lambda s: _customize_message(s, "!") + # add a new task ctx.tasks.add(_user_task) - - # customize a task - for task in ctx.tasks: - if task.name == "user_task" and task.group == ["user"]: - task.config = UserTaskConfig( - message = "hello axl", - count = 2, - customize_message = lambda s: _customize_message(s, "!") - ) diff --git a/.aspect/user-task.axl b/.aspect/user-task.axl index d85135450..d0bd8785c 100644 --- a/.aspect/user-task.axl +++ b/.aspect/user-task.axl @@ -1,9 +1,9 @@ load("./lambda.axl", "lambda_with_global_bind") -UserTaskConfig = record( - message=field(str, "hello world"), - count=field(int, 1), - customize_message = field(typing.Callable[[str], str], default = lambda s: s), +UserTaskConfig = fragment( + message=attr(str, "hello world"), + count=attr(int, 1), + customize_message = attr(typing.Callable[[str], str], lambda s: s), ) def _impl(ctx: TaskContext) -> int: @@ -13,18 +13,19 @@ def _impl(ctx: TaskContext) -> int: lambda_with_global_bind()("assert") # do something that makes uses the task config + config = ctx.fragments[UserTaskConfig] print(ctx) - print(ctx.config) - print(ctx.config.count) - print(ctx.config.message) - print(ctx.config.customize_message) - for i in range(ctx.config.count): - print(ctx.config.customize_message(ctx.config.message)) + print(config) + print(config.count) + print(config.message) + print(config.customize_message) + for i in range(config.count): + print(config.customize_message(config.message)) return 0 user_task = task( 
group = ["user"], implementation = _impl, args = {}, - config = UserTaskConfig(), + fragments = [UserTaskConfig], ) diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command new file mode 100755 index 000000000..ef99d8ef7 --- /dev/null +++ b/.buildkite/hooks/pre-command @@ -0,0 +1,48 @@ +#!/bin/sh +set -eu + +# Bazel startup options for CI +export BAZEL_STARTUP_OPTS="--nohome_rc --output_user_root=/mnt/ephemeral/bazel/aspect-cli/__main__ --output_base=/mnt/ephemeral/output/aspect-cli/__main__" + +# Build bazel remote flags from environment variables set by configure_workflows_env +BAZEL_REMOTE_FLAGS="" +[ -n "${ASPECT_WORKFLOWS_BES_BACKEND:-}" ] && BAZEL_REMOTE_FLAGS="${BAZEL_REMOTE_FLAGS} --bes_backend=${ASPECT_WORKFLOWS_BES_BACKEND}" +[ -n "${ASPECT_WORKFLOWS_BES_RESULTS_URL:-}" ] && BAZEL_REMOTE_FLAGS="${BAZEL_REMOTE_FLAGS} --bes_results_url=${ASPECT_WORKFLOWS_BES_RESULTS_URL}" +[ -n "${ASPECT_WORKFLOWS_REMOTE_CACHE:-}" ] && BAZEL_REMOTE_FLAGS="${BAZEL_REMOTE_FLAGS} --remote_cache=${ASPECT_WORKFLOWS_REMOTE_CACHE}" +[ -n "${ASPECT_WORKFLOWS_REMOTE_BYTESTREAM_URI_PREFIX:-}" ] && BAZEL_REMOTE_FLAGS="${BAZEL_REMOTE_FLAGS} --remote_bytestream_uri_prefix=${ASPECT_WORKFLOWS_REMOTE_BYTESTREAM_URI_PREFIX}" +export BAZEL_REMOTE_FLAGS + +export BAZEL_BUILD_OPTS="--config=ci ${BAZEL_REMOTE_FLAGS}" + +# Skip if bazel is not installed +if ! command -v bazel >/dev/null 2>&1; then + echo "DEBUG: bazel not found, skipping" + exit 0 +fi + +# If the bazel server is busy, the runner is in a bad state — reap it so the autoscaler +# replaces it with a fresh instance. 
+reap_runner() { + echo "--- Bazel server is unhealthy, reaping runner" + REGION=$(cat /etc/aspect/workflows/platform/region) + INSTANCE_ID=$(cat /etc/aspect/workflows/platform/instance_id) + /etc/aspect/workflows/lib/aws.sh autoscaling set-instance-health \ + --region "${REGION}" \ + --instance-id "${INSTANCE_ID}" \ + --health-status Unhealthy \ + --no-should-respect-grace-period +} + +# We use a short timeout so we capture the "Another command (pid=X)" message without +# blocking indefinitely. +LOCK_OUTPUT=$(timeout 5 bazel $BAZEL_STARTUP_OPTS info 2>&1) || true +BUSY_PID=$(echo "$LOCK_OUTPUT" | grep -o '(pid=[0-9]*)' | grep -o '[0-9]*') || true +if [ -n "$BUSY_PID" ]; then + reap_runner + exit 78 +fi + +# Build aspect-cli so version.axl can pick it up from bazel-bin/ +echo "--- Building aspect-cli" +# bazel in aspect workflows runners is actually old aspect-cli. +bazel --aspect:disable_plugins $BAZEL_STARTUP_OPTS build $BAZEL_BUILD_OPTS -c dbg --remote_download_toplevel --show_progress_rate_limit=1 //:cli diff --git a/.buildkite/pipeline.yaml b/.buildkite/pipeline.yaml index f012ad4dc..17a7f4097 100644 --- a/.buildkite/pipeline.yaml +++ b/.buildkite/pipeline.yaml @@ -1,85 +1,105 @@ +# Generated by: aspect workflows migrate --host=buildkite +# Source: .aspect/workflows/config_aws.yaml +# DO NOT EDIT - regenerate with 'aspect workflows migrate' + +env: + ASPECT_DEBUG: "1" steps: - - label: ":aspect: Test" + - key: __main__::warm + label: ":aspect: Building CLI" agents: queue: aspect-huge - command: | - echo "--- :aspect-build: Workflows environment" - /etc/aspect/workflows/bin/configure_workflows_env - env | sort | grep ^ASPECT_WORKFLOWS_ - echo "--- :stethoscope: Agent health check" - /etc/aspect/workflows/bin/agent_health_check - echo "--- :bazel: bazel test //..." 
- pwd - rosetta bazelrc > /etc/bazel.bazelrc - bazel \ - --nohome_rc \ - --output_user_root=/mnt/ephemeral/bazel/aspect-cli/__main__ \ - --output_base=/mnt/ephemeral/output/aspect-cli/__main__ \ - test \ - --config=workflows \ - --config=ci \ - --test_output=summary \ - --show_progress_rate_limit=1 \ - -- //... - echo "--- :bazel: bazel build //:launcher //:cli" - bazel \ - --nohome_rc \ - --output_user_root=/mnt/ephemeral/bazel/aspect-cli/__main__ \ - --output_base=/mnt/ephemeral/output/aspect-cli/__main__ \ - build \ - --config=workflows \ - --config=ci \ - --test_output=summary \ - --show_progress_rate_limit=1 \ - --remote_download_outputs="toplevel" \ - -- //:launcher //:cli - # Use cquery to get the actual output paths (handles platform transitions on Linux) - WORKSPACE_ROOT=$$(pwd) - LAUNCHER=$$WORKSPACE_ROOT/$$(bazel --nohome_rc --output_user_root=/mnt/ephemeral/bazel/aspect-cli/__main__ --output_base=/mnt/ephemeral/output/aspect-cli/__main__ cquery --config=workflows --config=ci --output=files //crates/aspect-launcher) - CLI=$$WORKSPACE_ROOT/$$(bazel --nohome_rc --output_user_root=/mnt/ephemeral/bazel/aspect-cli/__main__ --output_base=/mnt/ephemeral/output/aspect-cli/__main__ cquery --config=workflows --config=ci --output=files //crates/aspect-cli) - echo "--- :aspect: aspect tests axl" - $$LAUNCHER tests axl - echo "--- :aspect: aspect demo template-demo" - $$LAUNCHER demo template-demo - echo "--- :aspect: aspect user user_task" - $$LAUNCHER user user_task - echo "--- :aspect: aspect user user_task_reexport" - $$LAUNCHER user user_task_reexport - echo "--- :aspect: aspect user user-task-manual" - $$LAUNCHER user user-task-manual - echo "--- :aspect: aspect user user-task-added-by-config" - $$LAUNCHER user user-task-added-by-config - echo "--- :aspect: aspect user user-task-subdir (in crates/aspect-cli)" - ( - cd crates/aspect-cli - $$LAUNCHER user user-task-subdir - ) - echo "--- :aspect: aspect help" - $$LAUNCHER help - echo "--- :aspect: aspect-launcher 
--version" - $$LAUNCHER --version - echo "--- :aspect: aspect-cli --version" - $$CLI --version - echo "--- :aspect: aspect version" - $$LAUNCHER version - - label: ":broom: Format" + timeout_in_minutes: 20 + retry: + automatic: + - exit_status: 78 + limit: 1 + command: + - echo "Pre-command builds and uploads CLI to remote cache" + - key: __main__::test + depends_on: + - __main__::warm + label: ":aspect: Test" agents: queue: aspect-default - command: | - echo "--- :aspect-build: Workflows environment" - /etc/aspect/workflows/bin/configure_workflows_env - env | sort | grep ^ASPECT_WORKFLOWS_ - echo "--- :stethoscope: Agent health check" - /etc/aspect/workflows/bin/agent_health_check - echo "--- :bazel: bazel run //tools/format:format.check" - pwd - rosetta bazelrc > /etc/bazel.bazelrc - bazel \ - --nohome_rc \ - --output_user_root=/mnt/ephemeral/bazel/aspect-cli/__main__ \ - --output_base=/mnt/ephemeral/output/aspect-cli/__main__ \ - run \ - --config=workflows \ - --config=ci \ - --show_progress_rate_limit=1 \ - //tools/format:format.check + timeout_in_minutes: 20 + retry: + automatic: + - exit_status: 78 + limit: 1 + command: + - | + echo "--- :bazel: aspect test //..." + ASPECT_DEBUG=1 aspect test //... 
--bazel_flag=--build_tests_only + - key: __main__::build + depends_on: + - __main__::warm + label: ":aspect: Build" + agents: + queue: aspect-huge + timeout_in_minutes: 20 + retry: + automatic: + - exit_status: 78 + limit: 1 + command: + - | + echo "--- :bazel: aspect build //:launcher //:cli" + aspect build //:launcher //:cli + # Use cquery to get the actual output paths (handles platform transitions on Linux) + WORKSPACE_ROOT=$$(pwd) + LAUNCHER=$$WORKSPACE_ROOT/$$(bazel $$BAZEL_STARTUP_OPTS cquery $$BAZEL_BUILD_OPTS --output=files //crates/aspect-launcher) + CLI=$$WORKSPACE_ROOT/$$(bazel $$BAZEL_STARTUP_OPTS cquery $$BAZEL_BUILD_OPTS --output=files //crates/aspect-cli) + echo "--- :aspect: aspect tests axl" + $$LAUNCHER tests axl + echo "--- :aspect: aspect demo template-demo" + $$LAUNCHER demo template-demo + echo "--- :aspect: aspect user user_task" + $$LAUNCHER user user_task + echo "--- :aspect: aspect user user_task_reexport" + $$LAUNCHER user user_task_reexport + echo "--- :aspect: aspect user user-task-manual" + $$LAUNCHER user user-task-manual + echo "--- :aspect: aspect user user-task-added-by-config" + $$LAUNCHER user user-task-added-by-config + echo "--- :aspect: aspect user user-task-subdir (in crates/aspect-cli)" + ( + cd crates/aspect-cli + $$LAUNCHER user user-task-subdir + ) + echo "--- :aspect: aspect help" + $$LAUNCHER help + echo "--- :aspect: aspect-launcher --version" + $$LAUNCHER --version + echo "--- :aspect: aspect-cli --version" + $$CLI --version + echo "--- :aspect: aspect version" + $$LAUNCHER version + - key: __main__::format + label: ":broom: Format" + depends_on: + - __main__::warm + agents: + queue: aspect-default + timeout_in_minutes: 20 + retry: + automatic: + - exit_status: 78 + limit: 1 + command: + - | + echo "--- :bazel: bazel run //tools/format:format.check" + bazel $$BAZEL_STARTUP_OPTS run $$BAZEL_BUILD_OPTS //tools/format:format.check + - key: __main__::delivery + label: ":package: Delivery" + depends_on: + - __main__::warm 
+ agents: + queue: aspect-default + timeout_in_minutes: 20 + retry: + automatic: + - exit_status: 78 + limit: 1 + command: + - aspect delivery --build_url $BUILDKITE_BUILD_URL --commit_sha $BUILDKITE_COMMIT --force_target //:hello -- //:hello //:hello2 diff --git a/Cargo.lock b/Cargo.lock index fd0553b1c..57649f94d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -201,6 +201,7 @@ dependencies = [ "opentelemetry-semantic-conventions", "opentelemetry_sdk", "reqwest", + "sha256", "starlark", "thiserror 2.0.17", "tokio", diff --git a/crates/aspect-cli/.aspect/config.axl b/crates/aspect-cli/.aspect/config.axl index 5acc4d7af..417478929 100644 --- a/crates/aspect-cli/.aspect/config.axl +++ b/crates/aspect-cli/.aspect/config.axl @@ -1,3 +1,2 @@ def config(ctx: ConfigContext): - print("running crates/aspect-cli/.aspect/config.axl") pass diff --git a/crates/aspect-cli/BUILD.bazel b/crates/aspect-cli/BUILD.bazel index 57a9e9a0a..f247715a1 100644 --- a/crates/aspect-cli/BUILD.bazel +++ b/crates/aspect-cli/BUILD.bazel @@ -10,6 +10,7 @@ rust_binary( "@crates//:clap", "@crates//:miette", "@crates//:reqwest", + "@crates//:sha256", "@crates//:starlark", "@crates//:tokio", "@crates//:thiserror", @@ -24,6 +25,10 @@ rust_binary( "//crates/aspect-telemetry", "//crates/axl-runtime", ], + compile_data = glob([ + "src/builtins/**/*.axl", + "src/builtins/**/*.aspect", + ]), visibility = ["//:__pkg__"], ) diff --git a/crates/aspect-cli/Cargo.toml b/crates/aspect-cli/Cargo.toml index ee97bb331..22991f3c7 100644 --- a/crates/aspect-cli/Cargo.toml +++ b/crates/aspect-cli/Cargo.toml @@ -25,6 +25,7 @@ opentelemetry_sdk = "0.31.0" opentelemetry-otlp = {version = "0.31.0", features = ["grpc-tonic", "tls-roots"]} opentelemetry-semantic-conventions = "0.31.0" reqwest = { version = "0.12.22", default-features = false, features = ["http2", "charset", "system-proxy", "rustls-tls", "stream"] } +sha256 = "1.6.0" starlark = "0.13.0" thiserror = "2.0.17" tokio = { version = "1.47.1", features = ["macros", 
"rt", "rt-multi-thread"] } diff --git a/crates/aspect-cli/src/builtins/aspect/MODULE.aspect b/crates/aspect-cli/src/builtins/aspect/MODULE.aspect new file mode 100644 index 000000000..87cfb746c --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/MODULE.aspect @@ -0,0 +1,22 @@ +use_task("build.axl", "build") +use_task("test.axl", "test") +use_task("axl_add.axl", "add") + +# Configure delivery +use_config("config/delivery.axl", "configure_delivery") + +# Configure builtins +use_config("config/builtins.axl", "configure_builtins") + +# Configure rules_lint if its declared by user +use_config( + "config/lint.axl", + "configure_rules_lint", + requires = ["aspect_rules_lint"] +) + +use_config( + "config/nolint.axl", + "configure_dummy_lint", + conflicts = ["aspect_rules_lint"] +) diff --git a/crates/axl-runtime/src/builtins/aspect/axl_add.axl b/crates/aspect-cli/src/builtins/aspect/axl_add.axl similarity index 100% rename from crates/axl-runtime/src/builtins/aspect/axl_add.axl rename to crates/aspect-cli/src/builtins/aspect/axl_add.axl diff --git a/crates/aspect-cli/src/builtins/aspect/bazel.axl b/crates/aspect-cli/src/builtins/aspect/bazel.axl new file mode 100644 index 000000000..f3ef897c5 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/bazel.axl @@ -0,0 +1,41 @@ +""" +Bazel exit code constants. 
+ +These correspond to the exit codes defined in Bazel's ExitCode.java: +https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/util/ExitCode.java +""" + +exit_codes = struct( + SUCCESS = 0, + BUILD_FAILURE = 1, + PARSING_FAILURE = 1, + COMMAND_LINE_ERROR = 2, + TESTS_FAILED = 3, + PARTIAL_ANALYSIS_FAILURE = 3, + NO_TESTS_FOUND = 4, + RUN_FAILURE = 6, + ANALYSIS_FAILURE = 7, + INTERRUPTED = 8, + LOCK_HELD_NOBLOCK_FOR_LOCK = 9, + REMOTE_ENVIRONMENTAL_ERROR = 32, + OOM_ERROR = 33, + REMOTE_ERROR = 34, + LOCAL_ENVIRONMENTAL_ERROR = 36, + BLAZE_INTERNAL_ERROR = 37, + TRANSIENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR = 38, + REMOTE_CACHE_EVICTED = 39, + PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR = 45, + EXTERNAL_DEPS_ERROR = 48, +) + +# https://bazel.build/run/scripts#exit-codes +def default_retry(code: int) -> bool: + """Returns True if the given exit code is retryable. + + Retryable codes are those indicating transient infrastructure failures + where re-running the command may succeed: + - BLAZE_INTERNAL_ERROR (37): Bazel server crash. + - LOCAL_ENVIRONMENTAL_ERROR (36): Local env failure, often caused by + a queued command failing because the server is crashing. + """ + return code == exit_codes.BLAZE_INTERNAL_ERROR or code == exit_codes.LOCAL_ENVIRONMENTAL_ERROR diff --git a/crates/aspect-cli/src/builtins/aspect/build.axl b/crates/aspect-cli/src/builtins/aspect/build.axl new file mode 100644 index 000000000..f85838d21 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/build.axl @@ -0,0 +1,103 @@ +""" +A default 'build' task that wraps a 'bazel build' command. 
+""" +load("./fragments.axl", "BazelFragment") + +def _collect_bes_from_args(ctx): + """Collect BES sinks from CLI args (--bes_backend/--bes_header).""" + sinks = [] + for bes_backend in ctx.args.bes_backend: + metadata = {} + for bes_header in ctx.args.bes_header: + (k, _, v) = bes_header.partition("=") + metadata[k] = v + sinks.append( + bazel.build_events.grpc( + uri = bes_backend, + metadata = metadata, + ) + ) + return sinks + +def impl(ctx: TaskContext) -> int: + health = ctx.bazel.health_check() + fragment = ctx.fragments[BazelFragment] + + if fragment.post_health_check: + fragment.post_health_check(ctx, health) + + if health.outcome == "unhealthy": + fail("Bazel server is unhealthy: " + health.message) + + # Flags: accumulate data, then optionally transform + flags = ["--isatty=" + str(int(ctx.std.io.stdout.is_tty))] + flags.extend(ctx.args.bazel_flag) + flags.extend(fragment.extra_flags) + if fragment.flags: + flags = fragment.flags(flags) + + startup_flags = list(ctx.args.bazel_startup_flag) + startup_flags.extend(fragment.extra_startup_flags) + if fragment.startup_flags: + startup_flags = fragment.startup_flags(startup_flags) + + # BES: merge arg-based sinks with fragment sinks + build_events = _collect_bes_from_args(ctx) + if fragment.build_event_sinks: + build_events.extend(fragment.build_event_sinks) + + # Coerce to bool/list for ctx.bazel.build: + # - non-empty list → stream to those sinks + build_events() iterator. 
+ # - True → stream without explicit sinks (build_event handler only) + # - False → no BEP stream at all + if not build_events: + if fragment.build_event: + build_events = True + else: + build_events = False + + # Shared mutable state across build_start / build_event / build_end + state = {"_task_name": "build"} + + for handler in fragment.build_start: + handler(ctx, state) + + for _ in range(10): + build = ctx.bazel.build( + build_events = build_events, + execution_log = fragment.execution_log_sinks or False, + flags = flags, + startup_flags = startup_flags, + *ctx.args.target_pattern, + ) + + if fragment.build_event: + for event in build.build_events(): + for handler in fragment.build_event: + handler(ctx, state, event) + + build_status = build.wait() + + if build_status.code == 0 or not fragment.build_retry(build_status.code): + break + + for handler in fragment.build_end: + handler(ctx, state, build_status.code) + + return build_status.code + +build = task( + implementation = impl, + fragments = [ + BazelFragment + ], + args = { + "target_pattern": args.positional(minimum = 1, maximum = 512, default = ["..."]), + "bazel_flag": args.string_list(), + "bazel_startup_flag": args.string_list(), + "remote_executor": args.string(), + "remote_cache": args.string(), + "bes_backend": args.string_list(), + "bes_header": args.string_list(), + }, +) diff --git a/crates/aspect-cli/src/builtins/aspect/config/artifacts.axl b/crates/aspect-cli/src/builtins/aspect/config/artifacts.axl new file mode 100644 index 000000000..be4b40526 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/config/artifacts.axl @@ -0,0 +1,299 @@ +"""Configures mid-job artifact uploads for CI environments. + +Uploads artifacts on an interval (every BATCH_SIZE new files) during the build +via the build_event handler. Each upload is a tar.gz archive, and the previous +version is deleted after a successful upload (rolling replacement). 
+ +Also configures Bazel profiling flags and uploads the profile at build_end. + +CircleCI is excluded — artifacts are staged at /workflows/testlogs for +CircleCI's native store_artifacts / store_test_results steps. +""" + +load("../lib/artifacts.axl", "detect_ci", "upload_file", "delete_artifact") +load("../lib/tar.axl", "tar_create") +load("../fragments.axl", "BazelFragment") + + +# Upload every N new files per group +_BATCH_SIZE = 5 + +# bb_clientd FUSE mount root for resolving bytestream:// URIs +_BB_CLIENTD_ROOT = "/mnt/ephemeral/buildbarn/bb_clientd" + +# Artifact staging root, matching Rosetta conventions +_ARTIFACTS_ROOT = "/workflows" + + +def _artifact_name_prefix(env): + """Derive artifact name prefix from CI environment.""" + return env.var("GITHUB_JOB") or env.var("CI_JOB_NAME") or "aspect" + + +def _file_uri_to_path(uri): + """Convert a file:// or bytestream:// URI to a local filesystem path. + + bytestream:// URIs are resolved via the bb_clientd FUSE mount: + bytestream://host:port/blobs// + -> {_BB_CLIENTD_ROOT}/cas/{host}/blobs/sha256/file/{hash}-{size} + """ + if not uri: + return uri + if uri.startswith("file://"): + return uri[len("file://"):] + if uri.startswith("bytestream://"): + rest = uri[len("bytestream://"):] + parts = rest.split("/") + host_port = parts[0] + host = host_port.split(":")[0] + blob_hash = parts[2] + blob_size = parts[3] + return _BB_CLIENTD_ROOT + "/cas/" + host + "/blobs/sha256/file/" + blob_hash + "-" + blob_size + return uri + + +def _collect_test_files(ctx, state, event): + """Collect test output file paths from a test_result event. + + Records source paths and desired archive paths in state without copying. + The archive is built later via mtree + bsdtar. 
+ """ + if event.kind != "test_result": + return + + label = event.id.label + label_path = label.lstrip("/").replace(":", "/") + + if "_test_entries" not in state: + state["_test_entries"] = [] + if "_skipped_files" not in state: + state["_skipped_files"] = [] + + for file in event.payload.test_action_output: + raw_uri = file.file + src = _file_uri_to_path(raw_uri) + if not src: + src = "/".join(file.path_prefix) + "/" + file.name + if not src: + state["_skipped_files"].append({"label": label, "name": file.name, "reason": "no path", "raw": repr(raw_uri)}) + continue + if not ctx.std.fs.exists(src): + state["_skipped_files"].append({"label": label, "name": file.name, "reason": "not found", "raw": src}) + continue + + dest = label_path + "/" + file.name + state["_test_entries"].append({"src": src, "dest": dest, "label": label}) + + +def _build_mtree(entries): + """Build an mtree spec string mapping source files to archive paths.""" + lines = ["#mtree"] + for entry in entries: + lines.append("./" + entry["dest"] + " type=file contents=" + entry["src"]) + return "\n".join(lines) + "\n" + + +def _upload_testlogs(ctx, group, entries, name): + """Build and upload testlogs archive directly from source files via mtree.""" + count = len(entries) + if count == 0 or count == group["last_count"]: + return False + + archive_path = _ARTIFACTS_ROOT + "/" + name + ".tar.gz" + + ctx.std.fs.create_dir_all(_ARTIFACTS_ROOT) + if not tar_create(ctx, archive_path, _build_mtree(entries)): + print("artifact upload: failed to create archive for " + name) + return False + + group["version"] += 1 + artifact_name = name + "-v" + str(group["version"]) + result = upload_file(ctx, archive_path, name = artifact_name) + + if ctx.std.fs.exists(archive_path): + ctx.std.fs.remove_file(archive_path) + + if result["success"]: + if group["prev_ref"]: + delete_artifact(ctx, group["prev_ref"]) + group["prev_ref"] = result["artifact_ref"] + group["last_count"] = count + return True + else: + 
def _maybe_upload_testlogs(ctx, group, entries, name):
    """Flush testlogs via the generic uploader once _BATCH_SIZE new entries exist."""
    if len(entries) < group["last_count"] + _BATCH_SIZE:
        return
    _upload_testlogs(ctx, group, entries, name)


def _upload_testlogs_buildkite(ctx, group, entries):
    """Upload test files individually to Buildkite via a symlink tree.

    Creates a temp directory with symlinks matching the desired archive paths,
    then uploads everything with buildkite-agent. Only uploads entries added
    since the last successful upload.

    Returns:
        True when the upload succeeded and the group's cursor advanced.
    """
    count = len(entries)
    if count == 0 or count == group["last_count"]:
        return False

    new_entries = entries[group["last_count"]:]

    child = ctx.std.process.command("mktemp").args(["-d"]).stdout("piped").stderr("piped").spawn()
    tmp_dir = child.stdout().read_to_string().strip()
    child.wait()

    # BUGFIX: if mktemp fails, tmp_dir is "" and the symlink tree would be
    # created at filesystem root ("/<dest>"). Bail out instead.
    if not tmp_dir:
        print("artifact upload: mktemp failed, skipping testlog upload")
        return False

    for entry in new_entries:
        dest_path = tmp_dir + "/" + entry["dest"]
        dest_dir = dest_path.rsplit("/", 1)[0]
        ctx.std.fs.create_dir_all(dest_dir)
        ctx.std.process.command("ln").args(["-s", entry["src"], dest_path]).spawn().wait()

    child = ctx.std.process.command("buildkite-agent") \
        .args(["artifact", "upload", "**/*"]) \
        .current_dir(tmp_dir) \
        .stdout("inherit") \
        .stderr("inherit") \
        .spawn()

    status = child.wait()
    ctx.std.process.command("rm").args(["-rf", tmp_dir]).spawn().wait()

    if status.code == 0:
        group["last_count"] = count
        return True

    print("artifact upload: buildkite-agent exit " + str(status.code))
    return False


def _maybe_upload_testlogs_buildkite(ctx, group, entries):
    """Flush testlogs to Buildkite once _BATCH_SIZE new entries exist."""
    if len(entries) < group["last_count"] + _BATCH_SIZE:
        return
    _upload_testlogs_buildkite(ctx, group, entries)


def _init_upload_state(state):
    """Ensure the per-build upload bookkeeping exists in the shared state dict."""
    if "_artifact_upload" not in state:
        state["_artifact_upload"] = {
            "testlogs": {"last_count": 0, "version": 0, "prev_ref": None},
        }


def _artifact_prefix(ci, env, task_name):
    """Derive artifact name prefix from CI and task context.

    Buildkite groups artifacts by job, so the task name is sufficient.
    Other CIs need a CI job prefix to disambiguate across jobs.
    """
    if ci == "buildkite":
        return task_name
    return _artifact_name_prefix(env) + "-" + task_name


def configure_artifacts(ctx):
    """Wire interval-based artifact upload into BazelFragment lifecycle hooks.

    - build_event: collect testlog entries + upload every _BATCH_SIZE new files
    - build_end: final flush of any remaining files + upload profile

    Expects state["_task_name"] to be set by the task implementation
    (e.g. "build", "test").
    """
    ci = detect_ci(ctx.std.env)
    is_local = not ci
    if is_local:
        ci = "local"

    fragment = ctx.fragments[BazelFragment]
    debug = bool(ctx.std.env.var("ASPECT_DEBUG"))

    # Generate unique diagnostic file paths in /tmp.
    child = ctx.std.process.command("uuidgen").stdout("piped").stderr("piped").spawn()
    uuid = child.stdout().read_to_string().strip()
    child.wait()

    profile_path = "/tmp/" + uuid + ".profile.gz"
    execlog_path = "/tmp/" + uuid + ".execlog.zstd"
    bep_path = "/tmp/" + uuid + ".bep.bin"

    # Add profiling and heap dump flags.
    fragment.extra_flags.extend([
        "--generate_json_trace_profile",
        "--experimental_profile_include_target_label",
        "--profile=" + profile_path,
        "--heap_dump_on_oom",
    ])

    # Add execution log and build event file sinks.
    fragment.execution_log_sinks.append(
        bazel.execution_log.compact_file(path = execlog_path),
    )
    fragment.build_event_sinks.append(
        bazel.build_events.file(path = bep_path),
    )

    def _flush_testlogs(ctx, state, final):
        """Single upload path shared by the per-event and end-of-build hooks.

        When final is True the upload is unconditional; otherwise it only
        fires once _BATCH_SIZE new entries have accumulated.
        """
        _init_upload_state(state)
        upload_state = state["_artifact_upload"]
        entries = state.get("_test_entries", [])
        if ci == "buildkite":
            if final:
                _upload_testlogs_buildkite(ctx, upload_state["testlogs"], entries)
            else:
                _maybe_upload_testlogs_buildkite(ctx, upload_state["testlogs"], entries)
        else:
            prefix = _artifact_prefix(ci, ctx.std.env, state.get("_task_name", "unknown"))
            name = prefix + "-testlogs"
            if final:
                _upload_testlogs(ctx, upload_state["testlogs"], entries, name)
            else:
                _maybe_upload_testlogs(ctx, upload_state["testlogs"], entries, name)

    def _on_build_event(ctx, state, event):
        _collect_test_files(ctx, state, event)
        if not is_local:
            _flush_testlogs(ctx, state, final = False)

    def _on_build_end(ctx, state, exit_code):
        if debug:
            entries = state.get("_test_entries", [])
            skipped = state.get("_skipped_files", [])
            print("")
            print("--- artifact upload debug summary ---")
            print("ci: " + ci)
            print("profile: " + profile_path)
            print("execlog: " + execlog_path)
            print("bes/bep: " + bep_path)
            print("testlog entries: " + str(len(entries)))
            for f in entries:
                print(" " + f["label"] + " " + f["src"] + " -> " + f["dest"])
            if skipped:
                print("skipped files: " + str(len(skipped)))
                for f in skipped:
                    print(" " + f["label"] + " " + f["name"] + " (" + f["reason"] + ": " + f["raw"] + ")")
            if is_local:
                print("no CI detected, uploads skipped (dry run)")
            print("------------------------------------")

        if not is_local:
            _flush_testlogs(ctx, state, final = True)

            # Upload diagnostic artifacts, then clean up the temp files.
            task_name = state.get("_task_name", "unknown")
            for src, artifact_name in [
                (profile_path, task_name + ".profile.gz"),
                (execlog_path, task_name + ".execlog.zstd"),
                (bep_path, task_name + ".bep.bin"),
            ]:
                if ctx.std.fs.exists(src):
                    upload_file(ctx, src, name = artifact_name)
                    ctx.std.fs.remove_file(src)

    fragment.build_event.append(_on_build_event)
    fragment.build_end.append(_on_build_end)
# --- crates/aspect-cli/src/builtins/aspect/config/builtins.axl ---

"""Configures builtin tasks for Workflows"""

load("../lib/platform.axl",
    "read_platform_config",
    "read_host_config",
    "get_bazelrc_flags",
    "DEFAULT_PLATFORM_DIR"
)
load("../lib/health_check.axl", "agent_health_check")
load("../lib/environment.axl", "configure_workflows_env")
load("../lib/build_metadata.axl", "get_build_metadata_flags")
load("../fragments.axl", "BazelFragment")
load("./artifacts.axl", "configure_artifacts")

def _post_health_check(ctx, health):
    """Buildkite variant of the health check hook: emits a section header first."""
    print("--- :aspect: Agent Health Check")
    agent_health_check(ctx, health)

def configure_builtins(ctx: ConfigContext):
    """Globally configure the BazelFragment for Workflows runners.

    When running under a Workflows runner this injects platform bazelrc flags,
    build metadata, BES sinks, and health-check hooks; artifact upload wiring
    is always appended last so it wraps any previously registered hooks.
    """
    UNDER_WORKFLOWS = ctx.std.env.var("ASPECT_WORKFLOWS_RUNNER_VERSION")
    is_buildkite = bool(ctx.std.env.var("BUILDKITE"))
    if UNDER_WORKFLOWS:
        if is_buildkite:
            print("--- :aspect: Workflows Runner Environment")
        configure_workflows_env(ctx.std.fs)

    # Read platform config from disk
    platform_config = read_platform_config(ctx.std.fs)
    # Read host config from environment
    host_config = read_host_config(ctx.std.env, ctx.std.io)

    # Configure BazelFragment globally
    bazel_fragment = ctx.fragments[BazelFragment]

    if UNDER_WORKFLOWS:
        # Generate bazelrc content
        (startup_flags, build_flags) = get_bazelrc_flags(
            platform_config = platform_config,
            host_config = host_config,
            bazel_version = "9.0.0",
            root_dir = ctx.std.env.root_dir,
        )

        bessie_endpoint = platform_config.get("bessie_endpoint", None)
        bessie_sinks = []
        if bessie_endpoint:
            bessie_sinks.append(bazel.build_events.grpc(
                uri = bessie_endpoint,
                metadata = {},  # TODO: how does bessie authenticate?
            ))

        # BUGFIX: guard stdout — it may be None/empty if `bazel` is missing
        # from PATH or exits without output, which would crash the `in` check.
        bazel_version_output = ctx.std.process.command("bazel").arg("--version").stdout("piped").spawn().wait_with_output()
        if "aspect" in (bazel_version_output.stdout or ""):
            bazel_fragment.extra_startup_flags.append("--aspect:disable_plugins")

        bazel_fragment.extra_startup_flags.extend(startup_flags)
        bazel_fragment.extra_flags.extend(build_flags)
        metadata_flags = get_build_metadata_flags(ctx.std.env, ctx.std.process, workspace = ".")
        bazel_fragment.extra_flags.extend(metadata_flags)
        bazel_fragment.build_start.append(lambda ctx, state: print("+++ :bazel: Building"))
        bazel_fragment.build_event_sinks.extend(bessie_sinks)
        if is_buildkite:
            bazel_fragment.post_health_check = _post_health_check
        else:
            bazel_fragment.post_health_check = agent_health_check

    # Wire artifact uploads last so it wraps existing hooks
    configure_artifacts(ctx)

# --- crates/aspect-cli/src/builtins/aspect/config/delivery.axl ---

"""Configures delivery task for Workflows"""

load("../tasks/delivery.axl", "delivery")
load("../fragments.axl", "DeliveryFragment")
# NOTE(review): deliveryd_health is loaded but never referenced in this file —
# confirm whether it is dead and can be removed, or wiring is still missing.
load("../lib/deliveryd.axl", deliveryd_health = "health")

def configure_delivery(ctx: ConfigContext):
    """Register the delivery verb and default DeliveryFragment hooks."""
    # Add a delivery verb
    ctx.tasks.add(delivery)

    # Configure DeliveryFragment globally
    ctx.fragments[DeliveryFragment].delivery_start = lambda: print("--- :bazel: Delivery")

# --- crates/aspect-cli/src/builtins/aspect/config/lint.axl ---

"""Configures rules_lint if it's available"""

load("../lib/platform.axl",
    "read_platform_config",
    "read_host_config",
    "DEFAULT_PLATFORM_DIR"
)
load(
    "../lib/github.axl",
    "create_check_run",
    "update_check_run",
    "complete_check_run",
    "build_output",
    "build_annotation",
    "create_review",
    "build_suggestion",
)
load(
    "../lib/sarif.axl",
    "sarif_to_annotations",
    "get_sarif_summary"
)
load(
    "@aspect_rules_lint//lint/lint.axl",
    "StrategyHoldTheLine",
)
load("../lib/linting.axl", "make_github_strategy", "make_github_changed_files_provider")


def configure_rules_lint(ctx: ConfigContext):
    """Attach a GitHub-aware lint strategy to the `lint` task when a token exists.

    Without a GitHub token, local development keeps the default strategy
    (StrategyHoldTheLine + GitDiffProvider).
    """
    for task in ctx.tasks:
        if task.name == "lint":
            # Prefer the Workflows-provisioned PR token over a generic one.
            github_token = ctx.std.env.var("ASPECT_WORKFLOWS_PR_GITHUB_TOKEN") or ctx.std.env.var("GITHUB_TOKEN")
            if github_token:
                # CI mode: GitHub-aware strategy with hold-the-line
                github_repository = ctx.std.env.var("GITHUB_REPOSITORY") or ""
                repo_parts = github_repository.split("/")
                gh_owner = repo_parts[0] if len(repo_parts) >= 2 else ""
                gh_repo = repo_parts[1] if len(repo_parts) >= 2 else ""

                task.config.strategy = make_github_strategy(
                    StrategyHoldTheLine,
                    token = github_token,
                    owner = gh_owner,
                    repo = gh_repo,
                    mode = "streaming",
                )
                task.config.changed_files_provider = make_github_changed_files_provider(
                    token = github_token,
                    owner = gh_owner,
                    repo = gh_repo,
                )
            # else: local dev uses defaults (StrategyHoldTheLine + GitDiffProvider)

# --- crates/aspect-cli/src/builtins/aspect/config/nolint.axl ---

"""Configures a dummy lint verb for migration."""

load("../tasks/dummy_lint.axl", "lint")
load("../tasks/dummy_format.axl", "format")

def configure_dummy_lint(ctx: ConfigContext):
    """Register placeholder lint/format verbs so the CLI surface stays stable."""
    ctx.tasks.add(lint)
    ctx.tasks.add(format)
# --- crates/aspect-cli/src/builtins/aspect/fragments.axl ---

"""Fragment type definitions for builtin tasks."""

load("./bazel.axl", "default_retry")

BazelFragment = fragment(
    # Declarative data — composable, zero-cost reads
    extra_flags = attr(list[str], []),
    extra_startup_flags = attr(list[str], []),
    build_event_sinks = attr(list[bazel.build.BuildEventSink], []),
    execution_log_sinks = attr(list[bazel.execution_log.ExecLogSink], []),

    # Optional transforms — only called when set
    flags = attr(typing.Callable[[list[str]], list[str]] | None, None),
    startup_flags = attr(typing.Callable[[list[str]], list[str]] | None, None),

    # Lifecycle hooks — lists of callables, all receive (ctx, state, ...)
    post_health_check = attr(typing.Callable | None, None),
    build_start = attr(list[typing.Callable[[TaskContext, dict], None]], []),
    build_event = attr(list[typing.Callable[[TaskContext, dict, dict], None]], []),
    build_retry = attr(typing.Callable[[int], bool], default_retry),
    build_end = attr(list[typing.Callable[[TaskContext, dict, int], None]], []),
)

DeliveryFragment = fragment(
    delivery_start = attr(typing.Callable[[], None], lambda: None),
    delivery_end = attr(typing.Callable[[], None], lambda: None),
    deliver_target = attr(typing.Callable[[str, bool], None], lambda label, is_forced: None),
)

# --- crates/aspect-cli/src/builtins/aspect/lib/artifacts.axl ---

"""
Artifact Upload Library

Platform-agnostic artifact upload functions for CI environments.
Supports GitHub Actions, Buildkite, and GitLab CI.
CircleCI is excluded — artifacts are staged at /workflows/testlogs only.
"""


def detect_ci(env):
    """Detect CI platform from environment.

    Args:
        env: Environment interface (ctx.std.env)

    Returns:
        "github" | "buildkite" | "gitlab" | None
    """
    if env.var("ACTIONS_RUNTIME_TOKEN"):
        return "github"
    elif env.var("BUILDKITE_AGENT_ACCESS_TOKEN"):
        return "buildkite"
    elif env.var("CI_JOB_TOKEN"):
        return "gitlab"
    # CircleCI (CIRCLE_PROJECT_REPONAME) intentionally excluded:
    # artifacts are staged to disk for CircleCI's native store_artifacts step.
    return None


def upload_file(ctx, path, name):
    """Upload a single file to the current CI platform's artifact storage.

    Args:
        ctx: Context (needs ctx.http(), ctx.std.env, ctx.std.fs, ctx.std.process)
        path: str - absolute file path to upload
        name: str - artifact name

    Returns:
        dict with "success" (bool), "artifact_ref" (str|None), "errors" (list)
    """
    if not ctx.std.fs.exists(path):
        return {"success": False, "artifact_ref": None, "errors": ["file not found: " + path]}

    ci = detect_ci(ctx.std.env)
    if ci == "github":
        return _upload_file_github(ctx, path, name)
    elif ci == "buildkite":
        return _upload_file_buildkite(ctx, path, name)
    elif ci == "gitlab":
        return _upload_file_gitlab(ctx, path, name)

    return {"success": False, "artifact_ref": None, "errors": ["not running in supported CI"]}


def delete_artifact(ctx, name):
    """Delete a previously uploaded artifact by name.

    Args:
        ctx: Context (needs ctx.http(), ctx.std.env, ctx.std.process)
        name: str - artifact name to delete

    Returns:
        dict with "success" (bool)
    """
    ci = detect_ci(ctx.std.env)
    if ci == "github":
        return _delete_github(ctx, name)
    elif ci == "buildkite":
        return _delete_buildkite(ctx, name)
    # GitLab and others: no easy single-artifact deletion
    return {"success": True}


# =============================================================================
# GitHub Actions
# =============================================================================

def _decode_backend_ids(ctx, token):
    """Extract workflowRunBackendId and workflowJobRunBackendId from ACTIONS_RUNTIME_TOKEN JWT.

    The token's 'scp' claim contains a scope like:
    "Actions.Results:run-backend-id:job-backend-id"

    Args:
        ctx: Context with ctx.std.process for base64 decoding
        token: The ACTIONS_RUNTIME_TOKEN JWT string

    Returns:
        (workflow_run_backend_id, workflow_job_run_backend_id) or (None, None)
    """
    # JWT is three base64url-encoded segments separated by dots
    parts = token.split(".")
    if len(parts) < 2:
        return (None, None)

    # Decode the payload (second segment).
    # Convert base64url encoding to standard base64 before decoding.
    payload_b64 = parts[1].replace("-", "+").replace("_", "/")
    # BUGFIX: JWT segments drop the '=' padding that `base64 -d` requires;
    # without restoring it the decode fails and we always return (None, None).
    payload_b64 += "=" * ((4 - len(payload_b64) % 4) % 4)
    child = ctx.std.process.command("base64").args(["-d"]).stdin("piped").stdout("piped").stderr("piped").spawn()
    child.stdin().write(payload_b64)
    child.stdin().close()
    payload_json = child.stdout().read_to_string()
    child.wait()

    if not payload_json:
        return (None, None)

    payload = json.decode(payload_json)
    scp = payload.get("scp", "")

    # scp is space-separated scopes; find the Actions.Results one
    scopes = scp.split(" ")
    for scope in scopes:
        if scope.startswith("Actions.Results:"):
            scope_parts = scope.split(":")
            if len(scope_parts) >= 3:
                return (scope_parts[1], scope_parts[2])

    return (None, None)


def _github_twirp(ctx, method, payload):
    """Make a Twirp RPC call to the GitHub Actions artifact service.

    Args:
        ctx: Context
        method: str - Twirp method name (e.g. "CreateArtifact")
        payload: dict - request payload

    Returns:
        (success: bool, body: dict|None)
    """
    token = ctx.std.env.var("ACTIONS_RUNTIME_TOKEN")
    results_url = ctx.std.env.var("ACTIONS_RESULTS_URL")
    if not token or not results_url:
        return (False, None)

    if results_url.endswith("/"):
        results_url = results_url[:-1]

    http = ctx.http()
    url = results_url + "/twirp/github.actions.results.api.v1.ArtifactService/" + method
    headers = {
        "Authorization": "Bearer " + token,
        "Content-Type": "application/json",
    }

    resp = http.post(url = url, headers = headers, data = json.encode(payload)).block()
    success = resp.status >= 200 and resp.status < 300
    body = json.decode(resp.body) if resp.body else None
    return (success, body)


def _upload_file_github(ctx, path, name):
    """Upload a single file to GitHub Actions using the Twirp artifact API.

    Flow: CreateArtifact -> PUT to signed Azure Blob URL -> FinalizeArtifact
    """
    token = ctx.std.env.var("ACTIONS_RUNTIME_TOKEN")
    if not token:
        return {"success": False, "artifact_ref": None, "errors": ["missing ACTIONS_RUNTIME_TOKEN"]}

    run_id, job_id = _decode_backend_ids(ctx, token)
    if not run_id or not job_id:
        return {"success": False, "artifact_ref": None, "errors": ["failed to decode backend IDs from token"]}

    # Step 1: CreateArtifact
    ok, body = _github_twirp(ctx, "CreateArtifact", {
        "workflowRunBackendId": run_id,
        "workflowJobRunBackendId": job_id,
        "name": name,
        "version": 4,
    })
    if not ok or not body:
        return {"success": False, "artifact_ref": None, "errors": ["CreateArtifact failed"]}

    signed_url = body.get("signedUploadUrl", "")
    if not signed_url:
        return {"success": False, "artifact_ref": None, "errors": ["no signedUploadUrl returned"]}

    # Step 2: Upload file to signed URL
    upload_resp = ctx.http().put(
        signed_url,
        headers = {
            "x-ms-blob-type": "BlockBlob",
            "Content-Type": "application/octet-stream",
        },
        data = ctx.std.fs.open(path),
    ).block()

    if upload_resp.status < 200 or upload_resp.status >= 300:
        return {"success": False, "artifact_ref": None, "errors": ["blob upload: HTTP " + str(upload_resp.status)]}

    # Step 3: FinalizeArtifact
    ok, body = _github_twirp(ctx, "FinalizeArtifact", {
        "workflowRunBackendId": run_id,
        "workflowJobRunBackendId": job_id,
        "name": name,
        "size": str(ctx.std.fs.metadata(path).size),
    })
    if not ok:
        return {"success": False, "artifact_ref": None, "errors": ["FinalizeArtifact failed"]}

    return {"success": True, "artifact_ref": name, "errors": []}


def _delete_github(ctx, name):
    """Delete an artifact from GitHub Actions using the Twirp API."""
    token = ctx.std.env.var("ACTIONS_RUNTIME_TOKEN")
    if not token:
        return {"success": False}

    run_id, job_id = _decode_backend_ids(ctx, token)
    if not run_id or not job_id:
        return {"success": False}

    ok, _ = _github_twirp(ctx, "DeleteArtifact", {
        "workflowRunBackendId": run_id,
        "workflowJobRunBackendId": job_id,
        "name": name,
    })
    return {"success": ok}


# =============================================================================
# Buildkite
# =============================================================================

def _upload_file_buildkite(ctx, path, name):
    """Upload a single file to Buildkite using the buildkite-agent CLI.

    Copies the file to a temp location with the desired name so the artifact
    appears as just `name` rather than the full absolute path.
    """
    # BUGFIX: tolerate a bare filename — rsplit("/", 1)[1] would raise when
    # `path` contains no separator.
    if "/" in path:
        dir = path.rsplit("/", 1)[0]
        basename = path.rsplit("/", 1)[1]
    else:
        dir = "."
        basename = path
    needs_copy = basename != name

    if needs_copy:
        ctx.std.process.command("cp").args([path, dir + "/" + name]).spawn().wait()

    child = ctx.std.process.command("buildkite-agent") \
        .args(["artifact", "upload", name]) \
        .current_dir(dir) \
        .stdout("inherit") \
        .stderr("inherit") \
        .spawn()

    status = child.wait()

    if needs_copy:
        copied = dir + "/" + name
        if ctx.std.fs.exists(copied):
            ctx.std.fs.remove_file(copied)

    if status.code != 0:
        return {"success": False, "artifact_ref": None, "errors": ["exit " + str(status.code)]}
    return {"success": True, "artifact_ref": name, "errors": []}


def _delete_buildkite(ctx, name):
    """Buildkite has no artifact deletion API — no-op."""
    return {"success": True}


# =============================================================================
# GitLab CI
# =============================================================================

def _upload_file_gitlab(ctx, path, name):
    """Upload a single file to GitLab using the Generic Packages API.

    PUT {CI_API_V4_URL}/projects/{CI_PROJECT_ID}/packages/generic/{name}/{version}/{file}
    """
    token = ctx.std.env.var("CI_JOB_TOKEN")
    api_url = ctx.std.env.var("CI_API_V4_URL")
    project_id = ctx.std.env.var("CI_PROJECT_ID")
    pipeline_id = ctx.std.env.var("CI_PIPELINE_ID") or "0"

    if not token or not api_url or not project_id:
        return {"success": False, "artifact_ref": None, "errors": ["missing GitLab CI env vars"]}

    file_name = path.split("/")[-1]
    url = api_url + "/projects/" + project_id + "/packages/generic/" + name + "/" + pipeline_id + "/" + file_name

    resp = ctx.http().put(
        url,
        headers = {"JOB-TOKEN": token},
        data = ctx.std.fs.open(path),
    ).block()

    if resp.status < 200 or resp.status >= 300:
        return {"success": False, "artifact_ref": None, "errors": ["HTTP " + str(resp.status)]}
    return {"success": True, "artifact_ref": name, "errors": []}
+""" + + +def _git_show(process): + """Run git show HEAD and return a dict of commit fields.""" + out = process.command("git").arg("show").arg("HEAD") \ + .arg("--format=%H\n%aN\n%aE\n%s\n%aI\n%D").arg("--no-patch") \ + .stdout("piped").stderr("piped").spawn().wait_with_output() + + if not out.stdout: + return {} + + lines = out.stdout.strip().split("\n") + result = {} + + if len(lines) >= 1 and lines[0].strip(): + result["COMMIT_SHA"] = lines[0].strip() + if len(lines) >= 2 and lines[1].strip(): + result["COMMIT_AUTHOR"] = lines[1].strip() + if len(lines) >= 3 and lines[2].strip(): + result["COMMIT_AUTHOR_EMAIL"] = lines[2].strip() + if len(lines) >= 4 and lines[3].strip(): + msg = lines[3].strip() + if len(msg) > 80: + msg = msg[:80] + result["COMMIT_MESSAGE"] = msg + if len(lines) >= 5 and lines[4].strip(): + result["COMMIT_TIMESTAMP"] = lines[4].strip() + if len(lines) >= 6 and lines[5].strip(): + # Parse refs for tag: + for ref in lines[5].split(","): + ref = ref.strip() + if ref.startswith("tag: "): + result["TAG"] = ref[5:] + break + + return result + + +def _parse_git_remote_url(url): + """Parse git remote URL to extract REPO_OWNER, REPO, and VCS keys.""" + if not url: + return {} + + result = {} + + # Detect VCS host + if "github.com" in url: + result["VCS"] = "github" + elif "gitlab.com" in url: + result["VCS"] = "gitlab" + elif "bitbucket.org" in url: + result["VCS"] = "bitbucket" + + # Parse owner/repo from URL + path = None + if url.startswith("git@"): + # SSH: git@github.com:owner/repo.git + colon_idx = url.find(":") + if colon_idx >= 0: + path = url[colon_idx + 1:] + else: + scheme_end = url.find("://") + if scheme_end >= 0: + # HTTPS (possibly with credentials): https://[token@]host/owner/repo.git + rest = url[scheme_end + 3:] + slash_idx = rest.find("/") + if slash_idx >= 0: + path = rest[slash_idx + 1:] + + if path: + if path.endswith(".git"): + path = path[:-4] + parts = path.split("/") + if len(parts) >= 2: + result["REPO_OWNER"] = parts[0] + 
result["REPO"] = parts[1] + elif len(parts) == 1 and parts[0]: + result["REPO"] = parts[0] + + return result + + +def _collect_github(env, meta): + """Fill meta dict from GitHub Actions environment variables.""" + meta["CI"] = "github" + meta["VCS"] = "github" + + sha = env.var("GITHUB_SHA") + if sha: + meta["COMMIT_SHA"] = sha + + actor = env.var("GITHUB_ACTOR") + if actor: + meta["USER"] = actor + + # Branch detection: GITHUB_HEAD_REF is set on PR events, otherwise parse GITHUB_REF + head_ref = env.var("GITHUB_HEAD_REF") + if head_ref: + meta["BRANCH"] = head_ref + else: + ref = env.var("GITHUB_REF") + if ref and ref.startswith("refs/heads/"): + meta["BRANCH"] = ref[11:] + elif ref and ref.startswith("refs/tags/"): + meta["TAG"] = ref[10:] + + repo = env.var("GITHUB_REPOSITORY") + if repo: + if "/" in repo: + parts = repo.split("/") + meta["REPO_OWNER"] = parts[0] + meta["REPO"] = parts[-1] + else: + meta["REPO"] = repo + + +def _collect_buildkite(env, meta): + """Fill meta dict from Buildkite environment variables.""" + meta["CI"] = "buildkite" + + commit = env.var("BUILDKITE_COMMIT") + if commit: + meta["COMMIT_SHA"] = commit + + creator = env.var("BUILDKITE_BUILD_CREATOR") + if creator: + meta["USER"] = creator + + branch = env.var("BUILDKITE_BRANCH") + if branch: + meta["BRANCH"] = branch + + tag = env.var("BUILDKITE_TAG") + if tag: + meta["TAG"] = tag + + repo_url = env.var("BUILDKITE_REPO") + if repo_url: + parsed = _parse_git_remote_url(repo_url) + for k, v in parsed.items(): + meta[k] = v + + +def _collect_circleci(env, meta): + """Fill meta dict from CircleCI environment variables.""" + meta["CI"] = "circleci" + + sha = env.var("CIRCLE_SHA1") + if sha: + meta["COMMIT_SHA"] = sha + + username = env.var("CIRCLE_USERNAME") + if username: + meta["USER"] = username + + branch = env.var("CIRCLE_BRANCH") + if branch: + meta["BRANCH"] = branch + + tag = env.var("CIRCLE_TAG") + if tag: + meta["TAG"] = tag + + owner = env.var("CIRCLE_PROJECT_USERNAME") + if owner: 
+ meta["REPO_OWNER"] = owner + + repo = env.var("CIRCLE_PROJECT_REPONAME") + if repo: + meta["REPO"] = repo + + repo_url = env.var("CIRCLE_REPOSITORY_URL") + if repo_url: + parsed = _parse_git_remote_url(repo_url) + for k, v in parsed.items(): + if k not in meta: + meta[k] = v + + +def _collect_gitlab(env, meta): + """Fill meta dict from GitLab CI environment variables.""" + meta["CI"] = "gitlab" + + sha = env.var("CI_COMMIT_SHA") + if sha: + meta["COMMIT_SHA"] = sha + + user = env.var("GITLAB_USER_NAME") + if user: + meta["USER"] = user + + branch = env.var("CI_COMMIT_BRANCH") + if branch: + meta["BRANCH"] = branch + + tag = env.var("CI_COMMIT_TAG") + if tag: + meta["TAG"] = tag + + namespace = env.var("CI_PROJECT_NAMESPACE") + if namespace: + meta["REPO_OWNER"] = namespace + + project = env.var("CI_PROJECT_NAME") + if project: + meta["REPO"] = project + + repo_url = env.var("CI_REPOSITORY_URL") + if repo_url: + parsed = _parse_git_remote_url(repo_url) + for k, v in parsed.items(): + if k not in meta: + meta[k] = v + + +def get_build_metadata_flags(env, process, workspace, task_id = None, task_name = None): + """ + Generate --build_metadata=KEY=VALUE flags for Bazel invocations. + + Args: + env: Environment interface (ctx.std.env) + process: Process interface (ctx.std.process) + workspace: Workspace name (e.g. 
".") + task_id: Optional task ID string + task_name: Optional task name string + + Returns: + list of --build_metadata=KEY=VALUE strings (empty values are skipped) + """ + meta = {} + + # Step 1: Collect CI host metadata (provides CI-specific fields as fallback) + if env.var("GITHUB_ACTIONS"): + _collect_github(env, meta) + elif env.var("BUILDKITE"): + _collect_buildkite(env, meta) + elif env.var("CIRCLECI"): + _collect_circleci(env, meta) + elif env.var("GITLAB_CI"): + _collect_gitlab(env, meta) + + # Step 2: Run git show and overwrite commit fields (git is the primary source) + git_data = _git_show(process) + for k, v in git_data.items(): + meta[k] = v + + # Step 3: If no USER was set by CI, fall back to git author name + if not meta.get("USER") and meta.get("COMMIT_AUTHOR"): + meta["USER"] = meta["COMMIT_AUTHOR"] + + # Step 4: Set workspace and task context if provided + if workspace: + meta["WORKSPACE"] = workspace + if task_id: + meta["ASPECT_TASK_ID"] = task_id + if task_name: + meta["ASPECT_TASK_NAME"] = task_name + + # Step 5: Build --build_metadata=KEY=VALUE strings, skipping empty values + flags = [] + for key, value in meta.items(): + if value: + flags.append("--build_metadata=" + key + "=" + value) + + return flags diff --git a/crates/aspect-cli/src/builtins/aspect/lib/deliveryd.axl b/crates/aspect-cli/src/builtins/aspect/lib/deliveryd.axl new file mode 100644 index 000000000..6671eee19 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/lib/deliveryd.axl @@ -0,0 +1,123 @@ +""" +Client library for communicating with deliveryd. + +deliveryd is a Unix socket HTTP server that manages delivery state, +tracking which artifacts have been delivered and preventing re-delivery. +""" + +def _parse_endpoint(endpoint): + """ + Parse a deliveryd endpoint string. + + If the endpoint starts with "unix://", returns (base_url, socket_path). + Otherwise, returns (endpoint, None) to use as a direct HTTP endpoint. 
def _post(http, endpoint, path, data):
    """Make a POST request to deliveryd, handling unix:// endpoints."""
    base_url, socket_path = _parse_endpoint(endpoint)
    encoded = json.encode(data)
    if socket_path:
        return http.post(url = base_url + path, headers={"Content-Type": "application/json"}, data=encoded, unix_socket=socket_path)
    return http.post(url = base_url + path, headers={"Content-Type": "application/json"}, data=encoded)

def _get(http, endpoint, path):
    """Make a GET request to deliveryd, handling unix:// endpoints."""
    base_url, socket_path = _parse_endpoint(endpoint)
    if socket_path:
        return http.get(url=base_url + path, unix_socket=socket_path)
    return http.get(url=base_url + path)

def health(ctx, endpoint):
    """
    Check if deliveryd is healthy by calling the /health endpoint.

    Returns:
        True if healthy, False otherwise
    """
    http = ctx.http()
    result = _get(http, endpoint, "/health").map_err(lambda e: str(e)).block()
    # Transport errors surface as a string after map_err.
    if type(result) == "string":
        return False
    return result.status >= 200 and result.status < 300

def query(ctx, endpoint, ci_host, commit_sha, workspace):
    """
    Query deliveryd for delivery state of all targets in a commit.
    Returns a dict mapping label -> {output_sha, delivered, delivered_by}.
    """
    http = ctx.http()
    # CONSISTENCY FIX: guard transport errors like record()/health() do,
    # instead of crashing opaquely on a connection failure.
    response = _post(http, endpoint, "/query", {
        "ci_host": ci_host,
        "commit_sha": commit_sha,
        "workspace": workspace,
    }).map_err(lambda e: str(e)).block()

    if type(response) == "string":
        fail("deliveryd query failed: " + response)

    if response.status < 200 or response.status >= 300:
        fail("deliveryd query failed: " + response.body)

    data = json.decode(response.body)

    targets = data.get("targets", []) or []
    # Build lookup dict by label
    result = {}
    for target in targets:
        result[target["label"]] = {
            "output_sha": target["output_sha"],
            "delivered": target["delivered"],
            "delivered_by": target.get("delivered_by"),
        }
    return result

def deliver(ctx, endpoint, ci_host, output_sha, workspace, signature):
    """
    Mark a target as delivered by setting its delivery signature.
    """
    http = ctx.http()
    # CONSISTENCY FIX: same transport-error guard as record().
    response = _post(http, endpoint, "/deliver", {
        "ci_host": ci_host,
        "output_sha": output_sha,
        "workspace": workspace,
        "signature": signature,
    }).map_err(lambda e: str(e)).block()

    if type(response) == "string":
        fail("deliveryd deliver failed: " + response)

    if response.status < 200 or response.status >= 300:
        fail("deliveryd deliver failed: " + response.body)

def record(ctx, endpoint, ci_host, commit_sha, workspace, label, output_sha):
    """
    Record a target's output SHA with deliveryd.
    This must be called before the target can be queried or delivered.
    """
    http = ctx.http()
    response = _post(http, endpoint, "/record", {
        "ci_host": ci_host,
        "commit_sha": commit_sha,
        "workspace": workspace,
        "label": label,
        "output_sha": output_sha,
    }).map_err(lambda e: e).block()

    if type(response) == "string":
        fail("deliveryd record failed: " + response)

    if response.status < 200 or response.status >= 300:
        fail("deliveryd record failed: " + response.body)

def delete_artifact(ctx, endpoint, ci_host, output_sha, workspace):
    """
    Delete artifact metadata (used for cleanup on failed deliveries).
    """
    http = ctx.http()
    # CONSISTENCY FIX: same transport-error guard as record().
    response = _post(http, endpoint, "/artifact/delete", {
        "ci_host": ci_host,
        "output_sha": output_sha,
        "workspace": workspace,
    }).map_err(lambda e: str(e)).block()

    if type(response) == "string":
        fail("deliveryd artifact delete failed: " + response)

    if response.status < 200 or response.status >= 300:
        fail("deliveryd artifact delete failed: " + response.body)

# --- crates/aspect-cli/src/builtins/aspect/lib/environment.axl ---

"""
Configure Workflows Environment Library

Prints debug/diagnostic information about the runner: the Workflows product
version, whether warming is enabled, and cloud platform metadata (region,
instance type, spot/preemptible status, bootstrap log URLs, etc.).

This is purely informational output for debugging — no side effects.
"""

load("./platform.axl", "DEFAULT_PLATFORM_DIR")

# AWS CloudWatch log group
AWS_LOG_GROUP = "/aw/runner/cloud-init/output"


def _url_encode(s):
    """Percent-encode a string for use in URLs.

    NOTE(review): only a handful of characters are escaped here — confirm
    this covers every character that can appear in the inputs it is used on.
    """
    result = ""
    for c in s.elems():
        if c == " ":
            result += "%20"
        elif c == "\"":
            result += "%22"
        elif c == "\n":
            result += "%0A"
        elif c == "=":
            result += "%3D"
        elif c == "/":
            result += "%2F"
        else:
            result += c
    return result


def _read_file(fs, path):
    """Read a text file and strip whitespace, or return empty string if missing."""
    if fs.exists(path):
        return fs.read_to_string(path).strip()
    return ""


def _read_platform_metadata(fs):
    """Read all platform metadata from files under DEFAULT_PLATFORM_DIR."""
    return {
        "region": _read_file(fs, DEFAULT_PLATFORM_DIR + "/region"),
        "az": _read_file(fs, DEFAULT_PLATFORM_DIR + "/az"),
        "instance_id": _read_file(fs, DEFAULT_PLATFORM_DIR + "/instance_id"),
        "instance_name": _read_file(fs, DEFAULT_PLATFORM_DIR + "/instance_name"),
        "instance_type": _read_file(fs, DEFAULT_PLATFORM_DIR + "/instance_type"),
        "account_id": _read_file(fs, DEFAULT_PLATFORM_DIR + "/account"),
        # Presence of the marker file (not its contents) signals spot/preemptible.
        "preemptible": fs.exists(DEFAULT_PLATFORM_DIR + "/preemptible"),
    }
def _print_workflows_info(fs):
    """Print Workflows version and warming status."""
    version = _read_file(fs, DEFAULT_PLATFORM_DIR + "/product_version")
    warming_enabled = fs.exists(DEFAULT_PLATFORM_DIR + "/warming_enabled")

    print("Workflows Information")
    print("\tVersion: " + version)
    print("\tWarming enabled: " + ("true" if warming_enabled else "false"))


def _print_aws_info(meta):
    """Print AWS-specific runner information."""
    print("AWS Information")
    print("\tRegion: " + meta["region"])
    print("\tAvailability Zone: " + meta["az"])
    print("\tAccount ID: " + meta["account_id"])
    print("\tInstance ID: " + meta["instance_id"])
    print("\tInstance Name: " + meta["instance_name"])
    print("\tInstance Type: " + meta["instance_type"])
    print("\tSpot Instance: " + ("yes" if meta["preemptible"] else "no"))
    # DRY FIX: use the module-level AWS_LOG_GROUP constant instead of
    # duplicating the log group literal here (output is byte-identical).
    print("\tCLI: 'aws logs tail \"" + AWS_LOG_GROUP + "\" --log-stream-names \"" + meta["instance_id"] + "\" --since=30d'")


def _print_gcp_info(meta):
    """Print GCP-specific runner information."""
    print("GCP Information")
    print("\tRegion: " + meta["region"])
    print("\tAvailability Zone: " + meta["az"])
    print("\tProject ID: " + meta["account_id"])
    print("\tInstance ID: " + meta["instance_id"])
    print("\tInstance Name: " + meta["instance_name"])
    print("\tInstance Type: " + meta["instance_type"])
    print("\tPreemptible: " + ("yes" if meta["preemptible"] else "no"))
    print("\tCLI: 'gcloud logging read --project " + meta["account_id"] + " \"resource.type=gce_instance resource.labels.instance_id=" + meta["instance_id"] + " log_name=projects/" + meta["account_id"] + "/logs/google_metadata_script_runner\" --format=\"value(jsonPayload.message)\" --freshness=30d | tac'")


def configure_workflows_env(fs):
    """
    Print debug/diagnostic information about the runner environment.

    Args:
        fs: Filesystem interface (ctx.std.fs)
    """
    _print_workflows_info(fs)
    meta = _read_platform_metadata(fs)

    # Marker files written by the runner bootstrap select the cloud provider.
    if fs.exists(DEFAULT_PLATFORM_DIR + "/aws"):
        _print_aws_info(meta)
    elif fs.exists(DEFAULT_PLATFORM_DIR + "/gcp"):
        _print_gcp_info(meta)

# --- crates/aspect-cli/src/builtins/aspect/lib/github.axl ---

"""
GitHub Check Runs Client Library

Client for creating and updating GitHub Check Runs via the GitHub API.
"""

DEFAULT_GITHUB_API = "https://api.github.com"


def _normalize_output(output):
    """
    Normalize output parameter to the required dict format.

    If output is a string, wraps it in a dict with title and summary.
    If output is already a dict, returns it as-is.
    """
    if output == None:
        return None
    if type(output) == "string":
        return {
            "title": "Check Run Output",
            "summary": output,
        }
    return output
+ + Args: + ctx: Context with http() + method: HTTP method ("POST" or "PATCH") + url: Full URL to request + token: GitHub token (PAT or Actions token) + payload: Optional dict to send as JSON body + + Returns: + (success: bool, status: int, body: dict or str) + """ + http = ctx.http() + + headers = { + "Authorization": "Bearer " + token, + "Accept": "application/vnd.github+json", + "Content-Type": "application/json", + "X-GitHub-Api-Version": "2022-11-28", + } + + if method == "POST": + response = http.post( + url = url, + headers = headers, + data = json.encode(payload) if payload else None, + ).block() + elif method == "PATCH": + response = http.patch( + url = url, + headers = headers, + data = json.encode(payload) if payload else None, + ).block() + else: + return (False, 0, "unsupported method: " + method) + + success = response.status >= 200 and response.status < 300 + + # Try to parse response as JSON + body = response.body + if body: + body = json.decode(body) + + return (success, response.status, body) + + +def _do_get_request(ctx, url, token): + """ + Make a GET request to GitHub API. + + Args: + ctx: Context with http() + url: Full URL to request + token: GitHub token (PAT or Actions token) + + Returns: + (success: bool, status: int, body: dict or str) + """ + http = ctx.http() + + headers = { + "Authorization": "Bearer " + token, + "Accept": "application/vnd.github+json", + "X-GitHub-Api-Version": "2022-11-28", + } + + response = http.get( + url = url, + headers = headers, + ).block() + + success = response.status >= 200 and response.status < 300 + + body = response.body + if body: + body = json.decode(body) + + return (success, response.status, body) + + +def _do_delete_request(ctx, url, token): + """ + Make a DELETE request to GitHub API. 
+ + Args: + ctx: Context with http() + url: Full URL to request + token: GitHub token (PAT or Actions token) + + Returns: + (success: bool, status: int, body: str or None) + """ + http = ctx.http() + + headers = { + "Authorization": "Bearer " + token, + "Accept": "application/vnd.github+json", + "X-GitHub-Api-Version": "2022-11-28", + } + + response = http.delete( + url = url, + headers = headers, + ).block() + + success = response.status >= 200 and response.status < 300 + + return (success, response.status, response.body) + + +def get_pull_request(ctx, token, owner, repo, pull_number, api_base = DEFAULT_GITHUB_API): + """ + Get a pull request by number. + + Args: + ctx: Context with http() + token: GitHub token + owner: Repository owner + repo: Repository name + pull_number: The PR number + api_base: GitHub API base URL + + Returns: + dict with "success" (bool), "pull_request" (dict) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) + + success, status_code, body = _do_get_request(ctx, url, token) + + if success: + return { + "success": True, + "pull_request": body, + } + + error_msg = "request failed: " + str(status_code) + if body and type(body) == "dict" and body.get("message"): + error_msg = error_msg + " - " + body["message"] + + return {"success": False, "error": error_msg, "status": status_code} + + +def list_review_comments(ctx, token, owner, repo, pull_number, api_base = DEFAULT_GITHUB_API): + """ + List all review comments on a pull request. + + Handles pagination to retrieve all comments. 
+ + Args: + ctx: Context with http() + token: GitHub token + owner: Repository owner + repo: Repository name + pull_number: The PR number + api_base: GitHub API base URL + + Returns: + dict with "success" (bool), "comments" (list) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + all_comments = [] + + for page in range(1, 101): # max 100 pages (10,000 comments) + url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) + "/comments?per_page=100&page=" + str(page) + + success, status_code, body = _do_get_request(ctx, url, token) + + if not success: + error_msg = "request failed: " + str(status_code) + if body and type(body) == "dict" and body.get("message"): + error_msg = error_msg + " - " + body["message"] + return {"success": False, "error": error_msg, "status": status_code} + + if not body or len(body) == 0: + break + + all_comments.extend(body) + + if len(body) < 100: + break + + return { + "success": True, + "comments": all_comments, + } + + +def delete_review_comment(ctx, token, owner, repo, comment_id, api_base = DEFAULT_GITHUB_API): + """ + Delete a review comment on a pull request. + + Args: + ctx: Context with http() + token: GitHub token + owner: Repository owner + repo: Repository name + comment_id: The comment ID to delete + api_base: GitHub API base URL + + Returns: + dict with "success" (bool) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + url = api_base + "/repos/" + owner + "/" + repo + "/pulls/comments/" + str(comment_id) + + success, status_code, body = _do_delete_request(ctx, url, token) + + if success: + return {"success": True} + + error_msg = "request failed: " + str(status_code) + return {"success": False, "error": error_msg, "status": status_code} + + +def list_pull_request_files(ctx, token, owner, repo, pull_number, api_base = DEFAULT_GITHUB_API): + """ + List files changed in a pull request. 
+ + Handles pagination to retrieve all files. + + Args: + ctx: Context with http() + token: GitHub token + owner: Repository owner + repo: Repository name + pull_number: The PR number + api_base: GitHub API base URL + + Returns: + dict with "success" (bool), "files" (list) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + all_files = [] + + for page in range(1, 101): # max 100 pages (10,000 files) + url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) + "/files?per_page=100&page=" + str(page) + + success, status_code, body = _do_get_request(ctx, url, token) + + if not success: + error_msg = "request failed: " + str(status_code) + if body and type(body) == "dict" and body.get("message"): + error_msg = error_msg + " - " + body["message"] + return {"success": False, "error": error_msg, "status": status_code} + + if not body or len(body) == 0: + break + + all_files.extend(body) + + if len(body) < 100: + break + + return { + "success": True, + "files": all_files, + } + + +def create_check_run(ctx, token, owner, repo, name, head_sha, status = None, output = None, details_url = None, external_id = None, started_at = None, api_base = DEFAULT_GITHUB_API): + """ + Create a new check run on a commit. 
+ + Args: + ctx: Context with http() + token: GitHub token (PAT or GITHUB_TOKEN from Actions) + owner: Repository owner + repo: Repository name + name: Name of the check run + head_sha: The SHA of the commit to create the check on + status: Optional status ("queued", "in_progress", "completed") + output: Optional dict with "title", "summary", and optionally "text", "annotations" + details_url: Optional URL for more details + external_id: Optional external identifier + started_at: Optional ISO 8601 timestamp + api_base: GitHub API base URL (default: https://api.github.com) + + Returns: + dict with "success" (bool), "check_run_id" (int), "html_url" (str) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + url = api_base + "/repos/" + owner + "/" + repo + "/check-runs" + + payload = { + "name": name, + "head_sha": head_sha, + } + + if status: + payload["status"] = status + if output: + payload["output"] = output + if details_url: + payload["details_url"] = details_url + if external_id: + payload["external_id"] = external_id + if started_at: + payload["started_at"] = started_at + + success, status_code, body = _do_request(ctx, "POST", url, token, payload) + + if success: + return { + "success": True, + "check_run_id": body.get("id"), + "html_url": body.get("html_url"), + "response": body, + } + + error_msg = "request failed: " + str(status_code) + if body and type(body) == "dict" and body.get("message"): + error_msg = error_msg + " - " + body["message"] + + return {"success": False, "error": error_msg, "status": status_code} + + +def update_check_run(ctx, token, owner, repo, check_run_id, status = None, conclusion = None, output = None, details_url = None, completed_at = None, api_base = DEFAULT_GITHUB_API): + """ + Update an existing check run. 
+ + Args: + ctx: Context with http() + token: GitHub token + owner: Repository owner + repo: Repository name + check_run_id: The ID of the check run to update + status: Optional new status ("queued", "in_progress", "completed") + conclusion: Required if status is "completed". One of: + "action_required", "cancelled", "failure", "neutral", + "success", "skipped", "stale", "timed_out" + output: Optional dict with "title", "summary", and optionally "text", "annotations" + details_url: Optional URL for more details + completed_at: Optional ISO 8601 timestamp (required if conclusion is set) + api_base: GitHub API base URL + + Returns: + dict with "success" (bool), "check_run_id" (int) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + url = api_base + "/repos/" + owner + "/" + repo + "/check-runs/" + str(check_run_id) + + payload = {} + + if status: + payload["status"] = status + if conclusion: + payload["conclusion"] = conclusion + if output: + payload["output"] = output + if details_url: + payload["details_url"] = details_url + if completed_at: + payload["completed_at"] = completed_at + + success, status_code, body = _do_request(ctx, "PATCH", url, token, payload) + + if success: + return { + "success": True, + "check_run_id": body.get("id"), + "html_url": body.get("html_url"), + "response": body, + } + + error_msg = "request failed: " + str(status_code) + if body and type(body) == "dict" and body.get("message"): + error_msg = error_msg + " - " + body["message"] + + return {"success": False, "error": error_msg, "status": status_code} + + +def complete_check_run(ctx, token, owner, repo, check_run_id, conclusion, output = None, api_base = DEFAULT_GITHUB_API): + """ + Complete a check run with a conclusion. + + Convenience wrapper around update_check_run for completing checks. 
+ + Args: + ctx: Context with http() + token: GitHub token + owner: Repository owner + repo: Repository name + check_run_id: The ID of the check run to complete + conclusion: One of: "action_required", "cancelled", "failure", + "neutral", "success", "skipped", "stale", "timed_out" + output: Optional dict with "title", "summary" + api_base: GitHub API base URL + + Returns: + dict with "success" (bool), "check_run_id" (int) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + return update_check_run( + ctx, + token, + owner, + repo, + check_run_id, + status = "completed", + conclusion = conclusion, + output = output, + api_base = api_base, + ) + + +def build_output(title, summary, text = None, annotations = None): + """ + Helper to build an output object for check runs. + + Args: + title: Title of the check run output + summary: Summary (supports markdown) + text: Optional detailed text (supports markdown) + annotations: Optional list of annotation dicts + + Returns: + dict suitable for the "output" parameter + """ + output = { + "title": title, + "summary": summary, + } + if text: + output["text"] = text + if annotations: + output["annotations"] = annotations + return output + + +def build_annotation(path, start_line, end_line, message, annotation_level = "warning", start_column = None, end_column = None, title = None, raw_details = None): + """ + Helper to build an annotation for check run output. 
+ + Args: + path: Path of the file to annotate (relative to repo root) + start_line: Start line of the annotation + end_line: End line of the annotation + message: Short description of the feedback + annotation_level: "notice", "warning", or "failure" (default: "warning") + start_column: Optional start column + end_column: Optional end column + title: Optional title for the annotation + raw_details: Optional raw details string + + Returns: + dict suitable for the "annotations" list + """ + annotation = { + "path": path, + "start_line": start_line, + "end_line": end_line, + "annotation_level": annotation_level, + "message": message, + } + if start_column: + annotation["start_column"] = start_column + if end_column: + annotation["end_column"] = end_column + if title: + annotation["title"] = title + if raw_details: + annotation["raw_details"] = raw_details + return annotation + + +# ============================================================================= +# Pull Request Review Comments API +# ============================================================================= + +def create_review(ctx, token, owner, repo, pull_number, body = None, event = "COMMENT", comments = None, commit_id = None, api_base = DEFAULT_GITHUB_API): + """ + Create a pull request review with optional comments. + + This creates comments that appear directly on the PR diff page. 
+ + Args: + ctx: Context with http() + token: GitHub token + owner: Repository owner + repo: Repository name + pull_number: The PR number + body: Optional review body text (shown at top of review) + event: Review action - "APPROVE", "REQUEST_CHANGES", or "COMMENT" (default) + comments: Optional list of review comment dicts (use build_review_comment) + commit_id: Optional commit SHA to review (defaults to PR head) + api_base: GitHub API base URL + + Returns: + dict with "success" (bool), "review_id" (int) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) + "/reviews" + + payload = { + "event": event, + } + + if body: + payload["body"] = body + if comments: + payload["comments"] = comments + if commit_id: + payload["commit_id"] = commit_id + + success, status_code, response_body = _do_request(ctx, "POST", url, token, payload) + + if success: + return { + "success": True, + "review_id": response_body.get("id"), + "html_url": response_body.get("html_url"), + "response": response_body, + } + + error_msg = "request failed: " + str(status_code) + if response_body and type(response_body) == "dict" and response_body.get("message"): + error_msg = error_msg + " - " + response_body["message"] + + return {"success": False, "error": error_msg, "status": status_code} + + +def create_review_comment(ctx, token, owner, repo, pull_number, body, path, line = None, commit_id = None, side = "RIGHT", start_line = None, start_side = None, subject_type = None, api_base = DEFAULT_GITHUB_API): + """ + Create a single review comment on a PR diff. 
+ + Args: + ctx: Context with http() + token: GitHub token + owner: Repository owner + repo: Repository name + pull_number: The PR number + body: The comment text (supports markdown) + path: File path relative to repo root + line: Line number in the diff to comment on (required unless using subject_type="file") + commit_id: Optional commit SHA (defaults to PR head) + side: "LEFT" (deletion) or "RIGHT" (addition, default) + start_line: For multi-line comments, the first line + start_side: Side for start_line ("LEFT" or "RIGHT") + subject_type: "line" (default) or "file" for file-level comments + api_base: GitHub API base URL + + Returns: + dict with "success" (bool), "comment_id" (int) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) + "/comments" + + payload = { + "body": body, + "path": path, + } + + if subject_type: + payload["subject_type"] = subject_type + if line: + payload["line"] = line + payload["side"] = side + if commit_id: + payload["commit_id"] = commit_id + if start_line: + payload["start_line"] = start_line + if start_side: + payload["start_side"] = start_side + + success, status_code, response_body = _do_request(ctx, "POST", url, token, payload) + + if success: + return { + "success": True, + "comment_id": response_body.get("id"), + "html_url": response_body.get("html_url"), + "response": response_body, + } + + error_msg = "request failed: " + str(status_code) + if response_body and type(response_body) == "dict" and response_body.get("message"): + error_msg = error_msg + " - " + response_body["message"] + + return {"success": False, "error": error_msg, "status": status_code} + + +def build_review_comment(path, body, line = None, side = "RIGHT", start_line = None, start_side = None): + """ + Helper to build a review comment for use with create_review. 
+ + Args: + path: File path relative to repo root + body: Comment text (supports markdown) + line: Line number in the diff (the ending line for multi-line) + side: "LEFT" (deletion) or "RIGHT" (addition, default) + start_line: For multi-line comments, the starting line + start_side: Side for start_line + + Returns: + dict suitable for the "comments" list in create_review + """ + comment = { + "path": path, + "body": body, + } + if line: + comment["line"] = line + comment["side"] = side + if start_line: + comment["start_line"] = start_line + if start_side: + comment["start_side"] = start_side + return comment + + +def build_suggestion(path, line, suggested_code, message = None, start_line = None): + """ + Helper to build a code suggestion comment that shows "Apply suggestion" button. + + Args: + path: File path relative to repo root + line: Line number to suggest replacement for (end line if multi-line) + suggested_code: The replacement code (what the line(s) should become) + message: Optional message to show above the suggestion + start_line: For multi-line suggestions, the starting line + + Returns: + dict suitable for the "comments" list in create_review + + Example: + # Single line suggestion + build_suggestion( + path = "src/main.py", + line = 42, + suggested_code = "const FOO = 'bar'", + message = "Use const instead of let for constants", + ) + + # Multi-line suggestion (replace lines 10-12 with new code) + build_suggestion( + path = "src/main.py", + start_line = 10, + line = 12, + suggested_code = "function foo() {\\n return bar\\n}", + ) + """ + body = "" + if message: + body = message + "\n\n" + body = body + "```suggestion\n" + suggested_code + "\n```" + + comment = { + "path": path, + "body": body, + "line": line, + "side": "RIGHT", + } + if start_line: + comment["start_line"] = start_line + comment["start_side"] = "RIGHT" + return comment + + +def create_suggestion(ctx, token, owner, repo, pull_number, path, line, suggested_code, message = None, 
start_line = None, commit_id = None, api_base = DEFAULT_GITHUB_API): + """ + Create a single code suggestion on a PR. + + This creates an "Apply suggestion" button on the PR diff. + + Args: + ctx: Context with http() + token: GitHub token + owner: Repository owner + repo: Repository name + pull_number: The PR number + path: File path relative to repo root + line: Line number to suggest replacement for + suggested_code: The replacement code + message: Optional message above the suggestion + start_line: For multi-line suggestions, the starting line + commit_id: Optional commit SHA (defaults to PR head) + api_base: GitHub API base URL + + Returns: + dict with "success" (bool), "comment_id" (int) on success + dict with "success" (False), "error" (str), "status" (int) on failure + """ + body = "" + if message: + body = message + "\n\n" + body = body + "```suggestion\n" + suggested_code + "\n```" + + return create_review_comment( + ctx, token, owner, repo, pull_number, + body = body, + path = path, + line = line, + commit_id = commit_id, + side = "RIGHT", + start_line = start_line, + start_side = "RIGHT" if start_line else None, + api_base = api_base, + ) diff --git a/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl b/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl new file mode 100644 index 000000000..632561777 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl @@ -0,0 +1,176 @@ +""" +Agent Health Check Library + +Implements the runner agent health check step that runs at the start of every +job. It does two things: + +1. Waits for warming — polls until the runner's cache warming completes, then + reports the result. +2. Displays the last runner health check — reads the JSON file written by the + fleet service between jobs and prints its contents. 
+""" + +load("./platform.axl", "DEFAULT_WORKFLOWS_DIR", "DEFAULT_PLATFORM_DIR", "read_platform_config") + +# AWS CloudWatch log group +AWS_LOG_GROUP = "/aw/runner/cloud-init/output" + + +def _url_encode(s): + """Percent-encode a string for use in URLs.""" + result = "" + for c in s.elems(): + if c == " ": + result += "%20" + elif c == "\"": + result += "%22" + elif c == "\n": + result += "%0A" + elif c == "=": + result += "%3D" + elif c == "/": + result += "%2F" + else: + result += c + return result + + +def _bootstrap_log_url(fs, config): + """ + Construct the bootstrap log URL based on the cloud provider. + + Returns the URL string, or None if the provider cannot be determined. + """ + if fs.exists(DEFAULT_PLATFORM_DIR + "/aws"): + return _aws_bootstrap_log_url(config) + elif fs.exists(DEFAULT_PLATFORM_DIR + "/gcp"): + return _gcp_bootstrap_log_url(config) + return None + + +def _aws_bootstrap_log_url(config): + """Construct AWS CloudWatch Logs URL for bootstrap logs.""" + region = config["region"] + instance_id = config["instance_id"] + + # URL-encode the log group: replace "/" with "%252F" (double-encoded) + escaped_log_group = AWS_LOG_GROUP.replace("/", "%252F") + + return ( + "https://" + region + ".console.aws.amazon.com/cloudwatch/home" + + "?region=" + region + + "#logsV2:log-groups/log-group/" + escaped_log_group + + "/log-events/" + instance_id + ) + + +def _gcp_bootstrap_log_url(config): + """Construct GCP Cloud Logging URL for bootstrap logs.""" + instance_id = config["instance_id"] + project_id = config["account"] + + query = ( + 'resource.type="gce_instance"\n' + + 'resource.labels.instance_id=' + instance_id + '\n' + + 'log_name="projects/' + project_id + '/logs/google_metadata_script_runner"' + ) + + return ( + "https://console.cloud.google.com/logs/query" + + ";query=" + _url_encode(query) + + ";duration=P30D" + + "?referrer=search&project=" + project_id + ) + + +def _wait_for_warming(fs, config): + """ + Wait for warming to complete and report 
the result. + + If warming is not configured, returns immediately. + """ + # If warming is not configured, skip entirely + if not fs.exists(DEFAULT_PLATFORM_DIR + "/warming_enabled"): + return + + # If warming hasn't completed yet, block until it does. + # The bootstrap process runs concurrently. If it hits a critical error, + # it terminates the runner — so this loop will not hang indefinitely. + if not fs.exists(DEFAULT_PLATFORM_DIR + "/warming_complete"): + print("Waiting for warming to complete...") + for _ in forever(1000): + if fs.exists(DEFAULT_PLATFORM_DIR + "/warming_complete"): + break + + # Report warming result + cache_version_file = DEFAULT_WORKFLOWS_DIR + "/warming_current_cache" + job_history_file = DEFAULT_PLATFORM_DIR + "/runner_job_history" + + if fs.exists(cache_version_file): + version = fs.read_to_string(cache_version_file).strip() + print("Runner warmed from cache version: " + version + "\n") + elif fs.exists(job_history_file) and fs.metadata(job_history_file).size == 0: + # This is the first job on the runner and warming failed. + # On subsequent jobs, previous work has already populated caches, + # so no warning is needed. + print("Warming was unsuccessful. This first build on this runner will be cold.") + url = _bootstrap_log_url(fs, config) + if url: + print("See bootstrap logs for more details:") + print(url + "\n") + + +def _display_bazel_health(health): + """ + Display the bazel health check result. + """ + if health.outcome == "healthy": + print("\n\u2022 Bazel Health") + print("\t\u2713 bazel health check passed") + elif health.outcome == "unhealthy": + print("\n\u2022 Bazel Health") + print("\t\u2717 bazel health check failed: " + (health.message or "unknown error")) + elif health.outcome == "inconclusive": + print("\n\u2022 Bazel Health") + print("\t? bazel health check inconclusive: " + (health.message or "unknown")) + + +def _display_runner_health(fs): + """ + Display the last runner health check results. 
+ + Reads the JSON file written by the fleet service between jobs. + """ + print("\x1b[1;4;34mRunner Health\x1b[0m") + + last_health_check_file = DEFAULT_PLATFORM_DIR + "/last_health_check" + if not fs.exists(last_health_check_file): + print("Health check has not yet been run on this runner") + return + + content = fs.read_to_string(last_health_check_file) + data = json.decode(content) + + # data.timestamp is a unix epoch integer + # data.output is a pre-formatted multi-line string + print("Last run on " + str(data["timestamp"]) + "\n") + print(data["output"]) + + +def agent_health_check(ctx, health): + """ + Post health check hook for BazelFragment. + + Runs the agent health check at the start of every job: + 1. Waits for warming to complete + 2. Displays the last runner health check + + Args: + ctx: TaskContext + health: bazel.HealthCheckResult from the built-in health check + """ + fs = ctx.std.fs + config = read_platform_config(fs) + _wait_for_warming(fs, config) + _display_runner_health(fs) + _display_bazel_health(health) diff --git a/crates/aspect-cli/src/builtins/aspect/lib/linting.axl b/crates/aspect-cli/src/builtins/aspect/lib/linting.axl new file mode 100644 index 000000000..155810725 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/lib/linting.axl @@ -0,0 +1,393 @@ +"""GitHub-aware lint strategy and changed files provider.""" + +load("./github.axl", "create_review", "create_review_comment", "get_pull_request", "list_review_comments", "delete_review_comment", "list_pull_request_files") +load("./sarif.axl", "parse_sarif", "sarif_to_review_comments") +load("@aspect_rules_lint//lint/lint.axl", "Strategy", "ChangedFilesProvider") + + +def _parse_github_diff_patch(patch): + """ + Parse a GitHub file patch string to extract added line numbers. 
+ + Args: + patch: The patch string from GitHub's files API + + Returns: + List of 0-based line numbers of added lines + """ + if not patch: + return [] + + lines = [] + current_line = 0 + for line in patch.split("\n"): + if line.startswith("@@"): + # Parse hunk header: @@ -old,count +new,count @@ + parts = line.split(" ") + for part in parts: + if part.startswith("+") and part != "+++": + plus = part.removeprefix("+") + if "," in plus: + current_line = int(plus.split(",")[0]) + else: + current_line = int(plus) + break + elif line.startswith("+"): + # Added line (0-based) + lines.append(current_line - 1) + current_line += 1 + elif line.startswith("-"): + # Deleted line, don't increment current_line + pass + else: + # Context line + current_line += 1 + + return lines + + +def make_github_changed_files_provider(token, owner, repo): + """ + Create a ChangedFilesProvider that fetches changed files from the GitHub API. + + Args: + token: GitHub token + owner: Repository owner + repo: Repository name + + Returns: + ChangedFilesProvider instance + """ + def get_changed_files(ctx, state): + ref = ctx.std.env.var("GITHUB_REF") or "" + if not (ref.startswith("refs/pull/") and ref.endswith("/merge")): + return [] # not a PR build + + pr_number = int(ref.removeprefix("refs/pull/").removesuffix("/merge")) + state["pr_number"] = pr_number + + # Fetch changed files from GitHub API + result = list_pull_request_files(ctx, token, owner, repo, pr_number) + if not result["success"]: + return [] + + all_files = [] + for f in result["files"]: + if f.get("status", "") == "removed": + continue + filename = f.get("filename", "") + patch = f.get("patch", "") + added_lines = _parse_github_diff_patch(patch) + all_files.append({"file": filename, "lines": added_lines}) + + state["changed_lines"] = {f["file"]: f["lines"] for f in all_files} + return all_files + + return ChangedFilesProvider(get_changed_files = get_changed_files) + + +def _enrich_with_suggestions(ctx, comments): + """Read source 
files and append suggestion blocks for fixable comments."""
+    file_cache = {}
+    for comment in comments:
+        fixes = comment.get("_fixes")
+        if not fixes:
+            continue
+
+        path = comment["path"]
+        if path not in file_cache:
+            file_cache[path] = ctx.std.fs.read_to_string(path)
+        content = file_cache[path]
+        if not content:
+            continue
+
+        lines = content.split("\n")
+        line_num = comment["line"]
+        if line_num < 1 or line_num > len(lines):
+            continue
+
+        # Calculate byte offset of the target line start
+        line_byte_start = 0
+        for i in range(line_num - 1):
+            line_byte_start += len(lines[i]) + 1  # +1 for \n
+
+        original_line = lines[line_num - 1]
+
+        # Convert absolute byte offsets to line-relative, filter to this line
+        applicable = []
+        for f in fixes:
+            rel_start = f["byteOffset"] - line_byte_start
+            rel_end = rel_start + f["byteLength"]
+            if 0 <= rel_start and rel_end <= len(original_line):
+                applicable.append({
+                    "start": rel_start,
+                    "end": rel_end,
+                    "replacement": f["replacement"],
+                })
+
+        if not applicable:
+            continue
+
+        # Apply in reverse position order to preserve earlier offsets
+        applicable = sorted(applicable, key = lambda f: f["start"], reverse = True)
+        fixed = original_line
+        for f in applicable:
+            fixed = fixed[:f["start"]] + f["replacement"] + fixed[f["end"]:]
+
+        if fixed != original_line:
+            comment["body"] += "\n\n```suggestion\n" + fixed + "\n```"
+
+        # Clean up internal metadata
+        comment.pop("_fixes", None)
+
+
+# =============================================================================
+# GitHub Strategy Wrapper
+# =============================================================================
+
+def _build_comment_marker(tool, file, line, rule_id):
+    """Build a hidden HTML comment marker for identifying lint comments."""
+    return "<!-- aspect-lint:{}:{}:{}:{} -->".format(tool, file, line, rule_id)
+
+
+def _extract_comment_marker(body):
+    """Extract the aspect-lint marker from a comment body, or None."""
+    prefix = "<!-- aspect-lint:"
+    suffix = " -->"
+    if not body:
+        return None
+    idx = body.find(prefix)
+    if 
idx < 0: + return None + end = body.find(suffix, idx) + if end < 0: + return None + return body[idx:end + len(suffix)] + + +def _check_staleness(ctx, state): + """ + Check if the current run is stale (PR HEAD has moved past our commit). + + Returns True if stale, False otherwise. + On API failure, assumes NOT stale. + """ + gh = state["github"] + pr_number = state.get("pr_number") + if not pr_number: + return False + + result = get_pull_request( + ctx, + token = gh["token"], + owner = gh["owner"], + repo = gh["repo"], + pull_number = pr_number, + ) + + if not result["success"]: + # API failure: assume not stale (better to post stale comments than lose results) + return False + + pr = result["pull_request"] + head_sha = pr.get("head", {}).get("sha", "") + return head_sha != gh["head_sha"] + + +def _filter_by_diff(comments, changed_lines): + """Keep only comments that target lines within the PR diff.""" + if not changed_lines: + return list(comments) + return [ + c for c in comments + if (c.get("line", 0) - 1) in (changed_lines.get(c.get("path", "")) or []) + ] + + +def _get_existing_markers(ctx, gh, pr_number): + """Fetch all aspect-lint markers currently on the PR. 
Returns {marker: True}.""" + result = list_review_comments( + ctx, token = gh["token"], owner = gh["owner"], + repo = gh["repo"], pull_number = pr_number, + ) + if not result["success"]: + return {} + markers = {} + for c in result["comments"]: + marker = _extract_comment_marker(c.get("body", "")) + if marker: + markers[marker] = True + return markers + + +def _post_as_review(ctx, gh, pr_number, comments, existing_markers): + """Post comments as a single grouped review, skipping duplicates.""" + to_post = [ + c for c in comments + if _extract_comment_marker(c.get("body", "")) not in existing_markers + ] + if not to_post: + return + create_review( + ctx, token = gh["token"], owner = gh["owner"], + repo = gh["repo"], pull_number = pr_number, + body = "Lint findings", event = "COMMENT", + comments = to_post, commit_id = gh["head_sha"], + ) + + +def _post_individually(ctx, gh, pr_number, comments, existing_markers): + """Post comments one at a time, skipping duplicates.""" + for c in comments: + marker = _extract_comment_marker(c.get("body", "")) + if marker and marker in existing_markers: + continue + result = create_review_comment( + ctx, token = gh["token"], owner = gh["owner"], + repo = gh["repo"], pull_number = pr_number, + body = c["body"], path = c["path"], + line = c.get("line"), commit_id = gh["head_sha"], + side = c.get("side", "RIGHT"), + start_line = c.get("start_line"), + start_side = c.get("start_side"), + ) + if result["success"] and marker: + existing_markers[marker] = True + + +def _cleanup_comments(ctx, state): + """Delete stale comments and deduplicate.""" + gh = state["github"] + pr_number = state.get("pr_number") + if not pr_number: + return + + # Desired markers: diagnostics that are within the diff + changed_lines = state.get("changed_lines", {}) + desired = {} + for diag in state.get("diagnostics", []): + lines = changed_lines.get(diag["file"]) + if lines and (diag["line"] - 1) in lines: + marker = _build_comment_marker(diag["tool"], 
diag["file"], diag["line"], diag["rule_id"]) + desired[marker] = True + + # Fetch fresh state of comments on PR + result = list_review_comments( + ctx, token = gh["token"], owner = gh["owner"], + repo = gh["repo"], pull_number = pr_number, + ) + if not result["success"]: + return + + # Group by marker + by_marker = {} + for c in result["comments"]: + marker = _extract_comment_marker(c.get("body", "")) + if not marker: + continue + if marker not in by_marker: + by_marker[marker] = [] + by_marker[marker].append(c) + + # Delete stale (not desired) and duplicates (keep newest) + for marker, comments in by_marker.items(): + if marker not in desired: + for c in comments: + delete_review_comment( + ctx, token = gh["token"], owner = gh["owner"], + repo = gh["repo"], comment_id = c["id"], + ) + elif len(comments) > 1: + by_id = sorted(comments, key = lambda c: c["id"]) + for c in by_id[:-1]: + delete_review_comment( + ctx, token = gh["token"], owner = gh["owner"], + repo = gh["repo"], comment_id = c["id"], + ) + + +def make_github_strategy(base_strategy, token, owner, repo, mode = "grouped"): + """ + Create a GitHub-aware strategy that wraps a base strategy with GitHub reporting. 
+ + Args: + base_strategy: The underlying Strategy to delegate to + token: GitHub token + owner: Repository owner + repo: Repository name + mode: "grouped" posts one review at the end, + "streaming" posts comments individually as linters finish + + Returns: + Strategy instance with GitHub integration + """ + def setup(ctx, state): + base_strategy.setup(ctx, state) + state["github"] = { + "token": token, + "owner": owner, + "repo": repo, + "head_sha": ctx.std.env.var("GITHUB_SHA") or "", + "pending_comments": [], + "stale": False, + } + + def process(ctx, state, filepath): + # Accumulate diagnostics and build review comments + diag_count_before = len(state.get("diagnostics", [])) + base_strategy.process(ctx, state, filepath) + + gh = state["github"] + if gh["stale"]: + return + + content = ctx.std.fs.read_to_string(filepath) + sarif = parse_sarif(content) + comments = sarif_to_review_comments(sarif) + _enrich_with_suggestions(ctx, comments) + + # Stamp each comment with a hidden marker for identity tracking + new_diagnostics = state.get("diagnostics", [])[diag_count_before:] + for i, comment in enumerate(comments): + if i < len(new_diagnostics): + diag = new_diagnostics[i] + marker = _build_comment_marker( + diag["tool"], diag["file"], diag["line"], diag["rule_id"]) + comment["body"] = marker + "\n" + comment["body"] + + gh["pending_comments"].extend(comments) + + # In streaming mode, post comments as they arrive + if mode == "streaming": + pr_number = state.get("pr_number") + if not pr_number: + return + if "existing_markers" not in gh: + gh["existing_markers"] = _get_existing_markers(ctx, gh, pr_number) + ready = _filter_by_diff(gh["pending_comments"], state.get("changed_lines", {})) + _post_individually(ctx, gh, pr_number, ready, gh["existing_markers"]) + gh["pending_comments"] = [] + + def finish(ctx, state): + gh = state["github"] + + if gh["stale"] or _check_staleness(ctx, state): + gh["stale"] = True + return base_strategy.finish(ctx, state) + + pr_number = 
state.get("pr_number") + if pr_number: + if mode == "grouped": + existing = _get_existing_markers(ctx, gh, pr_number) + ready = _filter_by_diff(gh["pending_comments"], state.get("changed_lines", {})) + _post_as_review(ctx, gh, pr_number, ready, existing) + _cleanup_comments(ctx, state) + + return base_strategy.finish(ctx, state) + + return Strategy( + needs_machine = base_strategy.needs_machine, + setup = setup, + process = process, + finish = finish, + ) diff --git a/crates/aspect-cli/src/builtins/aspect/lib/platform.axl b/crates/aspect-cli/src/builtins/aspect/lib/platform.axl new file mode 100644 index 000000000..9dd1a711e --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/lib/platform.axl @@ -0,0 +1,234 @@ +""" +Platform Configuration and Bazelrc Generation Library + +Pure functions for reading platform/host configuration and generating bazelrc flags. +""" + +DEFAULT_WORKFLOWS_DIR = "/etc/aspect/workflows" +DEFAULT_STORAGE_PATH = "/mnt/ephemeral" +DEFAULT_PLATFORM_DIR = DEFAULT_WORKFLOWS_DIR + "/platform" +DEFAULT_BIN_DIR = DEFAULT_WORKFLOWS_DIR + "/bin" + +PLATFORM_CONFIG_KEYS = { + "remote_cache_endpoint": "remote_cache_endpoint", + "remote_cache_address": "remote_cache_address", + "storage_path": "storage_path", + "bessie_endpoint": "bessie_endpoint", + "build_result_ui_base_url": "build_result_ui_base_url", + "instance_id": "instance_id", + "instance_name": "instance_name", + "account": "account", + "region": "region", +} + + +def read_platform_config(fs, platform_dir = DEFAULT_PLATFORM_DIR): + """ + Read platform configuration from disk. 
+ + Args: + fs: Filesystem interface (ctx.std.fs) + platform_dir: Path to platform config directory + + Returns: + dict with platform config keys + """ + config = {} + + for key, filename in PLATFORM_CONFIG_KEYS.items(): + path = platform_dir + "/" + filename + if fs.exists(path): + content = fs.read_to_string(path) + if content: + config[key] = content.strip() + + tokens_path = platform_dir + "/rosetta_api_tokens" + if fs.exists(tokens_path): + content = fs.read_to_string(tokens_path) + if content: + config["rosetta_api_tokens"] = json.decode(content) + + if "storage_path" not in config: + config["storage_path"] = DEFAULT_STORAGE_PATH + + return config + + +def read_warming_config(fs, platform_dir = DEFAULT_PLATFORM_DIR): + """ + Read warming-specific configuration from platform config files. + + Args: + fs: Filesystem interface (ctx.std.fs) + platform_dir: Path to platform config directory + + Returns: + dict with optional keys: warming_bucket, warming_additional_paths + """ + config = {} + + bucket_path = platform_dir + "/warming_bucket" + if fs.exists(bucket_path): + content = fs.read_to_string(bucket_path) + if content: + config["warming_bucket"] = content.strip() + + paths_path = platform_dir + "/warming_additional_paths" + if fs.exists(paths_path): + content = fs.read_to_string(paths_path) + if content: + config["warming_additional_paths"] = content.strip() + + return config + + +def read_host_config(env, io): + """ + Read host/CI configuration from environment. 
+ + Args: + env: Environment interface (ctx.std.env) + io: IO interface (ctx.std.io) + + Returns: + dict with keys: supports_curses, scm_repo_name, ci_host + """ + config = { + "supports_curses": io.stdout.is_tty, + "scm_repo_name": None, + "ci_host": None, + } + + if env.var("BUILDKITE_REPO"): + config["ci_host"] = "buildkite" + config["scm_repo_name"] = _parse_git_url_name(env.var("BUILDKITE_REPO")) + config["supports_curses"] = True + elif env.var("GITHUB_REPOSITORY"): + config["ci_host"] = "github" + repo = env.var("GITHUB_REPOSITORY") + config["scm_repo_name"] = repo.split("/")[-1] if "/" in repo else repo + elif env.var("CIRCLE_PROJECT_REPONAME"): + config["ci_host"] = "circleci" + config["scm_repo_name"] = env.var("CIRCLE_PROJECT_REPONAME") + elif env.var("CI_PROJECT_NAME"): + config["ci_host"] = "gitlab" + config["scm_repo_name"] = env.var("CI_PROJECT_NAME") + + return config + + +def _parse_git_url_name(url): + if not url: + return None + name = url.rstrip("/") + if name.endswith(".git"): + name = name[:-4] + return name.split("/")[-1].split(":")[-1] + + +def parse_version(version_str): + parts = version_str.split(".") + major = int(parts[0]) if len(parts) > 0 else 0 + minor = int(parts[1]) if len(parts) > 1 else 0 + patch_str = parts[2].split("-")[0] if len(parts) > 2 else "0" + patch = int(patch_str) if patch_str else 0 + return (major, minor, patch) + + +def version_satisfies(version, constraint): + if not version or constraint == "*": + return True + + v = parse_version(version) + parts = constraint.split() + for i in range(0, len(parts), 2): + if i + 1 >= len(parts): + break + op = parts[i] + target = parse_version(parts[i + 1]) + + if op == "<" and not (v < target): + return False + elif op == "<=" and not (v <= target): + return False + elif op == ">" and not (v > target): + return False + elif op == ">=" and not (v >= target): + return False + elif op == "=" and v != target: + return False + + return True + + +def _sanitize_filename(name): + if not 
name: + return "" + result = "" + for c in name.elems(): + if c.isalnum() or c in "-_.": + result += c + else: + result += "_" + return result + + +def get_bazelrc_flags(platform_config, host_config, bazel_version = None, root_dir = None): + """ + Generate bazelrc flags from platform and host configuration. + + Args: + platform_config: dict from read_platform_config() + host_config: dict from read_host_config() + bazel_version: str like "7.0.0" or None + root_dir: absolute path to the workspace root directory + + Returns: + (startup_flags, build_flags): two lists of flag strings + """ + storage_path = platform_config.get("storage_path", DEFAULT_STORAGE_PATH) + repo_name = host_config.get("scm_repo_name") + subdir = _sanitize_filename(root_dir.rstrip("/").split("/")[-1]) if root_dir else "__main__" + + build_flags = [] + + build_flags.append("--remote_upload_local_results") + build_flags.append("--heap_dump_on_oom") + build_flags.append("--generate_json_trace_profile") + build_flags.append("--experimental_repository_cache_hardlinks") + build_flags.append("--remote_accept_cached") + + if version_satisfies(bazel_version, "< 7"): + build_flags.append("--incompatible_remote_results_ignore_disk") + + build_flags.append("--disk_cache=") + build_flags.append("--remote_timeout=3600") + build_flags.append("--remote_retries=360") + build_flags.append("--grpc_keepalive_timeout=30s") + + if version_satisfies(bazel_version, "< 8"): + build_flags.append("--noexperimental_remote_cache_compression") + else: + build_flags.append("--noremote_cache_compression") + + remote_cache_endpoint = platform_config.get("remote_cache_endpoint") + if remote_cache_endpoint: + build_flags.append("--remote_cache=" + remote_cache_endpoint) + + remote_cache_address = platform_config.get("remote_cache_address") + if remote_cache_address: + build_flags.append("--remote_bytestream_uri_prefix=" + remote_cache_address) + + build_flags.append("--repository_cache=" + storage_path + "/caches/repository") + + 
startup_flags = [] + + if repo_name: + sanitized = _sanitize_filename(repo_name) + startup_flags.append("--output_user_root=" + storage_path + "/bazel/" + sanitized + "/" + subdir) + startup_flags.append("--output_base=" + storage_path + "/output/" + sanitized + "/" + subdir) + else: + startup_flags.append("--output_user_root=" + storage_path + "/bazel/" + subdir) + startup_flags.append("--output_base=" + storage_path + "/output/" + subdir) + + return (startup_flags, build_flags) diff --git a/crates/aspect-cli/src/builtins/aspect/lib/sarif.axl b/crates/aspect-cli/src/builtins/aspect/lib/sarif.axl new file mode 100644 index 000000000..2acdb2941 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/lib/sarif.axl @@ -0,0 +1,228 @@ +""" +SARIF (Static Analysis Results Interchange Format) GitHub Translation + +Converts SARIF output from linters into GitHub PR review comments and annotations. +Base parsing utilities (parse_sarif, get_sarif_summary) are loaded from rules_lint. +""" + +load("@aspect_rules_lint//lint/sarif.axl", "parse_sarif", "get_sarif_summary") + + +def _get_level_emoji(level): + """Map SARIF level to display text.""" + if level == "error": + return "error" + elif level == "warning": + return "warning" + elif level == "note": + return "note" + return level or "warning" + + +def sarif_result_to_comment(result, tool_name): + """ + Convert a single SARIF result to a GitHub review comment dict. 
+ + Args: + result: A single result from runs[].results[] + tool_name: Name of the tool (from runs[].tool.driver.name) + + Returns: + dict suitable for create_review comments list, or None if invalid + """ + locations = result.get("locations", []) + if not locations: + return None + + location = locations[0] + physical = location.get("physicalLocation") + if not physical: + return None + + artifact = physical.get("artifactLocation", {}) + path = artifact.get("uri") + if not path: + return None + + region = physical.get("region", {}) + start_line = region.get("startLine") + end_line = region.get("endLine", start_line) + + if not start_line: + return None + + # Build comment body + level = _get_level_emoji(result.get("level", "warning")) + message_obj = result.get("message", {}) + message = message_obj.get("text", "") + + body = "**{}** ({})".format(tool_name, level) + if message: + body = body + "\n\n" + message + + comment = { + "path": path, + "line": end_line, + "side": "RIGHT", + "body": body, + } + + # Multi-line comment if start != end + if start_line != end_line: + comment["start_line"] = start_line + comment["start_side"] = "RIGHT" + + # Extract fix hints from relatedLocations + related = result.get("relatedLocations", []) + fixes = [] + for loc in related: + msg = loc.get("message", {}).get("text", "") + if not msg.startswith("try"): + continue + region = loc.get("physicalLocation", {}).get("region", {}) + byte_offset = region.get("byteOffset") + byte_length = region.get("byteLength") + if byte_offset == None or byte_length == None: + continue + # Parse replacement text from "try" message + if msg == "try": + replacement = "" + else: + text = msg[4:] # strip "try " + # Strip decorative outer quotes (clippy wraps replacements in quotes) + if len(text) >= 2 and text[0] == '"' and text[-1] == '"': + text = text[1:-1] + replacement = text + fixes.append({ + "byteOffset": byte_offset, + "byteLength": byte_length, + "replacement": replacement, + }) + if fixes: + 
comment["_fixes"] = fixes + + return comment + + +def sarif_to_review_comments(sarif): + """ + Convert SARIF output to GitHub review comments. + + Args: + sarif: Parsed SARIF dict (or JSON string) + + Returns: + List of comment dicts suitable for create_review + """ + if type(sarif) == "string": + sarif = json.decode(sarif) + + comments = [] + runs = sarif.get("runs", []) + + for run in runs: + tool = run.get("tool", {}) + driver = tool.get("driver", {}) + tool_name = driver.get("name", "Linter") + + results = run.get("results", []) + for result in results: + comment = sarif_result_to_comment(result, tool_name) + if comment: + comments.append(comment) + + return comments + + +def sarif_to_annotations(sarif): + """ + Convert SARIF output to GitHub Check Run annotations. + + Args: + sarif: Parsed SARIF dict (or JSON string) + + Returns: + List of annotation dicts suitable for build_output + """ + if type(sarif) == "string": + sarif = json.decode(sarif) + + annotations = [] + runs = sarif.get("runs", []) + + for run in runs: + tool = run.get("tool", {}) + driver = tool.get("driver", {}) + tool_name = driver.get("name", "Linter") + + results = run.get("results", []) + for result in results: + annotation = sarif_result_to_annotation(result, tool_name) + if annotation: + annotations.append(annotation) + + return annotations + + +def sarif_result_to_annotation(result, tool_name): + """ + Convert a single SARIF result to a GitHub Check Run annotation. 
+ + Args: + result: A single result from runs[].results[] + tool_name: Name of the tool + + Returns: + dict suitable for check run annotations list, or None if invalid + """ + locations = result.get("locations", []) + if not locations: + return None + + location = locations[0] + physical = location.get("physicalLocation") + if not physical: + return None + + artifact = physical.get("artifactLocation", {}) + path = artifact.get("uri") + if not path: + return None + + region = physical.get("region", {}) + start_line = region.get("startLine") + end_line = region.get("endLine", start_line) + + if not start_line: + return None + + # Map SARIF level to GitHub annotation level + sarif_level = result.get("level", "warning") + if sarif_level == "error": + annotation_level = "failure" + elif sarif_level == "warning": + annotation_level = "warning" + else: + annotation_level = "notice" + + message_obj = result.get("message", {}) + message = message_obj.get("text", "") + + annotation = { + "path": path, + "start_line": start_line, + "end_line": end_line, + "annotation_level": annotation_level, + "message": message, + "title": tool_name, + } + + # Add column info if available + start_column = region.get("startColumn") + end_column = region.get("endColumn") + if start_column: + annotation["start_column"] = start_column + if end_column: + annotation["end_column"] = end_column + + return annotation diff --git a/crates/aspect-cli/src/builtins/aspect/lib/tar.axl b/crates/aspect-cli/src/builtins/aspect/lib/tar.axl new file mode 100644 index 000000000..342c05f95 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/lib/tar.axl @@ -0,0 +1,101 @@ +"""Prebuilt bsdtar binary download and execution helper. + +Downloads a platform-appropriate bsdtar binary from +https://github.com/aspect-build/bsdtar-prebuilt and caches it locally. 
+""" + +_VERSION = "v3.8.1-fix.1" +_BASE_URL = "https://github.com/aspect-build/bsdtar-prebuilt/releases/download/" + _VERSION + +_SHA256 = { + "darwin-amd64": "e8893f7d775d070a333dc386b2aab70dfa43411fcd890222c81212724be7de25", + "darwin-arm64": "48c1bd214aac26487eaf623d17b77ebce4db3249be851a54edcc940d09d50999", + "linux-amd64": "fff8f72758a52e60fe82beae64b18e7996467013ffe8bec09173d1ba6b66e490", + "linux-arm64": "683468ae45d371e4f392b0e5a524440f6f4507d7da0db60d03ff31f3cf951fc3", +} + +_ARCH_MAP = {"x86_64": "amd64", "aarch64": "arm64"} +_OS_MAP = {"macos": "darwin", "linux": "linux"} + + +def _platform_key(ctx): + os = _OS_MAP.get(ctx.std.env.os()) + arch = _ARCH_MAP.get(ctx.std.env.arch()) + if not os or not arch: + fail("unsupported platform: " + ctx.std.env.os() + "/" + ctx.std.env.arch()) + return os, arch + + +def _cache_dir(ctx): + home = ctx.std.env.home_dir() + if not home: + return None + return home + "/.cache/aspect/bsdtar/" + _VERSION + + +def bsdtar(ctx): + """Return the path to a cached bsdtar binary, downloading if necessary.""" + cache = _cache_dir(ctx) + if not cache: + fail("cannot determine home directory for bsdtar cache") + + os, arch = _platform_key(ctx) + bin_path = cache + "/tar" + + if ctx.std.fs.exists(bin_path): + return bin_path + + asset = "tar_" + os + "_" + arch + url = _BASE_URL + "/" + asset + sha256 = _SHA256.get(os + "-" + arch) + + ctx.std.fs.create_dir_all(cache) + ctx.http().download(url = url, output = bin_path, mode = 0o755, sha256 = sha256).block() + + return bin_path + + +def tar_create_from_dir(ctx, archive_path, dir_path): + """Create a tar.gz archive of a directory's contents. 
+ + Args: + ctx: TaskContext + archive_path: str - output .tar.gz path + dir_path: str - directory to archive + + Returns: + bool - True on success + """ + bin_path = bsdtar(ctx) + child = ctx.std.process.command(bin_path).args([ + "czf", archive_path, "-C", dir_path, ".", + ]).stdout("inherit").stderr("inherit").spawn() + status = child.wait() + return status.code == 0 + + +def tar_create(ctx, archive_path, mtree_spec): + """Create a tar.gz archive from an mtree spec string. + + Args: + ctx: TaskContext + archive_path: str - output .tar.gz path + mtree_spec: str - mtree spec content mapping archive paths to source files + + Returns: + bool - True on success + """ + bin_path = bsdtar(ctx) + mtree_path = archive_path + ".mtree" + + ctx.std.fs.write(mtree_path, mtree_spec) + + child = ctx.std.process.command(bin_path).args([ + "czf", archive_path, "@" + mtree_path, + ]).stdout("inherit").stderr("inherit").spawn() + status = child.wait() + + if ctx.std.fs.exists(mtree_path): + ctx.std.fs.remove_file(mtree_path) + + return status.code == 0 diff --git a/crates/aspect-cli/src/builtins/aspect/tasks/delivery.axl b/crates/aspect-cli/src/builtins/aspect/tasks/delivery.axl new file mode 100644 index 000000000..4c5d77696 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/tasks/delivery.axl @@ -0,0 +1,237 @@ +""" +Delivery task that coordinates artifact delivery via deliveryd. + +Delivers each target via bazel run with stamping enabled, and signs artifacts +to prevent re-delivery. + +Uses deliveryd (Unix socket HTTP server) for all delivery state operations. 
+""" + +load("../fragments.axl", "DeliveryFragment") + +load( + "../lib/deliveryd.axl", + deliveryd_query = "query", + deliveryd_deliver = "deliver", + deliveryd_record = "record", + deliveryd_delete_artifact = "delete_artifact", +) + + +# ANSI codes +_BOLD = "\033[1m" +_GREEN = "\033[32m" +_YELLOW = "\033[33m" +_RED = "\033[31m" +_RESET = "\033[0m" + +def _style(text, codes, is_tty): + """Wrap text in ANSI codes if terminal is TTY.""" + if is_tty: + return codes + text + _RESET + return text + +def _run_bazel(ctx, verb, target, flags): + """ + Run a bazel command and return the exit code. + TODO: Implement ctx.bazel.run() when available. + """ + print(" [TODO] bazel {} {} {}".format(verb, " ".join(flags), target)) + return 0 # Simulate success + +# Helper to pad string to width +def pad(s, width): + return s + " " * (width - len(s)) + + +def _deliver_target(ctx, endpoint, ci_host, workspace, build_url, bazel_flags, label, is_forced, target_state, is_tty): + """ + Deliver a single target. + + Args: + is_forced: If True, skip signature check and always deliver. + target_state: Dict with {output_sha, delivered, delivered_by} from deliveryd, or None. + is_tty: Whether terminal supports colors. + + Returns (status: str, message: str) where status is one of: + - "success": Successfully delivered + - "skipped": Already delivered (only for non-forced) + - "build_failed": Bazel build failed + - "run_failed": Bazel run failed + """ + output_sha = target_state.get("output_sha") if target_state else None + + # For non-forced targets, check if already delivered + if not is_forced: + if target_state: + if target_state.get("delivered"): + return ("skipped", "Already delivered by {}".format(target_state.get("delivered_by"))) + else: + # No state found - target may have been added before signatures + # were introduced. Proceed with delivery. 
+ print(" {}: No delivery state found for {}, bypassing signature check".format( + _style("Warning", _BOLD + _YELLOW, is_tty), label)) + + # Run bazel to deliver the target with stamping + print(" {} {}...".format(_style("Delivering", _BOLD, is_tty), label)) + exit_code = _run_bazel(ctx, "run", label, bazel_flags) + + if exit_code != 0: + # Delivery failed - delete artifact metadata so it can be retried + if output_sha: + deliveryd_delete_artifact(ctx, endpoint, ci_host, output_sha, workspace) + return ("run_failed", "Delivery failed with exit code {}".format(exit_code)) + + # Sign the artifact to mark as delivered + if output_sha: + deliveryd_deliver(ctx, endpoint, ci_host, output_sha, workspace, build_url) + + return ("success", "Delivered successfully") + +def _delivery_impl(ctx): + fragment = ctx.fragments[DeliveryFragment] + fragment.delivery_start() + + # Check if terminal supports colors + is_tty = ctx.std.io.stdout.is_tty + + # deliveryd socket path (runners start deliveryd automatically and expose the socket via env) + endpoint = ctx.std.env.var("ASPECT_WORKFLOWS_DELIVERY_API_ENDPOINT") + if not endpoint: + fail("ASPECT_WORKFLOWS_DELIVERY_API_ENDPOINT is not set. 
deliveryd must be running.") + + # Delivery context + ci_host = ctx.args.ci_host + workspace = ctx.args.workspace + build_url = ctx.args.build_url + commit_sha = ctx.args.commit_sha + + # Build bazel flags for delivery + # Default: --stamp --noremote_upload_local_results --remote_download_outputs=toplevel + bazel_flags = ctx.args.bazel_flag + if not bazel_flags: + bazel_flags = ["--stamp"] + + # Add flags that Workflows forces during delivery + bazel_flags.append("--noremote_upload_local_results") + bazel_flags.append("--remote_download_outputs=toplevel") + + print(_style("Delivery:", _BOLD, is_tty)) + print(" {}: {}".format(_style("deliveryd", _BOLD, is_tty), endpoint)) + print(" {}: {}".format(_style("Host", _BOLD, is_tty), ci_host)) + print(" {}: {}".format(_style("Commit", _BOLD, is_tty), commit_sha)) + print(" {}: {}".format(_style("Workspace", _BOLD, is_tty), workspace)) + print(" {}: {}".format(_style("URL", _BOLD, is_tty), build_url)) + print(" {}: {}".format(_style("Flags", _BOLD, is_tty), bazel_flags)) + print() + + + targets = ctx.args.targets + forced_targets = ctx.args.force_target + + if not targets: + print(_style("No targets to deliver", _BOLD + _YELLOW, is_tty)) + return 0 + + print(_style("Found {} target(s) to deliver:".format(len(targets)), _BOLD, is_tty)) + for t in targets: + forced_marker = _style(" (forced)", _YELLOW, is_tty) if t in forced_targets else "" + print(" - {}{}".format(t, forced_marker)) + print("") + + # Record each target with deliveryd (so they can be queried/signed) + for label in targets: + # Use hash of commit_sha + label as output_sha + # TODO: query remote-cache action key to determine target hash. 
+ output_sha = hash(commit_sha + label) + deliveryd_record(ctx, endpoint, ci_host, commit_sha, workspace, label, str(output_sha)) + + # Query deliveryd for delivery state of all targets + delivery_state = deliveryd_query(ctx, endpoint, ci_host, commit_sha, workspace) + + # Track results + results = [] # List of (label, status, delivered_by) + success_count = 0 + skipped_count = 0 + failed_count = 0 + + for label in targets: + is_forced = label in forced_targets + target_state = delivery_state.get(label) + status, message = _deliver_target( + ctx, endpoint, ci_host, workspace, build_url, + bazel_flags, label, is_forced, target_state, is_tty + ) + + fragment.deliver_target(label, is_forced) + + forced_marker = " (FORCED)" if is_forced else "" + if status == "success": + success_count += 1 + results.append((label, "OK" + forced_marker, "ok", "-")) + elif status == "skipped": + skipped_count += 1 + delivered_by = target_state.get("delivered_by") if target_state else "-" + results.append((label, "SKIP", "skip", delivered_by or "-")) + else: # build_failed or run_failed + failed_count += 1 + results.append((label, "FAIL" + forced_marker, "fail", "-")) + + # Calculate column width for alignment + max_label_width = len("TARGET") + for label, _, _, _ in results: + if len(label) > max_label_width: + max_label_width = len(label) + + + # Calculate status column width + max_status_width = len("STATUS") + for _, status_text, _, _ in results: + if len(status_text) > max_status_width: + max_status_width = len(status_text) + + # Style mapping for status types (bold + color) + status_styles = {"ok": _BOLD + _GREEN, "skip": _BOLD + _YELLOW, "fail": _BOLD + _RED} + + # Print table header (bold) + print("") + header = " {} {} {}".format(pad("TARGET", max_label_width), pad("STATUS", max_status_width), "DELIVERED BY") + print(_style(header, _BOLD, is_tty)) + for label, status_text, status_type, delivered_by in results: + styled_status = _style(status_text, status_styles[status_type], 
is_tty) + # Pad based on original text length, then apply style + padding = " " * (max_status_width - len(status_text)) + print(" {} {}{} {}".format(pad(label, max_label_width), styled_status, padding, delivered_by)) + + # Summary (single line with bold colors) + print("") + summary_parts = [ + _style("{} delivered".format(success_count), _BOLD + _GREEN, is_tty), + _style("{} skipped".format(skipped_count), _BOLD + _YELLOW, is_tty), + _style("{} failed".format(failed_count), _BOLD + _RED, is_tty), + ] + print("{} {}".format(_style("Summary:", _BOLD, is_tty), ", ".join(summary_parts))) + + fragment.delivery_end() + + if failed_count > 0: + return 1 + + return 0 + + + +delivery = task( + name = "delivery", + implementation = _delivery_impl, + fragments = [DeliveryFragment], + args = { + "ci_host": args.string(default = "bk"), + "commit_sha": args.string(), + "workspace": args.string(default = "."), + "build_url": args.string(default = "-"), + "bazel_flag": args.string_list(default = []), + "force_target": args.string_list(default = []), + "targets": args.trailing_var_args() + }, +) diff --git a/crates/aspect-cli/src/builtins/aspect/tasks/dummy_format.axl b/crates/aspect-cli/src/builtins/aspect/tasks/dummy_format.axl new file mode 100644 index 000000000..5d6470063 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/tasks/dummy_format.axl @@ -0,0 +1,22 @@ +""" +A stub 'format' task registered when aspect_rules_lint is not installed. +Prints a helpful message directing the user to install the lint package. 
+""" + +def _format_impl(ctx: TaskContext) -> int: + ctx.std.io.stderr.write("Error: The format task requires the aspect_rules_lint package.\n") + ctx.std.io.stderr.write("\n") + ctx.std.io.stderr.write("Install it by running:\n") + ctx.std.io.stderr.write("\n") + ctx.std.io.stderr.write(" aspect axl add gh:aspect-build/rules_lint\n") + ctx.std.io.stderr.write("\n") + return 1 + +format = task( + name = "format", + implementation = _format_impl, + description = "Format source code (requires aspect_rules_lint)", + args = { + "all": args.positional(minimum = 0, maximum = 1000) + } +) diff --git a/crates/aspect-cli/src/builtins/aspect/tasks/dummy_lint.axl b/crates/aspect-cli/src/builtins/aspect/tasks/dummy_lint.axl new file mode 100644 index 000000000..345211053 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/tasks/dummy_lint.axl @@ -0,0 +1,22 @@ +""" +A stub 'lint' task registered when aspect_rules_lint is not installed. +Prints a helpful message directing the user to install the lint package. +""" + +def _lint_impl(ctx: TaskContext) -> int: + ctx.std.io.stderr.write("Error: The lint task requires the aspect_rules_lint package.\n") + ctx.std.io.stderr.write("\n") + ctx.std.io.stderr.write("Install it by running:\n") + ctx.std.io.stderr.write("\n") + ctx.std.io.stderr.write(" aspect axl add gh:aspect-build/rules_lint\n") + ctx.std.io.stderr.write("\n") + return 1 + +lint = task( + name = "lint", + implementation = _lint_impl, + description = "Run linters (requires aspect_rules_lint)", + args = { + "all": args.positional(minimum = 0, maximum = 1000) + } +) diff --git a/crates/aspect-cli/src/builtins/aspect/test.axl b/crates/aspect-cli/src/builtins/aspect/test.axl new file mode 100644 index 000000000..61cb59746 --- /dev/null +++ b/crates/aspect-cli/src/builtins/aspect/test.axl @@ -0,0 +1,103 @@ +""" +A default 'test' task that wraps a 'bazel test' command. 
+""" +load("./fragments.axl", "BazelFragment") + +def _collect_bes_from_args(ctx): + """Collect BES sinks from CLI args (--bes_backend/--bes_header).""" + sinks = [] + for bes_backend in ctx.args.bes_backend: + metadata = {} + for bes_header in ctx.args.bes_header: + (k, _, v) = bes_header.partition("=") + metadata[k] = v + sinks.append( + bazel.build_events.grpc( + uri = bes_backend, + metadata = metadata, + ) + ) + return sinks + +def _test_impl(ctx: TaskContext) -> int: + health = ctx.bazel.health_check() + fragment = ctx.fragments[BazelFragment] + + if fragment.post_health_check: + fragment.post_health_check(ctx, health) + + if health.outcome == "unhealthy": + fail("Bazel server is unhealthy: " + health.message) + + # Flags: accumulate data, then optionally transform + flags = ["--isatty=" + str(int(ctx.std.io.stdout.is_tty))] + flags.extend(ctx.args.bazel_flag) + flags.extend(fragment.extra_flags) + if fragment.flags: + flags = fragment.flags(flags) + + startup_flags = list(ctx.args.bazel_startup_flag) + startup_flags.extend(fragment.extra_startup_flags) + if fragment.startup_flags: + startup_flags = fragment.startup_flags(startup_flags) + + # BES: merge arg-based sinks with fragment sinks + build_events = _collect_bes_from_args(ctx) + if fragment.build_event_sinks: + build_events.extend(fragment.build_event_sinks) + + # Coerce to bool/list for ctx.bazel.test: + # - non-empty list → stream to those sinks + build_events() iterator. 
+ # - True → stream without explicit sinks (build_event handler only) + # - False → no BEP stream at all + if not build_events: + if fragment.build_event: + build_events = True + else: + build_events = False + + # Shared mutable state across build_start / build_event / build_end + state = {"_task_name": "test"} + + for handler in fragment.build_start: + handler(ctx, state) + + for _ in range(10): + test = ctx.bazel.test( + build_events = build_events, + execution_log = fragment.execution_log_sinks if fragment.execution_log_sinks else False, + flags = flags, + startup_flags = startup_flags, + *ctx.args.target_pattern, + ) + + if fragment.build_event: + for event in test.build_events(): + for handler in fragment.build_event: + handler(ctx, state, event) + + build_status = test.wait() + + if build_status.code == 0 or not fragment.build_retry(build_status.code): + break + + for handler in fragment.build_end: + handler(ctx, state, build_status.code) + + return build_status.code + +test = task( + implementation = _test_impl, + fragments = [ + BazelFragment + ], + args = { + # TODO: Support a long --pattern_file like bazel does (@./targets) + # TODO: Support - (list from stdin) + "target_pattern": args.positional(minimum = 1, maximum = 512, default = ["..."]), + "bazel_flag": args.string_list(), + "bazel_startup_flag": args.string_list(), + "bes_backend": args.string_list(), + "bes_header": args.string_list(), + }, +) diff --git a/crates/aspect-cli/src/builtins/mod.rs b/crates/aspect-cli/src/builtins/mod.rs new file mode 100644 index 000000000..03dd6533c --- /dev/null +++ b/crates/aspect-cli/src/builtins/mod.rs @@ -0,0 +1,136 @@ +use std::path::PathBuf; + +/// A builtin module: name and its embedded files (relative path, content). 
+#[cfg(not(debug_assertions))] +struct Builtin { + name: &'static str, + files: &'static [(&'static str, &'static str)], +} + +#[cfg(not(debug_assertions))] +const ASPECT: Builtin = Builtin { + name: "aspect", + files: &[ + ("bazel.axl", include_str!("./aspect/bazel.axl")), + ("build.axl", include_str!("./aspect/build.axl")), + ("fragments.axl", include_str!("./aspect/fragments.axl")), + ("test.axl", include_str!("./aspect/test.axl")), + ("axl_add.axl", include_str!("./aspect/axl_add.axl")), + ("MODULE.aspect", include_str!("./aspect/MODULE.aspect")), + // config/ + ( + "config/builtins.axl", + include_str!("./aspect/config/builtins.axl"), + ), + ( + "config/delivery.axl", + include_str!("./aspect/config/delivery.axl"), + ), + ("config/lint.axl", include_str!("./aspect/config/lint.axl")), + ( + "config/nolint.axl", + include_str!("./aspect/config/nolint.axl"), + ), + ( + "config/artifacts.axl", + include_str!("./aspect/config/artifacts.axl"), + ), + // tasks/ + ( + "tasks/delivery.axl", + include_str!("./aspect/tasks/delivery.axl"), + ), + ( + "tasks/dummy_lint.axl", + include_str!("./aspect/tasks/dummy_lint.axl"), + ), + ( + "tasks/dummy_format.axl", + include_str!("./aspect/tasks/dummy_format.axl"), + ), + // lib/ + ( + "lib/deliveryd.axl", + include_str!("./aspect/lib/deliveryd.axl"), + ), + ("lib/github.axl", include_str!("./aspect/lib/github.axl")), + ("lib/linting.axl", include_str!("./aspect/lib/linting.axl")), + ( + "lib/platform.axl", + include_str!("./aspect/lib/platform.axl"), + ), + ("lib/sarif.axl", include_str!("./aspect/lib/sarif.axl")), + ( + "lib/health_check.axl", + include_str!("./aspect/lib/health_check.axl"), + ), + ( + "lib/artifacts.axl", + include_str!("./aspect/lib/artifacts.axl"), + ), + ("lib/tar.axl", include_str!("./aspect/lib/tar.axl")), + ( + "lib/environment.axl", + include_str!("./aspect/lib/environment.axl"), + ), + ( + "lib/build_metadata.axl", + include_str!("./aspect/lib/build_metadata.axl"), + ), + ], +}; + 
+#[cfg(not(debug_assertions))] +const ALL: &[&Builtin] = &[&ASPECT]; + +#[cfg(debug_assertions)] +pub fn expand_builtins( + _root_dir: PathBuf, + _broot: PathBuf, +) -> std::io::Result> { + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + Ok(vec![( + "aspect".to_string(), + manifest_dir.join("src/builtins/aspect"), + )]) +} + +#[cfg(not(debug_assertions))] +pub fn expand_builtins( + _root_dir: PathBuf, + broot: PathBuf, +) -> std::io::Result> { + use std::fs; + + // Hash all builtin content to detect staleness across versions + let content_hash = { + let mut combined = String::new(); + for builtin in ALL { + combined.push_str(builtin.name); + for (path, content) in builtin.files { + combined.push_str(path); + combined.push_str(content); + } + } + sha256::digest(combined) + }; + + let builtins_root = broot.join(content_hash); + + // Extract each builtin into its own directory + for builtin in ALL { + let dir = builtins_root.join(builtin.name); + if !dir.exists() { + for (path, content) in builtin.files { + let out_path = dir.join(path); + fs::create_dir_all(out_path.parent().unwrap())?; + fs::write(&out_path, content)?; + } + } + } + + Ok(ALL + .iter() + .map(|b| (b.name.to_string(), builtins_root.join(b.name))) + .collect()) +} diff --git a/crates/aspect-cli/src/main.rs b/crates/aspect-cli/src/main.rs index c998ebabc..5a85bce3c 100644 --- a/crates/aspect-cli/src/main.rs +++ b/crates/aspect-cli/src/main.rs @@ -1,9 +1,10 @@ +mod builtins; mod cmd_tree; mod flags; mod helpers; mod trace; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::env::var; use std::path::PathBuf; use std::process::ExitCode; @@ -14,7 +15,7 @@ use axl_runtime::engine::task_arg::TaskArg; use axl_runtime::engine::task_args::TaskArgs; use axl_runtime::eval::{self, FrozenTaskModuleLike, ModuleScope, execute_task_with_args}; use axl_runtime::module::{AXL_MODULE_FILE, AXL_ROOT_MODULE_NAME}; -use axl_runtime::module::{AxlModuleEvaluator, DiskStore}; +use 
axl_runtime::module::{AxlModuleEvaluator, DiskStore, UseConfigEntry}; use clap::{Arg, ArgAction, Command}; use miette::{IntoDiagnostic, miette}; use starlark::environment::FrozenModule; @@ -85,26 +86,33 @@ async fn main() -> miette::Result { .evaluate(AXL_ROOT_MODULE_NAME.to_string(), repo_root.clone()) .into_diagnostic()?; - // Expand all module dependencies (including the builtin @aspect module) to the disk store and collect their root paths. - // This results in a Vec of (String, PathBuf) such as - // [ - // ( "aspect", "/Users/username/Library/Caches/axl/deps/27e6d838c365a7c5d79674a7b6c7ec7b8d22f686dbcc8088a8d1454a6489a9ae/aspect" ), - // ( "experimental", "/Users/username/Library/Caches/axl/deps/27e6d838c365a7c5d79674a7b6c7ec7b8d22f686dbcc8088a8d1454a6489a9ae/experimental" ), - // ( "local", "/Users/username/Library/Caches/axl/deps/27e6d838c365a7c5d79674a7b6c7ec7b8d22f686dbcc8088a8d1454a6489a9ae/local" ), - // ] + // Expand builtins to disk and pass them to the store expander. + let builtins = builtins::expand_builtins(repo_root.clone(), disk_store.builtins_path()) + .into_diagnostic()?; + + // Expand all module dependencies (including builtins) to the disk store. + // Returns (name, path, use_config) for each module. let module_roots = disk_store - .expand_store(&root_module_store) + .expand_store(&root_module_store, builtins) .await .into_diagnostic()?; - // Collect root and dependency modules into a vector of modules with exported tasks. + // Build the set of deps with use_config enabled (as determined by disk_store) + let use_config_deps: HashSet = module_roots + .iter() + .filter(|(_, _, use_config)| *use_config) + .map(|(name, _, _)| name.clone()) + .collect(); + + // Collect root and dependency modules into a vector of modules with exported tasks and configs. 
let mut modules = vec![( root_module_store.module_name, root_module_store.module_root, root_module_store.tasks.take(), + root_module_store.configs.take(), )]; - for (name, root) in module_roots { + for (name, root, _) in module_roots { let module_store = module_eval.evaluate(name, root).into_diagnostic()?; if debug_mode() { eprintln!( @@ -116,6 +124,7 @@ async fn main() -> miette::Result { module_store.module_name, module_store.module_root, module_store.tasks.take(), + module_store.configs.take(), )) } @@ -170,7 +179,11 @@ async fn main() -> miette::Result { HashMap)>, )> = vec![]; - for (module_name, module_root, map) in modules.into_iter() { + // Collect configs from each module for use_config processing + let mut module_configs: Vec<(String, PathBuf, Vec)> = vec![]; + + for (module_name, module_root, map, configs) in modules.into_iter() { + module_configs.push((module_name.clone(), module_root.clone(), configs)); let mut mmap = HashMap::new(); for (path, (label, symbols)) in map.into_iter() { let rel_path = path.strip_prefix(&module_root).unwrap().to_path_buf(); @@ -234,17 +247,79 @@ async fn main() -> miette::Result { } } + // Build scoped configs: package configs first (from use_config), then customer configs last + let root_scope = ModuleScope { + name: AXL_ROOT_MODULE_NAME.to_string(), + path: repo_root.clone(), + }; + + // Collect resolved package names for requires/conflicts checking + let resolved_packages: HashSet = module_configs + .iter() + .map(|(name, _, _)| name.clone()) + .collect(); + + // Build package configs from use_config() declarations (dependency order, leaves first) + let mut scoped_configs: Vec<(ModuleScope, PathBuf, String)> = vec![]; + + for (module_name, module_root, configs_entries) in &module_configs { + // Skip if root module didn't enable use_config for this dep (root module is always allowed) + if module_name != AXL_ROOT_MODULE_NAME && !use_config_deps.contains(module_name) { + continue; + } + for entry in configs_entries { + // 
Check requires: all referenced packages must be present + let requires_met = entry.requires.iter().all(|(pkg, version_constraint)| { + if !resolved_packages.contains(pkg) { + return false; + } + // Version constraint checking deferred until modules carry version metadata + if version_constraint.is_some() { + // TODO: implement version constraint checking with semver crate + // For now, presence check is sufficient + } + true + }); + // Check conflicts: all referenced packages must be absent + let conflicts_clear = entry + .conflicts + .iter() + .all(|pkg| !resolved_packages.contains(pkg)); + + if requires_met && conflicts_clear { + let scope = ModuleScope { + name: module_name.clone(), + path: module_root.clone(), + }; + let abs_path = module_root.join(&entry.path); + scoped_configs.push((scope, abs_path, entry.function.clone())); + + if debug_mode() { + eprintln!( + "use_config: @{} -> {} (fn: {})", + module_name, entry.path, entry.function + ); + } + } else if debug_mode() { + eprintln!( + "use_config: @{} -> {} SKIPPED (requires={}, conflicts={})", + module_name, entry.path, requires_met, conflicts_clear + ); + } + } + } + + // Append customer configs (filesystem-discovered) — always last + for path in configs.iter() { + scoped_configs.push((root_scope.clone(), path.clone(), "config".to_string())); + } + // Run all config functions, passing in vector of tasks for configuration - let tasks = config_eval - .run_all( - ModuleScope { - name: AXL_ROOT_MODULE_NAME.to_string(), - path: repo_root.clone(), - }, - configs.clone(), - tasks, - ) + let config_result = config_eval + .run_all(scoped_configs, tasks) .into_diagnostic()?; + let tasks = config_result.tasks; + let fragment_data = config_result.fragment_data; // Build the command tree from the evaluated and configured tasks. 
let mut tree = CommandTree::default(); @@ -349,7 +424,7 @@ async fn main() -> miette::Result { let store = axl_loader.new_store(task.path.clone()); // Execute the selected task using the new execution function - let exit_code = execute_task_with_args(task, store, |heap| { + let exit_code = execute_task_with_args(task, store, &fragment_data, |heap| { let mut args = TaskArgs::new(); for (k, v) in definition.args().iter() { let val = match v { diff --git a/crates/axl-runtime/BUILD.bazel b/crates/axl-runtime/BUILD.bazel index 5c8b2ff4a..9b8dca150 100644 --- a/crates/axl-runtime/BUILD.bazel +++ b/crates/axl-runtime/BUILD.bazel @@ -24,6 +24,7 @@ rust_library( "@crates//:liquid-core", "@crates//:liquid", "@crates//:minijinja", + "@crates//:nix", "@crates//:prost", "@crates//:rand", "@crates//:reqwest", @@ -57,8 +58,4 @@ rust_library( "//crates/aspect-cli:__pkg__", "//crates/axl-lsp:__pkg__", ], - compile_data = glob([ - "src/builtins/**/*.axl", - "src/builtins/**/*.aspect", - ]), ) diff --git a/crates/axl-runtime/Cargo.toml b/crates/axl-runtime/Cargo.toml index 8ac87bf94..cd790570b 100644 --- a/crates/axl-runtime/Cargo.toml +++ b/crates/axl-runtime/Cargo.toml @@ -32,7 +32,7 @@ http-body-util = "0.1.3" url = "2.5.4" zstd = "0.13.3" -nix = { version = "0.30.1", features = ["fs"] } +nix = { version = "0.30.1", features = ["fs", "signal"] } wasmi = "0.51.0" wasmi_wasi = "0.51.0" @@ -61,6 +61,7 @@ dirs = "6.0.0" fibre = "0.5.0" flate2 = "1.1.2" rand = "0.8.5" +semver = "1" sha256 = "1.6.0" ssri = "9.2.0" base64 = "0.22.1" diff --git a/crates/axl-runtime/src/builtins/aspect/MODULE.aspect b/crates/axl-runtime/src/builtins/aspect/MODULE.aspect deleted file mode 100644 index bd4ae78fe..000000000 --- a/crates/axl-runtime/src/builtins/aspect/MODULE.aspect +++ /dev/null @@ -1,3 +0,0 @@ -use_task("build.axl", "build") -use_task("test.axl", "test") -use_task("axl_add.axl", "add") diff --git a/crates/axl-runtime/src/builtins/aspect/build.axl 
b/crates/axl-runtime/src/builtins/aspect/build.axl deleted file mode 100644 index 28a27304e..000000000 --- a/crates/axl-runtime/src/builtins/aspect/build.axl +++ /dev/null @@ -1,52 +0,0 @@ -""" -A default 'build' task that wraps a 'bazel build' command. -""" - -def impl(ctx: TaskContext) -> int: - stdout = ctx.std.io.stdout - - build_events = True - for bes_backend in ctx.args.bes_backend: - metadata = {} - for bes_header in ctx.args.bes_header: - (k, _, v) = bes_header.partition("=") - metadata[k] = v - if type(build_events) != "list": - build_events = [] - build_events.append( - bazel.build_events.grpc( - uri = bes_backend, - metadata = metadata - ) - ) - - bazel_flags = ["--isatty=" + str(int(ctx.std.io.stdout.is_tty))] - for bazel_flag in ctx.args.bazel_flag: - bazel_flags.append(bazel_flag) - - bazel_startup_flags = [] - for flag in ctx.args.bazel_startup_flag: - bazel_startup_flags.append(flag) - - build = ctx.bazel.build( - build_events = build_events, - flags = bazel_flags, - startup_flags = bazel_startup_flags, - *ctx.args.target_pattern - ) - - build_status = build.wait() - return build_status.code - -build = task( - implementation = impl, - args = { - # TODO: Support a long --pattern_file like bazel does (@./targets) - # TODO: Support - (list from stdin) - "target_pattern": args.positional(minimum = 1, maximum = 512, default = ["..."]), - "bazel_flag": args.string_list(), - "bazel_startup_flag": args.string_list(), - "bes_backend": args.string_list(), - "bes_header": args.string_list(), - } -) diff --git a/crates/axl-runtime/src/builtins/aspect/test.axl b/crates/axl-runtime/src/builtins/aspect/test.axl deleted file mode 100644 index 6d7b1edce..000000000 --- a/crates/axl-runtime/src/builtins/aspect/test.axl +++ /dev/null @@ -1,52 +0,0 @@ -""" -A default 'test' task that wraps a 'bazel test' command. 
-""" - -def _test_impl(ctx: TaskContext) -> int: - stdout = ctx.std.io.stdout - - build_events = True - for bes_backend in ctx.args.bes_backend: - metadata = {} - for bes_header in ctx.args.bes_header: - (k, _, v) = bes_header.partition("=") - metadata[k] = v - if type(build_events) != "list": - build_events = [] - build_events.append( - bazel.build_events.grpc( - uri = bes_backend, - metadata = metadata - ) - ) - - bazel_flags = ["--isatty=" + str(int(ctx.std.io.stdout.is_tty))] - for flag in ctx.args.bazel_flag: - bazel_flags.append(flag) - - bazel_startup_flags = [] - for flag in ctx.args.bazel_startup_flag: - bazel_startup_flags.append(flag) - - test = ctx.bazel.test( - build_events = build_events, - flags = bazel_flags, - startup_flags = bazel_startup_flags, - *ctx.args.target_pattern - ) - - build_status = test.wait() - return build_status.code - -test = task( - implementation = _test_impl, - args = { - # TODO: Support a long --pattern_file like bazel does (@./targets) - # TODO: Support - (list from stdin) - "target_pattern": args.positional(minimum = 1, maximum = 512, default = ["..."]), - "bazel_flag": args.string_list(), - "bazel_startup_flag": args.string_list(), - "bes_backend": args.string_list(), - "bes_header": args.string_list() - } -) diff --git a/crates/axl-runtime/src/builtins/mod.rs b/crates/axl-runtime/src/builtins/mod.rs deleted file mode 100644 index babd7f65e..000000000 --- a/crates/axl-runtime/src/builtins/mod.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::path::PathBuf; - -#[cfg(debug_assertions)] -pub fn expand_builtins( - _root_dir: PathBuf, - _broot: PathBuf, -) -> std::io::Result> { - // Use CARGO_MANIFEST_DIR to locate builtins relative to this crate's source, - // not the user's project root (which could be /tmp or anywhere) - let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - Ok(vec![( - "aspect".to_string(), - manifest_dir.join("src/builtins/aspect"), - )]) -} - -#[cfg(not(debug_assertions))] -pub fn expand_builtins( - 
_root_dir: PathBuf, - broot: PathBuf, -) -> std::io::Result> { - use aspect_telemetry::cargo_pkg_version; - use std::fs; - - let builtins_root = broot.join(sha256::digest(cargo_pkg_version())); - fs::create_dir_all(&builtins_root)?; - - let builtins = vec![ - ("aspect/build.axl", include_str!("./aspect/build.axl")), - ("aspect/test.axl", include_str!("./aspect/test.axl")), - ("aspect/axl_add.axl", include_str!("./aspect/axl_add.axl")), - ( - "aspect/MODULE.aspect", - include_str!("./aspect/MODULE.aspect"), - ), - ]; - - for (path, content) in builtins { - let out_path = &builtins_root.join(path); - fs::create_dir_all(&out_path.parent().unwrap())?; - fs::write(out_path, content)?; - } - - Ok(vec![("aspect".to_string(), builtins_root.join("aspect"))]) -} diff --git a/crates/axl-runtime/src/engine/bazel/build.rs b/crates/axl-runtime/src/engine/bazel/build.rs index 1666e46f2..dc7da4a5b 100644 --- a/crates/axl-runtime/src/engine/bazel/build.rs +++ b/crates/axl-runtime/src/engine/bazel/build.rs @@ -30,7 +30,7 @@ use starlark::values::starlark_value; use crate::engine::r#async::rt::AsyncRuntime; -use super::helpers::format_bazel_command; +use super::execlog_sink::ExecLogSink; use super::iter::BuildEventIterator; use super::iter::ExecutionLogIterator; use super::iter::WorkspaceEventIterator; @@ -89,6 +89,9 @@ pub enum BuildEventSink { uri: String, metadata: HashMap, }, + File { + path: String, + }, } starlark_simple_value!(BuildEventSink); @@ -118,6 +121,9 @@ impl BuildEventSink { metadata.clone(), ) } + BuildEventSink::File { .. 
} => { + unreachable!("File sinks are handled as raw file paths, not subscriber threads") + } } } } @@ -204,7 +210,7 @@ impl Build { verb: &str, targets: impl IntoIterator, (build_events, sinks): (bool, Vec), - execution_logs: bool, + (execution_logs, execlog_sinks): (bool, Vec), workspace_events: bool, flags: Vec, startup_flags: Vec, @@ -226,23 +232,29 @@ impl Build { let targets: Vec = targets.into_iter().collect(); - if debug_mode() { - eprintln!( - "running {}", - format_bazel_command(&startup_flags, verb, &flags, &targets) - ); - } - let mut cmd = Command::new("bazel"); cmd.args(startup_flags); cmd.arg(verb); + cmd.args(flags); if let Some(current_dir) = current_dir { cmd.current_dir(current_dir); } + // Split BES sinks: File sinks accumulate raw pipe bytes in memory and + // are written after the FIFO closes; subscriber sinks (Grpc, etc.) get + // a real-time channel subscription. + let mut bes_file_paths: Vec = vec![]; + let mut bes_subscriber_sinks: Vec = vec![]; + for sink in sinks { + match &sink { + BuildEventSink::File { path } => bes_file_paths.push(path.clone()), + _ => bes_subscriber_sinks.push(sink), + } + } + let build_event_stream = if build_events { - let (out, stream) = BuildEventStream::spawn_with_pipe(pid)?; + let (out, stream) = BuildEventStream::spawn_with_pipe(pid, bes_file_paths)?; cmd.arg("--build_event_publish_all_actions") .arg("--build_event_binary_file_upload_mode=fully_async") .arg("--build_event_binary_file") @@ -260,8 +272,31 @@ impl Build { None }; + // Split execlog sinks: compact paths go to the tee reader inside the stream thread; + // decoded File sinks are spawned separately against the decoded receiver. + let mut compact_paths: Vec = vec![]; + let mut decoded_sinks: Vec = vec![]; + for sink in execlog_sinks { + match &sink { + ExecLogSink::CompactFile { path } => compact_paths.push(path.clone()), + ExecLogSink::File { .. 
} => decoded_sinks.push(sink), + } + } + let execlog_stream = if execution_logs { - let (out, stream) = ExecLogStream::spawn_with_pipe(pid)?; + // If there is a CompactFile sink, let Bazel write directly to its path + // so no separate temp file or tee step is needed for that copy. + let direct_path = if compact_paths.is_empty() { + None + } else { + Some(std::path::PathBuf::from(compact_paths.remove(0))) + }; + let (out, stream) = ExecLogStream::spawn_with_file( + pid, + direct_path, + compact_paths, + !decoded_sinks.is_empty(), + )?; cmd.arg("--execution_log_compact_file").arg(&out); Some(stream) } else { @@ -270,10 +305,20 @@ impl Build { // Build Event sinks for forwarding the build events let mut sink_handles: Vec> = vec![]; - for sink in sinks { + for sink in bes_subscriber_sinks { let handle = sink.spawn(rt.clone(), build_event_stream.as_ref().unwrap()); sink_handles.push(handle); } + + // Decoded ExecLog File sinks — spawned after the execlog stream so the + // receiver is valid. They disconnect naturally when execlog_stream is joined. + for sink in decoded_sinks { + if let ExecLogSink::File { path } = sink { + let handle = + ExecLogSink::spawn_file(execlog_stream.as_ref().unwrap().receiver(), path); + sink_handles.push(handle); + } + } if build_events { // Use subscribe_realtime() since this subscribes at stream creation // and doesn't need history replay. 
@@ -283,10 +328,13 @@ impl Build { )) } - cmd.args(flags); cmd.arg("--"); // separate flags from target patterns (not strictly necessary for build & test verbs but good form) cmd.args(targets); + if debug_mode() { + eprintln!("exec: {:?}", cmd.get_args()); + } + // TODO: if not inheriting, we should pipe and make the streams available to AXL cmd.stdout(if inherit_stdout { Stdio::inherit() @@ -348,7 +396,7 @@ pub(crate) fn build_methods(registry: &mut MethodsBuilder) { let build = this.downcast_ref::().unwrap(); let execlog_stream = build.execlog_stream.borrow(); let execlog_stream = execlog_stream.as_ref().ok_or(anyhow::anyhow!( - "call `ctx.bazel.build` with `execution_logs = true` in order to receive execution log events." + "call `ctx.bazel.build` with `execution_log = true` in order to receive execution log events." ))?; Ok(ExecutionLogIterator::new(execlog_stream.receiver())) @@ -378,8 +426,19 @@ pub(crate) fn build_methods(registry: &mut MethodsBuilder) { }) } + /// Block until the Bazel invocation finishes and return a `BuildStatus`. + /// + /// After `wait()` returns, the execution log pipe has been closed and the + /// producer thread has exited. Calling `execution_logs()` after `wait()` + /// will fail — the stream is consumed as part of the wait. Iterate + /// `execution_logs()` **before** calling `wait()` if you need to process + /// entries. + /// + /// `build_events()` remains usable after `wait()` for replaying historical + /// events, because the build event stream retains its buffer. fn wait<'v>(this: values::Value<'v>) -> anyhow::Result { let build = this.downcast_ref_err::()?; + let result = build.child.borrow_mut().wait()?; // TODO: consider adding a wait_events() method for granular control. 
@@ -390,7 +449,6 @@ pub(crate) fn build_methods(registry: &mut MethodsBuilder) { if let Some(ref mut event_stream) = *build.build_event_stream.borrow_mut() { match event_stream.join() { Ok(_) => {} - // TODO: tell the user which one and why Err(err) => anyhow::bail!("build event stream thread error: {}", err), } } @@ -400,7 +458,6 @@ pub(crate) fn build_methods(registry: &mut MethodsBuilder) { if let Some(workspace_event_stream) = workspace_event_stream { match workspace_event_stream.join() { Ok(_) => {} - // TODO: tell the user which one and why Err(err) => anyhow::bail!("workspace event stream thread error: {}", err), } }; @@ -410,7 +467,6 @@ pub(crate) fn build_methods(registry: &mut MethodsBuilder) { if let Some(execlog_stream) = execlog_stream { match execlog_stream.join() { Ok(_) => {} - // TODO: tell the user which one and why Err(err) => anyhow::bail!("execlog stream thread error: {}", err), } }; @@ -419,14 +475,13 @@ pub(crate) fn build_methods(registry: &mut MethodsBuilder) { for handle in handles { match handle.join() { Ok(_) => continue, - // TODO: tell the user which one and why Err(err) => anyhow::bail!("one of the sinks failed: {:#?}", err), } } - // BES ends here let span = build.span.replace(tracing::trace_span!("build").entered()); span.exit(); + Ok(BuildStatus { success: result.success(), code: result.code(), diff --git a/crates/axl-runtime/src/engine/bazel/execlog_sink.rs b/crates/axl-runtime/src/engine/bazel/execlog_sink.rs new file mode 100644 index 000000000..760e471d5 --- /dev/null +++ b/crates/axl-runtime/src/engine/bazel/execlog_sink.rs @@ -0,0 +1,63 @@ +use std::fs::File; +use std::io::{BufWriter, Write}; +use std::thread::{self, JoinHandle}; + +use allocative::Allocative; +use axl_proto::tools::protos::ExecLogEntry; +use derive_more::Display; +use fibre::RecvError; +use fibre::spmc::Receiver; +use prost::Message; +use starlark::starlark_simple_value; +use starlark::values; +use starlark::values::starlark_value; +use 
starlark::values::{NoSerialize, ProvidesStaticType, UnpackValue, ValueLike}; + +/// Sink types for execution log output. +/// +/// | Variant | Format | +/// |---|---| +/// | `File` | Varint-length-prefixed binary proto, no zstd (decoded entries re-encoded) | +/// | `CompactFile` | Raw zstd-compressed bytes (identical to `--execution_log_compact_file`) | +#[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative, Clone)] +#[display("")] +pub enum ExecLogSink { + File { path: String }, + CompactFile { path: String }, +} + +starlark_simple_value!(ExecLogSink); + +#[starlark_value(type = "bazel.execlog.ExecLogSink")] +impl<'v> values::StarlarkValue<'v> for ExecLogSink {} + +impl<'v> UnpackValue<'v> for ExecLogSink { + type Error = anyhow::Error; + + fn unpack_value_impl(value: values::Value<'v>) -> Result, Self::Error> { + let value = value.downcast_ref_err::()?; + Ok(Some(value.clone())) + } +} + +impl ExecLogSink { + /// Spawns a thread that reads decoded `ExecLogEntry` values from `recv` and + /// writes them to `path` in varint-length-prefixed binary proto format. 
+ pub fn spawn_file(recv: Receiver, path: String) -> JoinHandle<()> { + thread::spawn(move || { + let file = File::create(&path).expect("failed to create execlog output file"); + let mut file = BufWriter::new(file); + loop { + match recv.recv() { + Ok(entry) => { + if let Err(e) = file.write_all(&entry.encode_length_delimited_to_vec()) { + eprintln!("ExecLogSink: failed to write entry: {}", e); + break; + } + } + Err(RecvError::Disconnected) => break, + } + } + }) + } +} diff --git a/crates/axl-runtime/src/engine/bazel/health_check.rs b/crates/axl-runtime/src/engine/bazel/health_check.rs new file mode 100644 index 000000000..37c404da6 --- /dev/null +++ b/crates/axl-runtime/src/engine/bazel/health_check.rs @@ -0,0 +1,251 @@ +use std::path::{Path, PathBuf}; +use std::process::{Command, Stdio}; + +use allocative::Allocative; +use derive_more::Display; +use starlark::environment::{Methods, MethodsBuilder, MethodsStatic}; +use starlark::starlark_module; +use starlark::starlark_simple_value; +use starlark::values; +use starlark::values::none::NoneOr; +use starlark::values::starlark_value; +use starlark::values::{NoSerialize, ProvidesStaticType, ValueLike}; + +/// Bazel exit codes that indicate a potentially recoverable server issue. +const RETRYABLE_EXIT_CODES: &[i32] = &[ + 1, // Build or parsing failure + 37, // Blaze internal error + 36, // Local environmental error + 9, // Lock held (noblock_for_lock) +]; + +#[derive(Debug, Display, ProvidesStaticType, NoSerialize, Allocative)] +#[display("")] +pub struct HealthCheckResult { + /// One of "healthy", "unhealthy", or "inconclusive". 
+ outcome: String, + message: Option, + exit_code: Option, +} + +starlark_simple_value!(HealthCheckResult); + +#[starlark_value(type = "bazel.HealthCheckResult")] +impl<'v> values::StarlarkValue<'v> for HealthCheckResult { + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(health_check_result_methods) + } +} + +#[starlark_module] +pub(crate) fn health_check_result_methods(registry: &mut MethodsBuilder) { + /// The server health state: `"healthy"`, `"unhealthy"`, or `"inconclusive"`. + #[starlark(attribute)] + fn outcome<'v>(this: values::Value<'v>) -> anyhow::Result { + Ok(this + .downcast_ref::() + .unwrap() + .outcome + .clone()) + } + + /// Diagnostic message, if any. + #[starlark(attribute)] + fn message<'v>(this: values::Value<'v>) -> anyhow::Result> { + Ok(NoneOr::from_option( + this.downcast_ref::() + .unwrap() + .message + .clone(), + )) + } + + /// The original Bazel exit code, if available. + #[starlark(attribute)] + fn exit_code<'v>(this: values::Value<'v>) -> anyhow::Result> { + Ok(NoneOr::from_option( + this.downcast_ref::().unwrap().exit_code, + )) + } +} + +struct CheckResult { + success: bool, + exit_code: Option, + stderr: String, +} + +/// Runs `bazel --noblock_for_lock info server_pid` and returns the result. +fn check_bazel_server() -> CheckResult { + let output = Command::new("bazel") + .arg("--noblock_for_lock") + .arg("info") + .arg("server_pid") + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .stdin(Stdio::null()) + .output(); + + match output { + Ok(output) => CheckResult { + success: output.status.success(), + exit_code: output.status.code(), + stderr: String::from_utf8_lossy(&output.stderr).to_string(), + }, + Err(e) => CheckResult { + success: false, + exit_code: None, + stderr: e.to_string(), + }, + } +} + +/// Reads the PID from a server PID file on disk. 
+/// +/// Returns `None` if the path is not absolute, does not exist, cannot be read, +/// or does not contain a valid integer. The file is read as latin1 and trimmed. +fn extract_server_pid(server_pid_file: Option<&Path>) -> Option { + let path = server_pid_file?; + + if !path.is_absolute() { + return None; + } + + let content = std::fs::read(path).ok()?; + // latin1: each byte maps directly to a unicode codepoint + let text: String = content.iter().map(|&b| b as char).collect(); + text.trim().parse::().ok() +} + +/// Probes whether a process with the given PID exists using signal 0. +#[cfg(unix)] +fn is_pid_running(pid: u32) -> bool { + use nix::sys::signal; + use nix::unistd::Pid; + + signal::kill(Pid::from_raw(pid as i32), None).is_ok() +} + +#[cfg(not(unix))] +fn is_pid_running(_pid: u32) -> bool { + false +} + +/// Sends SIGKILL to the given PID. Silently ignores failures. +#[cfg(unix)] +fn kill_server_pid(pid: u32) { + use nix::sys::signal::{self, Signal}; + use nix::unistd::Pid; + + tracing::warn!("Workflows killing bazel server with PID {}", pid); + let _ = signal::kill(Pid::from_raw(pid as i32), Signal::SIGKILL); +} + +#[cfg(not(unix))] +fn kill_server_pid(_pid: u32) { + tracing::warn!("kill_server_pid is not supported on this platform"); +} + +/// Tries to determine the Bazel output base by running `bazel info output_base`. 
+fn get_output_base() -> Option { + let output = Command::new("bazel") + .arg("info") + .arg("output_base") + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .stdin(Stdio::null()) + .output() + .ok()?; + + if !output.status.success() { + return None; + } + + let path = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if path.is_empty() { + return None; + } + Some(PathBuf::from(path)) +} + +pub fn run(output_base: Option<&str>) -> HealthCheckResult { + // Step 1: Determine server directories + let output_base = match output_base { + Some(path) => Some(PathBuf::from(path)), + None => get_output_base(), + }; + + let server_pid_file = output_base + .as_ref() + .map(|base| base.join("server").join("server.pid.txt")); + + // Step 2: Run health check + let result = check_bazel_server(); + + // Step 3: Success + if result.success { + return HealthCheckResult { + outcome: "healthy".to_string(), + message: None, + exit_code: Some(0), + }; + } + + // Step 4: Failure + let exit_code = result.exit_code; + + // 4a: Non-retryable error → inconclusive + if let Some(code) = exit_code { + if !RETRYABLE_EXIT_CODES.contains(&code) { + return HealthCheckResult { + outcome: "inconclusive".to_string(), + message: Some(format!( + "Unable to health check bazel server due to potential configuration issues: {}", + result.stderr.trim() + )), + exit_code: Some(code), + }; + } + } + + // 4b: Retryable error → attempt recovery + let diagnostic = format!( + "Bazel server returned an exit code ({}) that has caused the health check to fail", + exit_code.map_or("unknown".to_string(), |c| c.to_string()) + ); + + // 4b.i: Extract server PID from filesystem + let pid = extract_server_pid(server_pid_file.as_deref()); + + // 4b.ii: PID cannot be determined + let Some(pid) = pid else { + return HealthCheckResult { + outcome: "unhealthy".to_string(), + message: Some(diagnostic), + exit_code, + }; + }; + + // 4b.iii / 4b.iv: Kill if running, then retry + if is_pid_running(pid) { + 
kill_server_pid(pid); + } + + // Retry health check + let retry = check_bazel_server(); + + if retry.success { + HealthCheckResult { + outcome: "healthy".to_string(), + message: None, + exit_code: Some(0), + } + } else { + HealthCheckResult { + outcome: "unhealthy".to_string(), + message: Some(diagnostic), + exit_code, + } + } +} diff --git a/crates/axl-runtime/src/engine/bazel/helpers.rs b/crates/axl-runtime/src/engine/bazel/helpers.rs deleted file mode 100644 index e73df5462..000000000 --- a/crates/axl-runtime/src/engine/bazel/helpers.rs +++ /dev/null @@ -1,37 +0,0 @@ -pub fn join_strings(items: &[impl AsRef], sep: &str) -> String { - if items.is_empty() { - return String::new(); - } - items - .iter() - .map(|s| s.as_ref()) - .collect::>() - .join(sep) -} - -pub fn format_bazel_command( - startup_flags: &Vec, - verb: &str, - flags: &Vec, - targets: &Vec, -) -> String { - let startup_str = join_strings(&startup_flags, " "); - let flags_str = join_strings(&flags, " "); - let targets_str = join_strings(&targets, " "); - - let mut parts: Vec = Vec::new(); - parts.push("bazel".to_string()); - if !startup_str.is_empty() { - parts.push(startup_str); - } - parts.push(verb.to_string()); - if !flags_str.is_empty() { - parts.push(flags_str); - } - parts.push("--".to_string()); - if !targets_str.is_empty() { - parts.push(targets_str); - } - - join_strings(&parts, " ") -} diff --git a/crates/axl-runtime/src/engine/bazel/iter/build_event.rs b/crates/axl-runtime/src/engine/bazel/iter/build_event.rs index 4e772991e..bf9799112 100644 --- a/crates/axl-runtime/src/engine/bazel/iter/build_event.rs +++ b/crates/axl-runtime/src/engine/bazel/iter/build_event.rs @@ -1,5 +1,5 @@ use std::cell::RefCell; -use std::sync::mpsc::{RecvError, TryRecvError}; +use std::sync::mpsc::TryRecvError; use allocative::Allocative; use starlark::environment::Methods; @@ -88,7 +88,7 @@ impl<'v> values::StarlarkValue<'v> for BuildEventIterator { unsafe fn iter_next(&self, _index: usize, heap: &'v Heap) -> 
Option> { match self.recv.borrow_mut().recv() { Ok(ev) => Some(ev.alloc_value(heap)), - Err(RecvError) => None, + Err(_) => None, } } unsafe fn iter_stop(&self) {} diff --git a/crates/axl-runtime/src/engine/bazel/mod.rs b/crates/axl-runtime/src/engine/bazel/mod.rs index 37c672940..de8564c84 100644 --- a/crates/axl-runtime/src/engine/bazel/mod.rs +++ b/crates/axl-runtime/src/engine/bazel/mod.rs @@ -57,7 +57,8 @@ fn resolve_flags<'v>( } mod build; -mod helpers; +mod execlog_sink; +mod health_check; mod iter; mod query; mod stream; @@ -104,6 +105,14 @@ pub(crate) fn bazel_methods(registry: &mut MethodsBuilder) { /// * `inherit_stderr` - Inherit stderr from the parent process. Defaults to `True`. /// * `current_dir` - Working directory for the Bazel invocation. /// + /// # Arguments + /// * `execution_log`: Enable Bazel execution log collection. Pass `True` to + /// enable the in-memory decoded iterator (accessible via `build.execution_logs()`), + /// or pass a list of sinks such as `[execution_log.compact_file(path = "out.binpb.zst")]` + /// to write the log to one or more files. Sinks and the iterator can be combined: + /// passing a list of sinks still allows calling `build.execution_logs()` to iterate + /// entries in-process. 
+ /// /// **Examples** /// /// ```python @@ -130,7 +139,10 @@ pub(crate) fn bazel_methods(registry: &mut MethodsBuilder) { UnpackList, >, #[starlark(require = named, default = false)] workspace_events: bool, - #[starlark(require = named, default = false)] execution_logs: bool, + #[starlark(require = named, default = Either::Left(false))] execution_log: Either< + bool, + UnpackList, + >, #[starlark(require = named, default = UnpackList::default())] flags: UnpackList< Either, (values::StringValue<'v>, values::StringValue<'v>)>, >, @@ -146,6 +158,10 @@ pub(crate) fn bazel_methods(registry: &mut MethodsBuilder) { Either::Left(events) => (events, vec![]), Either::Right(sinks) => (true, sinks.items), }; + let execution_log = match execution_log { + Either::Left(b) => (b, vec![]), + Either::Right(sinks) => (true, sinks.items), + }; let has_conditional = flags.items.iter().any(|f| f.is_right()) || startup_flags.items.iter().any(|f| f.is_right()); let bazel_version = if has_conditional { @@ -162,7 +178,7 @@ pub(crate) fn bazel_methods(registry: &mut MethodsBuilder) { "build", targets.items.iter().map(|f| f.as_str().to_string()), build_events, - execution_logs, + execution_log, workspace_events, resolved_flags, resolved_startup_flags, @@ -198,6 +214,14 @@ pub(crate) fn bazel_methods(registry: &mut MethodsBuilder) { /// * `inherit_stderr` - Inherit stderr from the parent process. Defaults to `True`. /// * `current_dir` - Working directory for the Bazel invocation. /// + /// # Arguments + /// * `execution_log`: Enable Bazel execution log collection. Pass `True` to + /// enable the in-memory decoded iterator (accessible via `build.execution_logs()`), + /// or pass a list of sinks such as `[execution_log.compact_file(path = "out.binpb.zst")]` + /// to write the log to one or more files. Sinks and the iterator can be combined: + /// passing a list of sinks still allows calling `build.execution_logs()` to iterate + /// entries in-process. 
+ /// /// **Examples** /// /// ```python @@ -224,7 +248,10 @@ pub(crate) fn bazel_methods(registry: &mut MethodsBuilder) { UnpackList, >, #[starlark(require = named, default = false)] workspace_events: bool, - #[starlark(require = named, default = false)] execution_logs: bool, + #[starlark(require = named, default = Either::Left(false))] execution_log: Either< + bool, + UnpackList, + >, #[starlark(require = named, default = UnpackList::default())] flags: UnpackList< Either, (values::StringValue<'v>, values::StringValue<'v>)>, >, @@ -240,6 +267,10 @@ pub(crate) fn bazel_methods(registry: &mut MethodsBuilder) { Either::Left(events) => (events, vec![]), Either::Right(sinks) => (true, sinks.items), }; + let execution_log = match execution_log { + Either::Left(b) => (b, vec![]), + Either::Right(sinks) => (true, sinks.items), + }; let has_conditional = flags.items.iter().any(|f| f.is_right()) || startup_flags.items.iter().any(|f| f.is_right()); let bazel_version = if has_conditional { @@ -256,7 +287,7 @@ pub(crate) fn bazel_methods(registry: &mut MethodsBuilder) { "test", targets.items.iter().map(|f| f.as_str().to_string()), build_events, - execution_logs, + execution_log, workspace_events, resolved_flags, resolved_startup_flags, @@ -345,6 +376,30 @@ pub(crate) fn bazel_methods(registry: &mut MethodsBuilder) { } Ok(map) } + + /// Probe the Bazel server to determine whether it is responsive. + /// + /// Runs `bazel --noblock_for_lock info server_pid`. If the server is + /// unresponsive, attempts recovery by killing the server process and + /// re-checking. + /// + /// Returns a `HealthCheckResult` with `.success`, `.healthy`, `.message`, + /// and `.exit_code` attributes. 
+ /// + /// **Examples** + /// + /// ```python + /// def _health_probe_impl(ctx): + /// result = ctx.bazel.health_check() + /// if not result.healthy: + /// fail("Bazel server is unhealthy") + /// ``` + fn health_check<'v>( + #[allow(unused)] this: values::Value<'v>, + #[starlark(require = named, default = NoneOr::None)] output_base: NoneOr, + ) -> anyhow::Result { + Ok(health_check::run(output_base.into_option().as_deref())) + } } #[starlark_module] @@ -361,6 +416,26 @@ fn register_build_events(globals: &mut GlobalsBuilder) { metadata: HashMap::from_iter(metadata.entries), }) } + + fn file(#[starlark(require = named)] path: String) -> starlark::Result { + Ok(build::BuildEventSink::File { path }) + } +} + +#[starlark_module] +fn register_execlog_sinks(globals: &mut GlobalsBuilder) { + #[starlark(as_type = execlog_sink::ExecLogSink)] + fn file( + #[starlark(require = named)] path: String, + ) -> starlark::Result { + Ok(execlog_sink::ExecLogSink::File { path }) + } + + fn compact_file( + #[starlark(require = named)] path: String, + ) -> starlark::Result { + Ok(execlog_sink::ExecLogSink::CompactFile { path }) + } } #[starlark_module] @@ -376,6 +451,11 @@ fn register_build_types(globals: &mut GlobalsBuilder) { StarlarkValueAsType::new(); } +#[starlark_module] +fn register_execlog_types(globals: &mut GlobalsBuilder) { + const ExecLogSink: StarlarkValueAsType = StarlarkValueAsType::new(); +} + #[starlark_module] fn register_query_types(globals: &mut GlobalsBuilder) { const Query: StarlarkValueAsType = StarlarkValueAsType::new(); @@ -385,6 +465,8 @@ fn register_query_types(globals: &mut GlobalsBuilder) { #[starlark_module] fn register_types(globals: &mut GlobalsBuilder) { const Bazel: StarlarkValueAsType = StarlarkValueAsType::new(); + const HealthCheckResult: StarlarkValueAsType = + StarlarkValueAsType::new(); } pub fn register_globals(globals: &mut GlobalsBuilder) { @@ -405,4 +487,9 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { 
globals.namespace("build_events", |globals| { register_build_events(globals); }); + + globals.namespace("execution_log", |globals| { + register_execlog_types(globals); + register_execlog_sinks(globals); + }); } diff --git a/crates/axl-runtime/src/engine/bazel/stream/build_event.rs b/crates/axl-runtime/src/engine/bazel/stream/build_event.rs index ae42cc584..246cbf6d6 100644 --- a/crates/axl-runtime/src/engine/bazel/stream/build_event.rs +++ b/crates/axl-runtime/src/engine/bazel/stream/build_event.rs @@ -1,18 +1,20 @@ use axl_proto::build_event_stream::BuildEvent; use prost::Message; +use std::fs::File; +use std::io::BufWriter; use std::io::ErrorKind; use std::sync::mpsc::RecvError; use std::sync::{Arc, Mutex}; use std::{env, io}; use std::{ - io::Read, + io::{Read, Write}, path::PathBuf, thread::{self, JoinHandle}, }; use thiserror::Error; use super::broadcaster::{Broadcaster, Subscriber}; -use super::util::read_varint; +use super::util::{MultiTeeReader, read_varint}; #[derive(Error, Debug)] pub enum BuildEventStreamError { @@ -99,13 +101,16 @@ pub struct BuildEventStream { } impl BuildEventStream { - pub fn spawn_with_pipe(pid: u32) -> io::Result<(PathBuf, Self)> { + pub fn spawn_with_pipe( + pid: u32, + raw_file_sink_paths: Vec, + ) -> io::Result<(PathBuf, Self)> { let out = env::temp_dir().join(format!("build-event-out-{}.bin", uuid::Uuid::new_v4())); - let stream = Self::spawn(out.clone(), pid)?; + let stream = Self::spawn(out.clone(), pid, raw_file_sink_paths)?; Ok((out, stream)) } - pub fn spawn(path: PathBuf, pid: u32) -> io::Result { + pub fn spawn(path: PathBuf, pid: u32, raw_file_sink_paths: Vec) -> io::Result { let broadcaster = Broadcaster::new(); let broadcaster_for_thread = broadcaster.clone(); let broadcaster_holder = Arc::new(Mutex::new(Some(broadcaster))); @@ -115,23 +120,31 @@ impl BuildEventStream { let handle = thread::spawn(move || { let mut buf: Vec = Vec::with_capacity(1024 * 5); buf.resize(10, 0); - let mut out_raw = + let pipe = 
galvanize::Pipe::new(path.clone(), galvanize::RetryPolicy::IfOpenForPid(pid))?; + let writers = raw_file_sink_paths + .iter() + .map(|p| Ok(BufWriter::new(File::create(p)?))) + .collect::>>()?; + let mut reader = MultiTeeReader { + inner: pipe, + writers, + }; let read_event = |buf: &mut Vec, - out_raw: &mut galvanize::Pipe| + reader: &mut MultiTeeReader| -> Result { - let size = read_varint(out_raw)?; + let size = read_varint(reader)?; if size > buf.len() { buf.resize(size, 0); } - out_raw.read_exact(&mut buf[0..size])?; + reader.read_exact(&mut buf[0..size])?; let event = BuildEvent::decode(&buf[0..size])?; Ok(event) }; loop { - match read_event(&mut buf, &mut out_raw) { + match read_event(&mut buf, &mut reader) { Ok(event) => { let last_message = event.last_message; @@ -143,11 +156,13 @@ impl BuildEventStream { if last_message { broadcaster_for_thread.close(); + reader.flush()?; return Ok(()); } } Err(BuildEventStreamError::IO(err)) if err.kind() == ErrorKind::BrokenPipe => { broadcaster_for_thread.close(); + reader.flush()?; return Ok(()); } Err(err) => { diff --git a/crates/axl-runtime/src/engine/bazel/stream/execlog.rs b/crates/axl-runtime/src/engine/bazel/stream/execlog.rs index 34433d965..5a7e59842 100644 --- a/crates/axl-runtime/src/engine/bazel/stream/execlog.rs +++ b/crates/axl-runtime/src/engine/bazel/stream/execlog.rs @@ -1,17 +1,19 @@ use axl_proto::tools::protos::ExecLogEntry; use fibre::spmc::{Receiver, bounded}; -use fibre::{CloseError, SendError}; +use fibre::{CloseError, SendError, TrySendError}; use prost::Message; use std::fmt::Debug; +use std::fs::File; use std::io; -use std::io::Read; +use std::io::{BufWriter, Read, Write}; use std::path::PathBuf; use std::thread::JoinHandle; use std::{env, thread}; -use thiserror::Error; -use zstd::Decoder; +use super::util::MultiTeeReader; use super::util::read_varint; +use thiserror::Error; +use zstd::Decoder; #[derive(Error, Debug)] pub enum ExecLogStreamError { @@ -25,29 +27,110 @@ pub enum 
ExecLogStreamError { Close(#[from] CloseError), } +/// Wraps a `Read` source, blocking on empty reads until real data arrives. +/// +/// Some `Read` implementations (e.g. [`galvanize::StreamingFile`]) return `Ok(0)` to signal +/// "no data yet, try again" while the writer is still active. Framing layers like the zstd +/// `Decoder` interpret `Ok(0)` as EOF and error with "incomplete frame". This adapter sits +/// between such a source and the decoder, converting empty reads into a brief sleep-and-retry +/// so the decoder always receives either real bytes or a terminal error. +struct RetryRead { + inner: R, +} + +impl Read for RetryRead { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + loop { + match self.inner.read(buf) { + Ok(0) => std::thread::sleep(std::time::Duration::from_millis(1)), + other => return other, + } + } + } +} + #[derive(Debug)] pub struct ExecLogStream { handle: JoinHandle>, - recv: Receiver, + // Holds the initial subscriber clone. Kept as Option so join() can drop it + // before the thread finishes. In fibre's SPMC broadcast ring buffer every + // Receiver clone is an independent subscriber whose tail the sender must not + // lap; an unconsumed clone prevents the Closed signal that tells the producer + // to stop decoding. Dropping it first means the sender sees Closed on the + // first try_send when no external subscribers exist, skipping all decoding. + recv: Option>, } impl ExecLogStream { - pub fn spawn_with_pipe(pid: u32) -> io::Result<(PathBuf, Self)> { + /// Spawn the execlog reader thread using a FIFO (named pipe). + /// + /// # Warning — do not use when `--build_event_binary_file` is also a FIFO + /// + /// Bazel checksums the compact execlog file after writing it in order to populate + /// the `build_tool_logs` BEP event. A FIFO cannot be re-read for this purpose, so + /// Bazel stalls mid-build trying to seek back, which in turn prevents the BEP FIFO + /// from being flushed, causing a deadlock. 
See: + /// + /// + /// Use [`spawn_with_file`](Self::spawn_with_file) instead. This method is retained + /// for contexts where the BEP stream is not active and the checksum path is not hit. + #[allow(dead_code)] + pub fn spawn_with_pipe( + pid: u32, + compact_sink_paths: Vec, + has_file_sinks: bool, + ) -> io::Result<(PathBuf, Self)> { let out = env::temp_dir().join(format!("execlog-out-{}.bin", uuid::Uuid::new_v4())); - let stream = Self::spawn(out.clone(), pid)?; + let stream = Self::spawn(out.clone(), pid, compact_sink_paths, has_file_sinks)?; Ok((out, stream)) } - pub fn spawn(path: PathBuf, pid: u32) -> io::Result { + /// Spawn the execlog reader thread. + /// + /// ## Send strategy + /// + /// `has_file_sinks` controls how decoded entries are sent to the channel: + /// + /// - `true` — blocking [`Sender::send`]. File-sink threads must receive every entry + /// to produce a complete output file, so the producer waits for the channel to drain + /// rather than dropping entries. The build may slow under sustained I/O pressure, but + /// it will not deadlock because the sink threads are always consuming. + /// + /// - `false` — non-blocking [`Sender::try_send`]. Used when the only consumer is the + /// optional `execution_logs()` iterator. A full channel means the caller is not + /// consuming fast enough; entries are dropped rather than stalling the build. + /// Once all receiver clones are gone (`Closed`), decoding is skipped entirely. + /// + /// `CompactFile` sinks are unaffected by this flag — raw bytes are always tee'd + /// by `MultiTeeReader` before decoding. + pub fn spawn( + path: PathBuf, + pid: u32, + compact_sink_paths: Vec, + has_file_sinks: bool, + ) -> io::Result { let (mut sender, recv) = bounded::(1000); let handle = thread::spawn(move || { let mut buf: Vec = Vec::with_capacity(1024 * 5); // 10 is the maximum size of a varint so start with that size. 
buf.resize(10, 0); + let out_raw = galvanize::Pipe::new(path.clone(), galvanize::RetryPolicy::IfOpenForPid(pid))?; + let writers = compact_sink_paths + .iter() + .map(|p| Ok(BufWriter::new(File::create(p)?))) + .collect::>>()?; + let out_raw = MultiTeeReader { + inner: out_raw, + writers, + }; let mut out_raw = Decoder::new(out_raw)?; + // Only used in the try_send path (no file sinks). + // Set to false when try_send returns Closed, skipping future decodes. + let mut has_readers = true; + let mut read = || -> Result<(), ExecLogStreamError> { // varint size can be somewhere between 1 to 10 bytes. let size = read_varint(&mut out_raw)?; @@ -57,40 +140,148 @@ impl ExecLogStream { out_raw.read_exact(&mut buf[0..size])?; - let entry = ExecLogEntry::decode(&buf[0..size])?; - // Send blocks until there is room in the buffer. - // https://docs.rs/fibre/latest/fibre/spmc/index.html - sender.send(entry)?; + if has_file_sinks { + let entry = ExecLogEntry::decode(&buf[0..size])?; + sender.send(entry)?; + } else if has_readers { + let entry = ExecLogEntry::decode(&buf[0..size])?; + match sender.try_send(entry) { + Ok(()) | Err(TrySendError::Sent(_)) => {} + // Channel full: iterator consumer is slow, drop entry. + Err(TrySendError::Full(_)) => {} + // No receivers left: skip decoding for remaining entries. + Err(TrySendError::Closed(_)) => has_readers = false, + } + } Ok(()) }; loop { - let result = read(); + match read() { + Ok(()) => continue, + // End of stream. + Err(ExecLogStreamError::IO(err)) if err.kind() == io::ErrorKind::BrokenPipe => { + sender.close()?; + out_raw.get_mut().get_mut().flush()?; + return Ok(()); + } + Err(err) => return Err(err), + } + } + }); + Ok(Self { + handle, + recv: Some(recv), + }) + } - // event decoding was succesfull move to the next. - if result.is_ok() { - continue; + /// Spawn the execlog reader thread for a regular file. + /// + /// `pid` is the Bazel server process ID, used to detect when Bazel has finished + /// writing the file. 
`out_path` is the file Bazel will write + /// `--execution_log_compact_file` to. Pass `Some(path)` to reuse an existing sink + /// path (e.g. a `CompactFile` sink so Bazel writes directly to the caller's + /// destination without a tee step). Pass `None` to have a UUID-named temp file + /// created automatically. + /// + /// The thread streams the file as Bazel writes it using [`galvanize::StreamingFile`], + /// which busy-polls for file existence at open time and retries reads while Bazel + /// holds the file open. It self-terminates when Bazel closes the file. + pub fn spawn_with_file( + pid: u32, + out_path: Option, + compact_sink_paths: Vec, + has_file_sinks: bool, + ) -> io::Result<(PathBuf, Self)> { + let out = out_path.unwrap_or_else(|| { + env::temp_dir().join(format!("execlog-out-{}.bin", uuid::Uuid::new_v4())) + }); + let (mut sender, recv) = bounded::(1000); + let path = out.clone(); + let handle = thread::spawn(move || { + let mut buf: Vec = Vec::with_capacity(1024 * 5); + // 10 is the maximum size of a varint so start with that size. + buf.resize(10, 0); + + let out_raw = galvanize::StreamingFile::open(path.clone(), pid)?; + let writers = compact_sink_paths + .iter() + .map(|p| Ok(BufWriter::new(File::create(p)?))) + .collect::>>()?; + let out_raw = MultiTeeReader { + inner: out_raw, + writers, + }; + // RetryRead prevents zstd from seeing Ok(0) ("no data yet") as EOF. + let out_raw = RetryRead { inner: out_raw }; + let mut out_raw = Decoder::new(out_raw)?; + + // Only used in the try_send path (no file sinks). 
+ let mut has_readers = true; + + let mut read = || -> Result<(), ExecLogStreamError> { + let size = read_varint(&mut out_raw)?; + if size > buf.len() { + buf.resize(size, 0); + } + + out_raw.read_exact(&mut buf[0..size])?; + + if has_file_sinks { + let entry = ExecLogEntry::decode(&buf[0..size])?; + sender.send(entry)?; + } else if has_readers { + let entry = ExecLogEntry::decode(&buf[0..size])?; + match sender.try_send(entry) { + Ok(()) | Err(TrySendError::Sent(_)) => {} + // Channel full: iterator consumer is slow, drop entry. + Err(TrySendError::Full(_)) => {} + // No receivers left: skip decoding for remaining entries. + Err(TrySendError::Closed(_)) => has_readers = false, + } } - match result.unwrap_err() { - // this marks the end of the stream - ExecLogStreamError::IO(err) if err.kind() == io::ErrorKind::BrokenPipe => { + Ok(()) + }; + + loop { + match read() { + Ok(()) => continue, + // BrokenPipe signals that Bazel closed the file (end of stream). + Err(ExecLogStreamError::IO(err)) if err.kind() == io::ErrorKind::BrokenPipe => { sender.close()?; + out_raw.get_mut().get_mut().inner.flush()?; return Ok(()); } - err => return Err(err), + Err(err) => return Err(err), } } }); - Ok(Self { handle, recv }) + + Ok(( + out, + Self { + handle, + recv: Some(recv), + }, + )) } pub fn receiver(&self) -> Receiver { - self.recv.clone() + self.recv + .as_ref() + .expect("receiver() called after join()") + .clone() } - pub fn join(self) -> Result<(), ExecLogStreamError> { + /// Wait for the execlog stream to finish. + /// + /// Drops the struct's `recv` clone so that if no external subscriber exists + /// the first `try_send` returns `Closed` and remaining bytes are drained + /// without proto decoding. Then waits for the thread to exit. 
+ pub fn join(mut self) -> Result<(), ExecLogStreamError> { + self.recv.take(); self.handle.join().expect("join error") } } diff --git a/crates/axl-runtime/src/engine/bazel/stream/util.rs b/crates/axl-runtime/src/engine/bazel/stream/util.rs index 4c50a7ec4..8399b1ef4 100644 --- a/crates/axl-runtime/src/engine/bazel/stream/util.rs +++ b/crates/axl-runtime/src/engine/bazel/stream/util.rs @@ -1,6 +1,43 @@ +use std::fs::File; use std::io; use std::io::Read; use std::io::Result; +use std::io::{BufWriter, Write}; + +/// Wraps a `Read` source and tees every byte read to one or more `BufWriter` sinks. +/// +/// Used to intercept raw bytes from a stream before any further processing, +/// allowing file sinks to capture a copy without a second pass. +pub(super) struct MultiTeeReader { + pub(super) inner: R, + pub(super) writers: Vec>, +} + +impl Read for MultiTeeReader { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let n = self.inner.read(buf)?; + for w in &mut self.writers { + w.write_all(&buf[..n])?; + } + Ok(n) + } +} + +impl Write for MultiTeeReader { + fn write(&mut self, buf: &[u8]) -> io::Result { + for w in &mut self.writers { + w.write_all(buf)?; + } + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + for w in &mut self.writers { + w.flush()?; + } + Ok(()) + } +} pub const CONTINUATION_BIT: u8 = 1 << 7; diff --git a/crates/axl-runtime/src/engine/config/context.rs b/crates/axl-runtime/src/engine/config/context.rs index 34af5f023..3d723b813 100644 --- a/crates/axl-runtime/src/engine/config/context.rs +++ b/crates/axl-runtime/src/engine/config/context.rs @@ -33,28 +33,36 @@ use super::tasks::value::TaskList; /// Config context for evaluating config.axl files. /// -/// This context holds the list of tasks that config functions can modify. -/// Tasks are stored as `ConfiguredTask` which use `OwnedFrozenValue` internally, -/// so no lifetime parameter is needed on this type. 
+/// This context holds the list of tasks and the fragment map that config functions can modify. #[derive(Debug, Clone, ProvidesStaticType, Trace, Display, NoSerialize, Allocative)] #[display("")] pub struct ConfigContext<'v> { #[allocative(skip)] tasks: values::Value<'v>, #[allocative(skip)] + fragment_map: values::Value<'v>, + #[allocative(skip)] config_modules: RefCell>, } impl<'v> ConfigContext<'v> { - /// Create a new ConfigContext with the given tasks. - pub fn new(tasks: Vec, heap: &'v Heap) -> Self { + /// Create a new ConfigContext with the given tasks and fragment map. + pub fn new( + tasks: Vec, + fragment_map: values::Value<'v>, + heap: &'v Heap, + ) -> Self { let tasks: Vec> = tasks .into_iter() .map(|task| task.alloc_value(heap)) .collect(); - let x = TaskListGen(RefCell::new(TaskList::new(tasks))); + let x = TaskListGen(RefCell::new(TaskList::new_with_fragment_map( + tasks, + fragment_map, + ))); Self { tasks: heap.alloc_complex_no_freeze(x), + fragment_map, config_modules: RefCell::new(vec![]), } } @@ -70,6 +78,17 @@ impl<'v> ConfigContext<'v> { .collect() } + /// Get task values for iteration (used during config evaluation). + pub fn task_values(&self) -> Vec> { + let list = self.tasks.downcast_ref::().unwrap(); + list.0.borrow().content.clone() + } + + /// Get the fragment map value. + pub fn fragment_map_value(&self) -> values::Value<'v> { + self.fragment_map + } + /// Add a config module for lifetime management. pub fn add_config_module(&self, module: FrozenModule) { self.config_modules.borrow_mut().push(module); @@ -140,4 +159,16 @@ pub(crate) fn config_context_methods(registry: &mut MethodsBuilder) { let this = this.downcast_ref_err::()?; Ok(ValueOfUnchecked::new(this.tasks)) } + + /// Access to the fragment map for configuring fragment instances. 
+ /// + /// Usage: + /// ```starlark + /// ctx.fragments[BazelFragment].extra_flags = ["--config=ci"] + /// ``` + #[starlark(attribute)] + fn fragments<'v>(this: values::Value<'v>) -> starlark::Result> { + let ctx = this.downcast_ref_err::()?; + Ok(ctx.fragment_map) + } } diff --git a/crates/axl-runtime/src/engine/config/fragment_map.rs b/crates/axl-runtime/src/engine/config/fragment_map.rs new file mode 100644 index 000000000..11d474072 --- /dev/null +++ b/crates/axl-runtime/src/engine/config/fragment_map.rs @@ -0,0 +1,271 @@ +//! FragmentMap - A Starlark value that maps fragment type IDs to instances. + +use std::cell::RefCell; +use std::fmt::{self, Display, Write}; + +use allocative::Allocative; +use starlark::starlark_simple_value; +use starlark::values::{ + AllocValue, Freeze, FreezeError, Freezer, FrozenValue, Heap, NoSerialize, ProvidesStaticType, + StarlarkValue, Trace, Tracer, Value, ValueLike, starlark_value, +}; +use starlark_map::small_map::SmallMap; + +use crate::engine::types::fragment::{FragmentType, FrozenFragmentType, extract_fragment_type_id}; + +/// A Starlark value that maps fragment type IDs to their instances. +/// +/// Used as `ctx.fragments` in both ConfigContext and TaskContext. +/// Supports `ctx.fragments[FragType]` for reading and +/// `ctx.fragments[FragType] = FragType(...)` for writing. 
+#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] +pub struct FragmentMap<'v> { + /// Map from fragment type id → (type_value, instance_value) + #[allocative(skip)] + entries: RefCell, Value<'v>)>>, +} + +impl<'v> Display for FragmentMap<'v> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "FragmentMap([")?; + let entries = self.entries.borrow(); + let mut first = true; + for (_, (type_val, _)) in entries.iter() { + if !first { + write!(f, ", ")?; + } + first = false; + write!(f, "{}", type_val)?; + } + write!(f, "])") + } +} + +unsafe impl<'v> Trace<'v> for FragmentMap<'v> { + fn trace(&mut self, tracer: &Tracer<'v>) { + let entries = self.entries.get_mut(); + for (_, (type_val, instance_val)) in entries.iter_mut() { + type_val.trace(tracer); + instance_val.trace(tracer); + } + } +} + +impl<'v> AllocValue<'v> for FragmentMap<'v> { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex(self) + } +} + +impl<'v> Freeze for FragmentMap<'v> { + type Frozen = FrozenFragmentMap; + + fn freeze(self, freezer: &Freezer) -> Result { + let entries = self.entries.into_inner(); + let mut frozen_entries = SmallMap::with_capacity(entries.len()); + for (id, (type_val, instance_val)) in entries.into_iter() { + frozen_entries.insert( + id, + (type_val.freeze(freezer)?, instance_val.freeze(freezer)?), + ); + } + Ok(FrozenFragmentMap { + entries: frozen_entries, + }) + } +} + +impl<'v> FragmentMap<'v> { + /// Create a new empty FragmentMap. + pub fn new() -> Self { + FragmentMap { + entries: RefCell::new(SmallMap::new()), + } + } + + /// Insert a fragment type and its default instance. + pub fn insert(&self, type_id: u64, type_value: Value<'v>, instance: Value<'v>) { + self.entries + .borrow_mut() + .insert(type_id, (type_value, instance)); + } + + /// Check if a fragment type is already present. 
+ pub fn contains(&self, type_id: u64) -> bool { + self.entries.borrow().contains_key(&type_id) + } + + /// Get instance for a given type ID. + pub fn get_instance(&self, type_id: u64) -> Option> { + self.entries.borrow().get(&type_id).map(|(_, v)| *v) + } + + /// Get all entries as (type_id, type_value, instance_value) tuples. + pub fn entries(&self) -> Vec<(u64, Value<'v>, Value<'v>)> { + self.entries + .borrow() + .iter() + .map(|(id, (tv, iv))| (*id, *tv, *iv)) + .collect() + } + + /// Create a new FragmentMap containing only the given type IDs, + /// copying instance references from this map. + pub fn scoped(&self, type_ids: &[u64], heap: &'v Heap) -> Value<'v> { + let scoped = FragmentMap::new(); + let entries = self.entries.borrow(); + for id in type_ids { + if let Some((type_val, instance_val)) = entries.get(id) { + scoped + .entries + .borrow_mut() + .insert(*id, (*type_val, *instance_val)); + } + } + heap.alloc(scoped) + } +} + +#[starlark_value(type = "FragmentMap")] +impl<'v> StarlarkValue<'v> for FragmentMap<'v> { + fn collect_repr(&self, collector: &mut String) { + write!(collector, "{}", self).unwrap(); + } + + fn at(&self, index: Value<'v>, _heap: &'v Heap) -> starlark::Result> { + let type_id = extract_fragment_type_id(index).ok_or_else(|| { + starlark::Error::new_other(anyhow::anyhow!( + "FragmentMap key must be a fragment type, got '{}'", + index.get_type() + )) + })?; + + let entries = self.entries.borrow(); + match entries.get(&type_id) { + Some((_, instance)) => Ok(*instance), + None => { + let type_name = if let Some(ft) = index.downcast_ref::() { + ft.name.as_deref().unwrap_or("anon") + } else if let Some(ft) = index.downcast_ref::() { + ft.name.as_deref().unwrap_or("anon") + } else { + "unknown" + }; + Err(starlark::Error::new_other(anyhow::anyhow!( + "Fragment type '{}' not found in FragmentMap. 
Is it declared in a task's fragments list?", + type_name + ))) + } + } + } + + fn set_at(&self, index: Value<'v>, new_value: Value<'v>) -> starlark::Result<()> { + let type_id = extract_fragment_type_id(index).ok_or_else(|| { + starlark::Error::new_other(anyhow::anyhow!( + "FragmentMap key must be a fragment type, got '{}'", + index.get_type() + )) + })?; + + let mut entries = self.entries.borrow_mut(); + match entries.get_mut(&type_id) { + Some(entry) => { + entry.1 = new_value; + Ok(()) + } + None => { + // Auto-insert if not already present + entries.insert(type_id, (index, new_value)); + Ok(()) + } + } + } +} + +/// Frozen version of FragmentMap. Read-only after freezing. +#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] +pub struct FrozenFragmentMap { + #[allocative(skip)] + entries: SmallMap, +} + +impl Display for FrozenFragmentMap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "FragmentMap([")?; + let mut first = true; + for (_, (type_val, _)) in self.entries.iter() { + if !first { + write!(f, ", ")?; + } + first = false; + write!(f, "{}", type_val)?; + } + write!(f, "])") + } +} + +unsafe impl<'v> Trace<'v> for FrozenFragmentMap { + fn trace(&mut self, _tracer: &Tracer<'v>) { + // Frozen values don't need tracing + } +} + +starlark_simple_value!(FrozenFragmentMap); + +#[starlark_value(type = "FragmentMap")] +impl<'v> StarlarkValue<'v> for FrozenFragmentMap { + type Canonical = FragmentMap<'v>; + + fn collect_repr(&self, collector: &mut String) { + write!(collector, "{}", self).unwrap(); + } + + fn at(&self, index: Value<'v>, _heap: &'v Heap) -> starlark::Result> { + let type_id = extract_fragment_type_id(index).ok_or_else(|| { + starlark::Error::new_other(anyhow::anyhow!( + "FragmentMap key must be a fragment type, got '{}'", + index.get_type() + )) + })?; + + match self.entries.get(&type_id) { + Some((_, instance)) => Ok(instance.to_value()), + None => { + let type_name = if let Some(ft) = index.downcast_ref::() { 
+ ft.name.as_deref().unwrap_or("anon") + } else if let Some(ft) = index.downcast_ref::() { + ft.name.as_deref().unwrap_or("anon") + } else { + "unknown" + }; + Err(starlark::Error::new_other(anyhow::anyhow!( + "Fragment type '{}' not found in FragmentMap. Is it declared in a task's fragments list?", + type_name + ))) + } + } + } +} + +/// Auto-construct fragment instances by calling each fragment type with no arguments +/// (using defaults from attr() definitions). +pub fn construct_fragments<'v>( + fragment_types: &[(u64, Value<'v>)], + eval: &mut starlark::eval::Evaluator<'v, '_, '_>, + _heap: &'v Heap, +) -> Result, crate::eval::EvalError> { + let map = FragmentMap::new(); + for (type_id, type_value) in fragment_types { + if !map.contains(*type_id) { + let instance = eval.eval_function(*type_value, &[], &[]).map_err(|e| { + crate::eval::EvalError::UnknownError(anyhow::anyhow!( + "Failed to construct default fragment instance for {}: {:?}", + type_value, + e + )) + })?; + map.insert(*type_id, *type_value, instance); + } + } + Ok(map) +} diff --git a/crates/axl-runtime/src/engine/config/mod.rs b/crates/axl-runtime/src/engine/config/mod.rs index a69a11908..770846c6a 100644 --- a/crates/axl-runtime/src/engine/config/mod.rs +++ b/crates/axl-runtime/src/engine/config/mod.rs @@ -1,6 +1,8 @@ mod context; +pub mod fragment_map; mod tasks; pub use context::ConfigContext; +pub use fragment_map::FragmentMap; pub use tasks::configured_task::ConfiguredTask; pub use tasks::frozen::freeze_value; diff --git a/crates/axl-runtime/src/engine/config/tasks/configured_task.rs b/crates/axl-runtime/src/engine/config/tasks/configured_task.rs index 1c2926133..359184fb2 100644 --- a/crates/axl-runtime/src/engine/config/tasks/configured_task.rs +++ b/crates/axl-runtime/src/engine/config/tasks/configured_task.rs @@ -1,7 +1,4 @@ -//! ConfiguredTask - A task with its configuration, using frozen values. -//! -//! This type uses `OwnedFrozenValue` to manage heap lifetimes automatically, -//! 
following Buck2's pattern for safe frozen value management. +//! ConfiguredTask - A task with its fragment type IDs. use std::cell::RefCell; use std::path::PathBuf; @@ -9,7 +6,6 @@ use std::path::PathBuf; use allocative::Allocative; use anyhow::anyhow; use derive_more::Display; -use starlark::environment::FrozenModule; use starlark::environment::Methods; use starlark::environment::MethodsBuilder; use starlark::environment::MethodsStatic; @@ -32,16 +28,14 @@ use crate::engine::task::FrozenTask; use crate::engine::task::TaskLike; use crate::eval::EvalError; -use super::frozen::freeze_value; - -/// A task bundled with its configuration, using frozen values for safe heap management. +/// A task bundled with its fragment type IDs. /// /// This type: /// - Has no lifetime parameter (easy to store and pass around) -/// - Uses `OwnedFrozenValue` to keep heaps alive automatically +/// - Uses `OwnedFrozenValue` for frozen values (task definition) +/// - Stores fragment type IDs for fragment map scoping /// - Is a `StarlarkValue` that config functions can modify via `set_attr` -/// - Can be created from a `FrozenModule` -#[derive(Debug, Clone, ProvidesStaticType, Display, NoSerialize, Allocative)] +#[derive(Debug, ProvidesStaticType, Display, NoSerialize, Allocative, Clone)] #[display("")] pub struct ConfiguredTask { /// The frozen task definition (contains implementation function) @@ -51,28 +45,24 @@ pub struct ConfiguredTask { pub name: RefCell, /// Task group (may be overridden by config) pub group: RefCell>, - /// Configured config value - #[allocative(skip)] - pub config: RefCell, + /// Fragment type IDs this task opts into + pub fragment_type_ids: Vec, /// Symbol name in the module pub symbol: String, /// Path to the .axl file pub path: PathBuf, } -// ConfiguredTask doesn't need tracing since it only contains frozen values unsafe impl Trace<'_> for ConfiguredTask { fn trace(&mut self, _tracer: &values::Tracer<'_>) { - // OwnedFrozenValue manages its own heap lifetime, 
no tracing needed + // OwnedFrozenValue manages its own lifetime. } } impl ConfiguredTask { /// Create a ConfiguredTask from a FrozenModule. - /// - /// Extracts the task definition and initial config from the frozen module. pub fn from_frozen_module( - frozen: &FrozenModule, + frozen: &starlark::environment::FrozenModule, symbol: &str, path: PathBuf, ) -> Result { @@ -94,18 +84,37 @@ impl ConfiguredTask { frozen_task.name.clone() }; let group = frozen_task.group.clone(); - let initial_config = OwnedFrozenValue::alloc(frozen_task.config); + let fragment_type_ids = frozen_task.fragment_type_ids(); Ok(ConfiguredTask { task_def, name: RefCell::new(name), group: RefCell::new(group), - config: RefCell::new(initial_config), + fragment_type_ids, symbol: symbol.to_string(), path, }) } + /// Create a ConfiguredTask with known fragment type IDs. + pub fn new_with_fragments( + task_def: OwnedFrozenValue, + name: String, + group: Vec, + fragment_type_ids: Vec, + symbol: String, + path: PathBuf, + ) -> Self { + ConfiguredTask { + task_def, + name: RefCell::new(name), + group: RefCell::new(group), + fragment_type_ids, + symbol, + path, + } + } + /// Get a reference to the underlying FrozenTask. pub fn as_frozen_task(&self) -> Option<&FrozenTask> { self.task_def.value().downcast_ref::() @@ -122,11 +131,6 @@ impl ConfiguredTask { Some(self.task_def.map(|_| task.implementation())) } - /// Get the current config value. - pub fn get_config(&self) -> OwnedFrozenValue { - self.config.borrow().clone() - } - /// Get the current name. 
pub fn get_name(&self) -> String { self.name.borrow().clone() @@ -150,12 +154,6 @@ impl<'v> values::StarlarkValue<'v> for ConfiguredTask { .ok_or_else(|| anyhow!("groups must be a list of strings"))?; self.group.replace(unpack.items); } - "config" => { - // Freeze the config value so it can be safely stored - let frozen = - freeze_value(value).map_err(|e| anyhow!("failed to freeze config: {:?}", e))?; - self.config.replace(frozen); - } _ => return ValueError::unsupported(self, &format!(".{}=", attribute)), }; Ok(()) @@ -165,14 +163,6 @@ impl<'v> values::StarlarkValue<'v> for ConfiguredTask { match attribute { "name" => Some(heap.alloc_str(&self.name.borrow()).to_value()), "group" => Some(heap.alloc(AllocList(self.group.borrow().iter()))), - "config" => { - // Return the frozen config value - let config = self.config.borrow(); - let value = config.value(); - // SAFETY: The OwnedFrozenValue keeps its heap alive, and we're - // returning a Value that will be used within this evaluation. - Some(unsafe { std::mem::transmute::, Value<'v>>(value) }) - } "symbol" => Some(heap.alloc_str(&self.symbol).to_value()), "path" => Some(heap.alloc_str(&self.path.to_string_lossy()).to_value()), _ => None, @@ -183,7 +173,6 @@ impl<'v> values::StarlarkValue<'v> for ConfiguredTask { vec![ "name".into(), "group".into(), - "config".into(), "symbol".into(), "path".into(), ] diff --git a/crates/axl-runtime/src/engine/config/tasks/value.rs b/crates/axl-runtime/src/engine/config/tasks/value.rs index dafc08a8b..77325e44f 100644 --- a/crates/axl-runtime/src/engine/config/tasks/value.rs +++ b/crates/axl-runtime/src/engine/config/tasks/value.rs @@ -16,7 +16,6 @@ use starlark::typing::Ty; use starlark::values::AllocValue; use starlark::values::Heap; use starlark::values::NoSerialize; -use starlark::values::OwnedFrozenValue; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; @@ -28,8 +27,10 @@ use starlark::values::type_repr::StarlarkTypeRepr; use 
super::configured_task::ConfiguredTask; use super::r#ref::TaskListMut; +use crate::engine::config::fragment_map::FragmentMap; use crate::engine::store::AxlStore; use crate::engine::task::{AsTaskLike, FrozenTask, Task, TaskLike}; +use crate::engine::types::fragment::extract_fragment_type_id; #[derive(Clone, Default, Trace, Debug, ProvidesStaticType, NoSerialize, Allocative)] pub(crate) struct TaskListGen(pub(crate) T); @@ -80,22 +81,44 @@ pub(crate) fn task_list_methods(registry: &mut MethodsBuilder) { .get(&symbol) .map_err(|e| anyhow::anyhow!("failed to get frozen task: {:?}", e))?; - // Get initial config from the frozen task + // Get fragment type IDs from the frozen task let frozen_task = task_def .value() .downcast_ref::() .ok_or_else(|| anyhow::anyhow!("expected FrozenTask after freeze"))?; - let initial_config = OwnedFrozenValue::alloc(frozen_task.config); + let fragment_type_ids = frozen_task.fragment_type_ids(); + + // Auto-register any new fragment types into the FragmentMap + if let Some(fmap_value) = this.aref.fragment_map { + if let Some(fmap) = fmap_value.downcast_ref::() { + for frag_fv in frozen_task.fragments() { + let frag_value = frag_fv.to_value(); + if let Some(id) = extract_fragment_type_id(frag_value) { + if !fmap.contains(id) { + // Auto-construct default instance by calling the fragment type with no args + let instance = eval.eval_function(frag_value, &[], &[]).map_err(|e| { + anyhow::anyhow!( + "Failed to construct default fragment instance for {}: {:?}", + frag_value, + e + ) + })?; + fmap.insert(id, frag_value, instance); + } + } + } + } + } - // Create ConfiguredTask with frozen values - let task_mut = ConfiguredTask { + // Create ConfiguredTask with fragment type IDs + let task_mut = ConfiguredTask::new_with_fragments( task_def, - name: RefCell::new(name), - group: RefCell::new(task_like.group().to_vec()), - config: RefCell::new(initial_config), + name, + task_like.group().to_vec(), + fragment_type_ids, symbol, - path: 
PathBuf::from(store.script_path.to_string_lossy().to_string()), - }; + PathBuf::from(store.script_path.to_string_lossy().to_string()), + ); this.aref.content.push(eval.heap().alloc(task_mut)); Ok(NoneType) @@ -123,14 +146,27 @@ pub(crate) type MutableTaskList<'v> = TaskListGen>>; /// Unfrozen TaskList #[derive(Clone, Trace, Debug, ProvidesStaticType, Allocative)] -#[repr(transparent)] pub struct TaskList<'v> { pub(crate) content: Vec>, + /// Optional reference to the FragmentMap for auto-registering fragments + /// when tasks are added dynamically via ctx.tasks.add(). + #[allocative(skip)] + pub(crate) fragment_map: Option>, } impl<'v> TaskList<'v> { pub fn new(content: Vec>) -> Self { - TaskList { content } + TaskList { + content, + fragment_map: None, + } + } + + pub fn new_with_fragment_map(content: Vec>, fragment_map: Value<'v>) -> Self { + TaskList { + content, + fragment_map: Some(fragment_map), + } } } diff --git a/crates/axl-runtime/src/engine/mod.rs b/crates/axl-runtime/src/engine/mod.rs index ae9f398bb..836b774f3 100644 --- a/crates/axl-runtime/src/engine/mod.rs +++ b/crates/axl-runtime/src/engine/mod.rs @@ -9,7 +9,7 @@ mod globals; mod http; mod std; mod template; -mod types; +pub mod types; mod wasm; pub mod r#async; @@ -40,6 +40,7 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { globals::register_globals(globals); r#async::register_globals(globals); task::register_globals(globals); + types::fragment::register_globals(globals); globals.namespace("args", task_arg::register_globals); globals.namespace("bazel", bazel::register_globals); diff --git a/crates/axl-runtime/src/engine/task.rs b/crates/axl-runtime/src/engine/task.rs index 6f1bba3ac..9eb33e81d 100644 --- a/crates/axl-runtime/src/engine/task.rs +++ b/crates/axl-runtime/src/engine/task.rs @@ -1,4 +1,5 @@ use crate::engine::task_context::TaskContext; +use crate::engine::types::fragment::{FragmentType, FrozenFragmentType, extract_fragment_type_id}; use super::task_arg::TaskArg; use 
allocative::Allocative; @@ -15,8 +16,8 @@ use starlark::values::ProvidesStaticType; use starlark::values::StarlarkValue; use starlark::values::Trace; use starlark::values::Value; +use starlark::values::ValueLike; use starlark::values::list::UnpackList; -use starlark::values::none::NoneOr; use starlark::values::none::NoneType; use starlark::values::starlark_value; use starlark::values::typing::StarlarkCallableParamSpec; @@ -52,7 +53,7 @@ pub struct Task<'v> { pub(super) description: String, pub(super) group: Vec, pub(super) name: String, - pub(super) config: values::Value<'v>, + pub(super) fragments: Vec>, } impl<'v> Task<'v> { @@ -71,8 +72,8 @@ impl<'v> Task<'v> { pub fn name(&self) -> &String { &self.name } - pub fn config(&self) -> values::Value<'v> { - self.config + pub fn fragments(&self) -> &[values::Value<'v>] { + &self.fragments } } @@ -104,14 +105,15 @@ impl<'v> values::Freeze for Task<'v> { type Frozen = FrozenTask; fn freeze(self, freezer: &values::Freezer) -> values::FreezeResult { let frozen_impl = self.r#impl.freeze(freezer)?; - let frozen_config = self.config.freeze(freezer)?; + let frozen_fragments: Result, _> = + self.fragments.iter().map(|f| f.freeze(freezer)).collect(); Ok(FrozenTask { args: self.args, r#impl: frozen_impl, description: self.description, group: self.group, name: self.name, - config: frozen_config, + fragments: frozen_fragments?, }) } } @@ -125,7 +127,7 @@ pub struct FrozenTask { pub(super) description: String, pub(super) group: Vec, pub(super) name: String, - pub(super) config: values::FrozenValue, + pub(super) fragments: Vec, } starlark_simple_value!(FrozenTask); @@ -139,8 +141,15 @@ impl FrozenTask { pub fn implementation(&self) -> values::FrozenValue { self.r#impl } - pub fn config(&self) -> values::FrozenValue { - self.config + pub fn fragments(&self) -> &[values::FrozenValue] { + &self.fragments + } + /// Get fragment type IDs this task opts into. 
+ pub fn fragment_type_ids(&self) -> Vec { + self.fragments + .iter() + .filter_map(|f| extract_fragment_type_id(f.to_value())) + .collect() } } @@ -190,7 +199,7 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { /// task_args = { /// "target": args.string(), /// }, - /// config = None # Optional user-defined config (e.g., a record); defaults to None if not provided + /// fragments = [BazelFragment] # Optional list of fragment types /// ) /// ``` fn task<'v>( @@ -203,7 +212,9 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { #[starlark(require = named, default = String::new())] description: String, #[starlark(require = named, default = UnpackList::default())] group: UnpackList, #[starlark(require = named, default = String::new())] name: String, - #[starlark(require = named, default = NoneOr::None)] config: NoneOr>, + #[starlark(require = named, default = UnpackList::default())] fragments: UnpackList< + Value<'v>, + >, ) -> starlark::Result> { if group.items.len() > MAX_TASK_GROUPS { return Err(anyhow::anyhow!( @@ -216,13 +227,28 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { for (arg, def) in args.entries { args_.insert(arg.to_owned(), def.clone()); } + + // Validate each element is a FragmentType or FrozenFragmentType + let all_fragments = fragments.items; + for frag in &all_fragments { + if frag.downcast_ref::().is_none() + && frag.downcast_ref::().is_none() + { + return Err(anyhow::anyhow!( + "fragments list must contain fragment types, got '{}'", + frag.get_type() + ) + .into()); + } + } + Ok(Task { args: args_, r#impl: implementation.0, description, group: group.items, name, - config: config.into_option().unwrap_or(values::Value::new_none()), + fragments: all_fragments, }) } } diff --git a/crates/axl-runtime/src/engine/task_context.rs b/crates/axl-runtime/src/engine/task_context.rs index 017baf3f8..c1f1a638c 100644 --- a/crates/axl-runtime/src/engine/task_context.rs +++ b/crates/axl-runtime/src/engine/task_context.rs @@ -26,14 
+26,18 @@ use super::wasm::Wasm; #[display("")] pub struct TaskContext<'v> { pub args: TaskArgs<'v>, - pub config: values::Value<'v>, + pub fragments: values::Value<'v>, #[trace(unsafe_ignore)] pub task: TaskInfo, } impl<'v> TaskContext<'v> { - pub fn new(args: TaskArgs<'v>, config: values::Value<'v>, task: TaskInfo) -> Self { - Self { args, config, task } + pub fn new(args: TaskArgs<'v>, fragments: values::Value<'v>, task: TaskInfo) -> Self { + Self { + args, + fragments, + task, + } } } @@ -60,7 +64,7 @@ impl<'v> values::Freeze for TaskContext<'v> { Ok(FrozenTaskContext { args: args_value, - config: self.config.freeze(freezer)?, + fragments: self.fragments.freeze(freezer)?, task: self.task, }) } @@ -89,11 +93,11 @@ pub(crate) fn task_context_methods(registry: &mut MethodsBuilder) { Ok(ctx.args.clone()) } - /// Access to the task configuration. + /// Access to the fragment map for reading configured fragment values. #[starlark(attribute)] - fn config<'v>(this: values::Value<'v>) -> starlark::Result> { + fn fragments<'v>(this: values::Value<'v>) -> starlark::Result> { let ctx = this.downcast_ref_err::()?; - Ok(ctx.config) + Ok(ctx.fragments) } /// Expand template files. 
@@ -131,7 +135,7 @@ pub struct FrozenTaskContext { #[allocative(skip)] args: values::FrozenValue, #[allocative(skip)] - config: values::FrozenValue, + fragments: values::FrozenValue, task: TaskInfo, } @@ -167,9 +171,9 @@ fn frozen_task_context_methods(registry: &mut MethodsBuilder) { } #[starlark(attribute)] - fn config<'v>(this: values::Value<'v>) -> starlark::Result> { + fn fragments<'v>(this: values::Value<'v>) -> starlark::Result> { let ctx = this.downcast_ref_err::()?; - Ok(ctx.config.to_value()) + Ok(ctx.fragments.to_value()) } #[starlark(attribute)] diff --git a/crates/axl-runtime/src/engine/types/fragment.rs b/crates/axl-runtime/src/engine/types/fragment.rs new file mode 100644 index 000000000..bf4e9c670 --- /dev/null +++ b/crates/axl-runtime/src/engine/types/fragment.rs @@ -0,0 +1,928 @@ +use std::cell::Cell; +use std::fmt::{self, Display, Write}; +use std::sync::atomic::{AtomicU64, Ordering}; + +use allocative::Allocative; +use dupe::Dupe; +use starlark::environment::{GlobalsBuilder, Methods, MethodsBuilder, MethodsStatic}; +use starlark::starlark_module; +use starlark::values::dict::AllocDict; +use starlark::values::list::AllocList; +use starlark::values::typing::TypeCompiled; +use starlark::values::{ + AllocFrozenValue, AllocValue, Freeze, FreezeError, Freezer, FrozenHeap, FrozenValue, Heap, + NoSerialize, ProvidesStaticType, StarlarkValue, Trace, Tracer, Value, ValueLike, + starlark_value, +}; +use starlark_map::small_map::SmallMap; + +static FRAGMENT_TYPE_ID: AtomicU64 = AtomicU64::new(0); + +fn next_fragment_type_id() -> u64 { + FRAGMENT_TYPE_ID.fetch_add(1, Ordering::SeqCst) +} + +// ----------------------------------------------------------------------------- +// Field +// ----------------------------------------------------------------------------- + +/// A field definition for a fragment, containing a type and optional default value. 
+#[derive(Debug, Clone, ProvidesStaticType, Allocative)] +pub struct Field<'v> { + pub(crate) typ: TypeCompiled>, + pub(crate) typ_value: Value<'v>, + pub(crate) default: Option>, +} + +impl<'v> Display for Field<'v> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.default { + None => write!(f, "field({})", self.typ), + Some(d) => write!(f, "field({}, {})", self.typ, d), + } + } +} + +unsafe impl<'v> Trace<'v> for Field<'v> { + fn trace(&mut self, tracer: &Tracer<'v>) { + self.typ.trace(tracer); + self.typ_value.trace(tracer); + if let Some(ref mut d) = self.default { + d.trace(tracer); + } + } +} + +impl<'v> Field<'v> { + pub fn freeze(self, freezer: &Freezer) -> Result { + Ok(FrozenField { + typ: self.typ.freeze(freezer)?, + typ_value: self.typ_value.freeze(freezer)?, + default: self.default.map(|d| d.freeze(freezer)).transpose()?, + }) + } +} + +/// A frozen field definition. +#[derive(Debug, Clone, ProvidesStaticType, Allocative)] +pub struct FrozenField { + pub(crate) typ: TypeCompiled, + pub(crate) typ_value: FrozenValue, + pub(crate) default: Option, +} + +impl Display for FrozenField { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.default { + None => write!(f, "field({})", self.typ), + Some(d) => write!(f, "field({}, {})", self.typ, d), + } + } +} + +// ----------------------------------------------------------------------------- +// FieldValue - a wrapper for field() function return +// ----------------------------------------------------------------------------- + +#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] +pub struct FieldValue<'v> { + pub(crate) typ: TypeCompiled>, + pub(crate) typ_value: Value<'v>, + pub(crate) default: Option>, +} + +impl<'v> Display for FieldValue<'v> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.default { + None => write!(f, "field({})", self.typ), + Some(d) => write!(f, "field({}, {})", self.typ, d), + } + } +} + +unsafe 
impl<'v> Trace<'v> for FieldValue<'v> { + fn trace(&mut self, tracer: &Tracer<'v>) { + self.typ.trace(tracer); + self.typ_value.trace(tracer); + if let Some(ref mut d) = self.default { + d.trace(tracer); + } + } +} + +impl<'v> AllocValue<'v> for FieldValue<'v> { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex(self) + } +} + +#[starlark_value(type = "field")] +impl<'v> StarlarkValue<'v> for FieldValue<'v> { + fn collect_repr(&self, collector: &mut String) { + write!(collector, "{}", self).unwrap(); + } +} + +/// Frozen version of FieldValue. +#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] +pub struct FrozenFieldValue { + pub(crate) typ: TypeCompiled, + pub(crate) typ_value: FrozenValue, + pub(crate) default: Option, +} + +impl Display for FrozenFieldValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.default { + None => write!(f, "field({})", self.typ), + Some(d) => write!(f, "field({}, {})", self.typ, d), + } + } +} + +unsafe impl<'v> Trace<'v> for FrozenFieldValue { + fn trace(&mut self, _tracer: &Tracer<'v>) { + // Frozen values don't need tracing + } +} + +impl AllocFrozenValue for FrozenFieldValue { + fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + heap.alloc_simple(self) + } +} + +#[starlark_value(type = "field")] +impl<'v> StarlarkValue<'v> for FrozenFieldValue { + type Canonical = FieldValue<'v>; + + fn collect_repr(&self, collector: &mut String) { + write!(collector, "{}", self).unwrap(); + } +} + +impl Freeze for FieldValue<'_> { + type Frozen = FrozenFieldValue; + + fn freeze(self, freezer: &Freezer) -> Result { + Ok(FrozenFieldValue { + typ: self.typ.freeze(freezer)?, + typ_value: self.typ_value.freeze(freezer)?, + default: self.default.map(|d| d.freeze(freezer)).transpose()?, + }) + } +} + +/// Deep-copy a default value if it's a mutable container (list or dict). 
+/// This ensures each fragment instance gets its own mutable copy rather than +/// sharing the (potentially frozen) default. +fn copy_default_value<'v>(value: Value<'v>, heap: &'v Heap) -> starlark::Result> { + match value.get_type() { + "list" => { + let items: Vec> = value.iterate(heap)?.collect(); + Ok(heap.alloc(AllocList(items))) + } + "dict" => { + let keys: Vec> = value.iterate(heap)?.collect(); + let items: Vec<(Value<'v>, Value<'v>)> = keys + .into_iter() + .map(|k| { + let v = value.at(k, heap)?; + Ok((k, v)) + }) + .collect::>()?; + Ok(heap.alloc(AllocDict(items))) + } + _ => Ok(value), + } +} + +/// Create fresh TypeCompiled values from field type values at runtime. +/// This ensures type checking works correctly for types like starlark Records +/// whose frozen TypeCompiled matchers may not function properly. +fn build_type_checkers<'v>( + fields: impl Iterator>, + heap: &'v Heap, +) -> starlark::Result>>> { + fields + .map(|typ_value| TypeCompiled::new(typ_value, heap).map_err(starlark::Error::new_other)) + .collect() +} + +// ----------------------------------------------------------------------------- +// FragmentType +// ----------------------------------------------------------------------------- + +/// The type of a fragment, created by `fragment(field1=type1, field2=type2, ...)`. +/// Calling this type creates a `FragmentInstance` instance. 
+#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] +pub struct FragmentType<'v> { + /// Unique identifier for this fragment type + pub(crate) id: u64, + /// Name of the fragment type (set when assigned to a variable) + pub(crate) name: Option, + /// Fields with their types and optional defaults + pub(crate) fields: SmallMap>, +} + +impl<'v> Display for FragmentType<'v> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.name { + Some(name) => write!(f, "fragment[{}]", name), + None => write!(f, "fragment[anon]"), + } + } +} + +unsafe impl<'v> Trace<'v> for FragmentType<'v> { + fn trace(&mut self, tracer: &Tracer<'v>) { + for (_, field) in self.fields.iter_mut() { + field.trace(tracer); + } + } +} + +impl<'v> AllocValue<'v> for FragmentType<'v> { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex(self) + } +} + +#[starlark_value(type = "fragment")] +impl<'v> StarlarkValue<'v> for FragmentType<'v> { + fn collect_repr(&self, collector: &mut String) { + write!(collector, "{}", self).unwrap(); + } + + fn export_as( + &self, + variable_name: &str, + _eval: &mut starlark::eval::Evaluator<'v, '_, '_>, + ) -> starlark::Result<()> { + // This is called when the fragment type is assigned to a variable. + // We use unsafe to mutate the name, which is safe because this is only + // called during module loading. 
+ let this = self as *const Self as *mut Self; + unsafe { + (*this).name = Some(variable_name.to_string()); + } + Ok(()) + } + + fn invoke( + &self, + _me: Value<'v>, + args: &starlark::eval::Arguments<'v, '_>, + eval: &mut starlark::eval::Evaluator<'v, '_, '_>, + ) -> starlark::Result> { + // Build fresh type checkers from the original type values + let type_checkers = + build_type_checkers(self.fields.values().map(|f| f.typ_value), eval.heap())?; + + // Parse the arguments according to our field definitions + let mut values: Vec>> = Vec::with_capacity(self.fields.len()); + + // Get the named arguments + args.no_positional_args(eval.heap())?; + let kwargs = args.names_map()?; + + // Build values in field order + for ((field_name, field), tc) in self.fields.iter().zip(type_checkers.iter()) { + let value = if let Some(v) = kwargs.get(field_name.as_str()) { + *v + } else if let Some(default) = field.default { + copy_default_value(default, eval.heap())? + } else { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Missing required field `{}` for {}", + field_name, + self + ))); + }; + + // Type check the value using the fresh TypeCompiled + if !tc.matches(value) { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Field `{}` expected type `{}`, got `{}`", + field_name, + tc, + value.get_type() + ))); + } + + values.push(Cell::new(value)); + } + + // Check for unexpected kwargs + for (name, _) in kwargs.iter() { + if !self.fields.contains_key(name.as_str()) { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Unexpected field `{}` for {}", + name, + self + ))); + } + } + + let instance = FragmentInstance { + typ: _me, + values: values.into_boxed_slice(), + type_checkers: type_checkers.into_boxed_slice(), + }; + Ok(eval.heap().alloc(instance)) + } + + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(fragment_type_methods) + } +} + +#[starlark_module] +fn 
fragment_type_methods(_builder: &mut MethodsBuilder) {} + +// ----------------------------------------------------------------------------- +// FrozenFragmentType +// ----------------------------------------------------------------------------- + +/// Frozen version of FragmentType. +#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] +pub struct FrozenFragmentType { + pub(crate) id: u64, + pub(crate) name: Option, + pub(crate) fields: SmallMap, +} + +impl Display for FrozenFragmentType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.name { + Some(name) => write!(f, "fragment[{}]", name), + None => write!(f, "fragment[anon]"), + } + } +} + +unsafe impl<'v> Trace<'v> for FrozenFragmentType { + fn trace(&mut self, _tracer: &Tracer<'v>) { + // Frozen values don't need tracing + } +} + +impl AllocFrozenValue for FrozenFragmentType { + fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + heap.alloc_simple(self) + } +} + +#[starlark_value(type = "fragment")] +impl<'v> StarlarkValue<'v> for FrozenFragmentType { + type Canonical = FragmentType<'v>; + + fn collect_repr(&self, collector: &mut String) { + write!(collector, "{}", self).unwrap(); + } + + fn invoke( + &self, + _me: Value<'v>, + args: &starlark::eval::Arguments<'v, '_>, + eval: &mut starlark::eval::Evaluator<'v, '_, '_>, + ) -> starlark::Result> { + // Build fresh type checkers from the original type values + let type_checkers = build_type_checkers( + self.fields.values().map(|f| f.typ_value.to_value()), + eval.heap(), + )?; + + let mut values: Vec>> = Vec::with_capacity(self.fields.len()); + + args.no_positional_args(eval.heap())?; + let kwargs = args.names_map()?; + + for ((field_name, field), tc) in self.fields.iter().zip(type_checkers.iter()) { + let value = if let Some(v) = kwargs.get(field_name.as_str()) { + *v + } else if let Some(default) = field.default { + copy_default_value(default.to_value(), eval.heap())? 
+ } else { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Missing required field `{}` for {}", + field_name, + self + ))); + }; + + // Type check using the fresh TypeCompiled + if !tc.matches(value) { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Field `{}` expected type `{}`, got `{}`", + field_name, + tc, + value.get_type() + ))); + } + + values.push(Cell::new(value)); + } + + for (name, _) in kwargs.iter() { + if !self.fields.contains_key(name.as_str()) { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Unexpected field `{}` for {}", + name, + self + ))); + } + } + + let instance = FragmentInstance { + typ: _me, + values: values.into_boxed_slice(), + type_checkers: type_checkers.into_boxed_slice(), + }; + Ok(eval.heap().alloc(instance)) + } + + fn get_methods() -> Option<&'static Methods> { + static RES: MethodsStatic = MethodsStatic::new(); + RES.methods(fragment_type_methods) + } +} + +impl Freeze for FragmentType<'_> { + type Frozen = FrozenFragmentType; + + fn freeze(self, freezer: &Freezer) -> Result { + let mut frozen_fields = SmallMap::with_capacity(self.fields.len()); + for (name, field) in self.fields.into_iter() { + frozen_fields.insert(name, field.freeze(freezer)?); + } + Ok(FrozenFragmentType { + id: self.id, + name: self.name, + fields: frozen_fields, + }) + } +} + +// ----------------------------------------------------------------------------- +// FragmentInstance +// ----------------------------------------------------------------------------- + +/// An instance of a fragment type, containing field values. +#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] +pub struct FragmentInstance<'v> { + /// The fragment type this instance belongs to + pub(crate) typ: Value<'v>, + /// Field values in the same order as the type's field definitions (mutable via Cell) + #[allocative(skip)] + pub(crate) values: Box<[Cell>]>, + /// Fresh type checkers created at construction time for runtime type checking. 
+ /// These are re-derived from the field type values to avoid issues with frozen TypeCompiled. + #[allocative(skip)] + pub(crate) type_checkers: Box<[TypeCompiled>]>, +} + +impl<'v> Display for FragmentInstance<'v> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}(", self.typ)?; + if let Some(frag_type) = self.typ.downcast_ref::() { + let mut first = true; + for ((name, _), value) in frag_type.fields.iter().zip(self.values.iter()) { + if !first { + write!(f, ", ")?; + } + first = false; + write!(f, "{}={}", name, value.get())?; + } + } else if let Some(frozen_type) = self.typ.downcast_ref::() { + let mut first = true; + for ((name, _), value) in frozen_type.fields.iter().zip(self.values.iter()) { + if !first { + write!(f, ", ")?; + } + first = false; + write!(f, "{}={}", name, value.get())?; + } + } + write!(f, ")") + } +} + +unsafe impl<'v> Trace<'v> for FragmentInstance<'v> { + fn trace(&mut self, tracer: &Tracer<'v>) { + self.typ.trace(tracer); + for cell in self.values.iter() { + let mut v = cell.get(); + v.trace(tracer); + cell.set(v); + } + for tc in self.type_checkers.iter_mut() { + tc.trace(tracer); + } + } +} + +impl<'v> AllocValue<'v> for FragmentInstance<'v> { + fn alloc_value(self, heap: &'v Heap) -> Value<'v> { + heap.alloc_complex(self) + } +} + +impl<'v> FragmentInstance<'v> { + fn get_field_names(&self) -> Vec<&str> { + if let Some(frag_type) = self.typ.downcast_ref::() { + frag_type.fields.keys().map(|s| s.as_str()).collect() + } else if let Some(frozen_type) = self.typ.downcast_ref::() { + frozen_type.fields.keys().map(|s| s.as_str()).collect() + } else { + vec![] + } + } +} + +#[starlark_value(type = "fragment")] +impl<'v> StarlarkValue<'v> for FragmentInstance<'v> { + fn collect_repr(&self, collector: &mut String) { + write!(collector, "{}", self).unwrap(); + } + + fn get_attr(&self, attribute: &str, _heap: &'v Heap) -> Option> { + if let Some(frag_type) = self.typ.downcast_ref::() { + if let Some(idx) = 
frag_type.fields.get_index_of(attribute) { + return Some(self.values[idx].get()); + } + } else if let Some(frozen_type) = self.typ.downcast_ref::() { + if let Some(idx) = frozen_type.fields.get_index_of(attribute) { + return Some(self.values[idx].get()); + } + } + None + } + + fn set_attr(&self, attribute: &str, value: Value<'v>) -> starlark::Result<()> { + // Get field index + let idx = if let Some(frag_type) = self.typ.downcast_ref::() { + frag_type.fields.get_index_of(attribute) + } else if let Some(frozen_type) = self.typ.downcast_ref::() { + frozen_type.fields.get_index_of(attribute) + } else { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Invalid fragment type" + ))); + }; + + let idx = match idx { + Some(idx) => idx, + None => { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Fragment {} has no field `{}`", + self.typ, + attribute + ))); + } + }; + + // Type check using the fresh type checker created at construction time + let tc = &self.type_checkers[idx]; + if !tc.matches(value) { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Field `{}` expected type `{}`, got `{}`", + attribute, + tc, + value.get_type() + ))); + } + + // Set the value + self.values[idx].set(value); + Ok(()) + } + + fn has_attr(&self, attribute: &str, _heap: &'v Heap) -> bool { + if let Some(frag_type) = self.typ.downcast_ref::() { + frag_type.fields.contains_key(attribute) + } else if let Some(frozen_type) = self.typ.downcast_ref::() { + frozen_type.fields.contains_key(attribute) + } else { + false + } + } + + fn dir_attr(&self) -> Vec { + self.get_field_names() + .into_iter() + .map(|s| s.to_string()) + .collect() + } + + fn equals(&self, other: Value<'v>) -> starlark::Result { + if let Some(other_instance) = other.downcast_ref::() { + // Check that they have the same fragment type + let self_id = self + .typ + .downcast_ref::() + .map(|t| t.id) + .or_else(|| self.typ.downcast_ref::().map(|t| t.id)); + let other_id = other_instance + .typ + 
.downcast_ref::() + .map(|t| t.id) + .or_else(|| { + other_instance + .typ + .downcast_ref::() + .map(|t| t.id) + }); + + if self_id != other_id { + return Ok(false); + } + + // Compare all values + if self.values.len() != other_instance.values.len() { + return Ok(false); + } + for (a, b) in self.values.iter().zip(other_instance.values.iter()) { + if !a.get().equals(b.get())? { + return Ok(false); + } + } + Ok(true) + } else if let Some(other_frozen) = other.downcast_ref::() { + let self_id = self + .typ + .downcast_ref::() + .map(|t| t.id) + .or_else(|| self.typ.downcast_ref::().map(|t| t.id)); + let other_id = other_frozen + .typ + .downcast_ref::() + .map(|t| t.id); + + if self_id != other_id { + return Ok(false); + } + + if self.values.len() != other_frozen.values.len() { + return Ok(false); + } + for (a, b) in self.values.iter().zip(other_frozen.values.iter()) { + if !a.get().equals(b.to_value())? { + return Ok(false); + } + } + Ok(true) + } else { + Ok(false) + } + } +} + +// ----------------------------------------------------------------------------- +// FrozenFragmentInstance +// ----------------------------------------------------------------------------- + +/// Frozen version of FragmentInstance. 
+#[derive(Debug, ProvidesStaticType, NoSerialize, Allocative)] +pub struct FrozenFragmentInstance { + pub(crate) typ: FrozenValue, + pub(crate) values: Box<[FrozenValue]>, +} + +impl Display for FrozenFragmentInstance { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}(", self.typ)?; + if let Some(frozen_type) = self.typ.downcast_ref::() { + let mut first = true; + for ((name, _), value) in frozen_type.fields.iter().zip(self.values.iter()) { + if !first { + write!(f, ", ")?; + } + first = false; + write!(f, "{}={}", name, value)?; + } + } + write!(f, ")") + } +} + +unsafe impl<'v> Trace<'v> for FrozenFragmentInstance { + fn trace(&mut self, _tracer: &Tracer<'v>) { + // Frozen values don't need tracing + } +} + +impl AllocFrozenValue for FrozenFragmentInstance { + fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue { + heap.alloc_simple(self) + } +} + +#[starlark_value(type = "fragment")] +impl<'v> StarlarkValue<'v> for FrozenFragmentInstance { + type Canonical = FragmentInstance<'v>; + + fn collect_repr(&self, collector: &mut String) { + write!(collector, "{}", self).unwrap(); + } + + fn get_attr(&self, attribute: &str, _heap: &'v Heap) -> Option> { + if let Some(frozen_type) = self.typ.downcast_ref::() { + if let Some(idx) = frozen_type.fields.get_index_of(attribute) { + return Some(self.values[idx].to_value()); + } + } + None + } + + fn has_attr(&self, attribute: &str, _heap: &'v Heap) -> bool { + if let Some(frozen_type) = self.typ.downcast_ref::() { + frozen_type.fields.contains_key(attribute) + } else { + false + } + } + + fn dir_attr(&self) -> Vec { + if let Some(frozen_type) = self.typ.downcast_ref::() { + frozen_type.fields.keys().map(|s| s.to_string()).collect() + } else { + vec![] + } + } + + fn equals(&self, other: Value<'v>) -> starlark::Result { + if let Some(other_frozen) = other.downcast_ref::() { + let self_id = self.typ.downcast_ref::().map(|t| t.id); + let other_id = other_frozen + .typ + .downcast_ref::() + 
.map(|t| t.id); + + if self_id != other_id { + return Ok(false); + } + + if self.values.len() != other_frozen.values.len() { + return Ok(false); + } + for (a, b) in self.values.iter().zip(other_frozen.values.iter()) { + if !a.to_value().equals(b.to_value())? { + return Ok(false); + } + } + Ok(true) + } else if let Some(other_instance) = other.downcast_ref::() { + let self_id = self.typ.downcast_ref::().map(|t| t.id); + let other_id = other_instance + .typ + .downcast_ref::() + .map(|t| t.id) + .or_else(|| { + other_instance + .typ + .downcast_ref::() + .map(|t| t.id) + }); + + if self_id != other_id { + return Ok(false); + } + + if self.values.len() != other_instance.values.len() { + return Ok(false); + } + for (a, b) in self.values.iter().zip(other_instance.values.iter()) { + if !a.to_value().equals(b.get())? { + return Ok(false); + } + } + Ok(true) + } else { + Ok(false) + } + } +} + +impl Freeze for FragmentInstance<'_> { + type Frozen = FrozenFragmentInstance; + + fn freeze(self, freezer: &Freezer) -> Result { + let typ = self.typ.freeze(freezer)?; + let values: Result, _> = self + .values + .iter() + .map(|v| v.get().freeze(freezer)) + .collect(); + Ok(FrozenFragmentInstance { + typ, + values: values?.into_boxed_slice(), + }) + } +} + +// ----------------------------------------------------------------------------- +// Helper: extract fragment type ID from a Value +// ----------------------------------------------------------------------------- + +/// Extract the fragment type ID from a Value that is either a FragmentType or FrozenFragmentType. 
+pub fn extract_fragment_type_id(value: Value) -> Option { + if let Some(ft) = value.downcast_ref::() { + Some(ft.id) + } else if let Some(ft) = value.downcast_ref::() { + Some(ft.id) + } else { + None + } +} + +// ----------------------------------------------------------------------------- +// Global functions +// ----------------------------------------------------------------------------- + +#[starlark_module] +pub fn register_globals(globals: &mut GlobalsBuilder) { + /// Creates a fragment type with the given fields. + /// + /// Each field can be a bare type (required, no default) or an `attr()` + /// definition (with type and optional default). + /// + /// Mutable defaults (lists, dicts) are deep-copied per instance, so each + /// instance gets its own independent copy. No `default_factory` needed. + /// + /// Example: + /// ```starlark + /// BazelFragment = fragment( + /// extra_flags = attr(list[str], []), + /// extra_startup_flags = attr(list[str], []), + /// ) + /// ``` + fn fragment<'v>( + #[starlark(kwargs)] kwargs: SmallMap<&str, Value<'v>>, + eval: &mut starlark::eval::Evaluator<'v, '_, '_>, + ) -> starlark::Result> { + let mut fields = SmallMap::with_capacity(kwargs.len()); + + for (name, value) in kwargs.into_iter() { + let field = if let Some(field_value) = value.downcast_ref::() { + // It's already a field() definition + Field { + typ: field_value.typ.dupe(), + typ_value: field_value.typ_value, + default: field_value.default, + } + } else { + // It's a type, convert to a field without default + let typ = TypeCompiled::new(value, eval.heap())?; + Field { + typ, + typ_value: value, + default: None, + } + }; + fields.insert(name.to_string(), field); + } + + Ok(FragmentType { + id: next_fragment_type_id(), + name: None, + fields, + }) + } + + /// Creates a field definition with a type and optional default value. 
+ /// + /// Mutable defaults (lists, dicts) are deep-copied when a fragment instance is + /// created, so each instance gets its own independent copy. + /// + /// Example: + /// ```starlark + /// BazelFragment = fragment(host=str, port=attr(int, 80)) + /// r = BazelFragment(host="localhost") # port defaults to 80 + /// ``` + fn attr<'v>( + #[starlark(require = pos)] typ: Value<'v>, + #[starlark(require = pos)] default: Option>, + eval: &mut starlark::eval::Evaluator<'v, '_, '_>, + ) -> starlark::Result> { + let compiled_type = TypeCompiled::new(typ, eval.heap())?; + + // Validate that the default matches the type if provided + if let Some(d) = default { + if !compiled_type.matches(d) { + return Err(starlark::Error::new_other(anyhow::anyhow!( + "Default value `{}` does not match field type `{}`", + d, + compiled_type + ))); + } + } + + Ok(FieldValue { + typ: compiled_type, + typ_value: typ, + default, + }) + } +} diff --git a/crates/axl-runtime/src/engine/types/mod.rs b/crates/axl-runtime/src/engine/types/mod.rs index ad0049c4c..5c704224d 100644 --- a/crates/axl-runtime/src/engine/types/mod.rs +++ b/crates/axl-runtime/src/engine/types/mod.rs @@ -1 +1,2 @@ pub mod bytes; +pub mod fragment; diff --git a/crates/axl-runtime/src/eval/config.rs b/crates/axl-runtime/src/eval/config.rs index 70561b0e4..ddf285855 100644 --- a/crates/axl-runtime/src/eval/config.rs +++ b/crates/axl-runtime/src/eval/config.rs @@ -2,15 +2,27 @@ use anyhow::anyhow; use starlark::environment::Module; use starlark::eval::Evaluator; use starlark::values::{Value, ValueLike}; +use starlark_map::small_map::SmallMap; use std::path::Path; use std::path::PathBuf; +use crate::engine::config::fragment_map::{FragmentMap, construct_fragments}; use crate::engine::config::{ConfigContext, ConfiguredTask}; +use crate::engine::types::fragment::extract_fragment_type_id; use crate::eval::load::{AxlLoader, ModuleScope}; use crate::eval::load_path::join_confined; use super::error::EvalError; +/// Result of running all 
config evaluations. +pub struct ConfigResult { + /// The configured tasks. + pub tasks: Vec, + /// Fragment type IDs mapped to their (type_value, instance_value) pairs. + /// These are the globally-configured fragment instances that tasks will use. + pub fragment_data: Vec<(u64, Value<'static>, Value<'static>)>, +} + /// Evaluator for running config.axl files. #[derive(Debug)] pub struct ConfigEvaluator<'l, 'p> { @@ -46,32 +58,58 @@ impl<'l, 'p> ConfigEvaluator<'l, 'p> { /// Evaluates all config files with the given tasks. /// /// This method: - /// 1. Creates a ConfigContext with the tasks - /// 2. Evaluates each config file, calling its `config` function - /// 3. Returns references to the modified tasks - /// - /// The tasks are modified in place via set_attr calls from config functions. + /// 1. Collects fragment types from all tasks + /// 2. Auto-constructs default fragment instances + /// 3. Creates a FragmentMap and ConfigContext + /// 4. Evaluates each config file, calling its `config` function + /// 5. 
Returns the modified tasks and fragment data pub fn run_all( &self, - scope: ModuleScope, - config_paths: Vec, + scoped_configs: Vec<(ModuleScope, PathBuf, String)>, tasks: Vec, - ) -> Result, EvalError> { - self.loader.module_stack.borrow_mut().push(scope.clone()); - + ) -> Result { // Create temporary modules for evaluation let eval_module = Box::leak(Box::new(Module::new())); let context_module = Box::leak(Box::new(Module::new())); - // Create ConfigContext with tasks let heap = context_module.heap(); - let context_value = heap.alloc(ConfigContext::new(tasks, heap)); + + // Collect fragment types from all tasks + let mut fragment_types: SmallMap = SmallMap::new(); + for task in &tasks { + let frozen_task = task + .as_frozen_task() + .expect("tasks should be frozen at this point"); + for frag_fv in frozen_task.fragments() { + let frag_value = frag_fv.to_value(); + if let Some(id) = extract_fragment_type_id(frag_value) { + fragment_types.entry(id).or_insert(frag_value); + } + } + } + + // Auto-construct default fragment instances + let fragment_pairs: Vec<(u64, Value)> = + fragment_types.into_iter().map(|(id, v)| (id, v)).collect(); + + let fragment_map = { + let mut eval = Evaluator::new(eval_module); + eval.set_loader(self.loader); + construct_fragments(&fragment_pairs, &mut eval, heap)? + }; + + let fragment_map_value = heap.alloc(fragment_map); + + // Create ConfigContext with tasks and fragment map + let context_value = heap.alloc(ConfigContext::new(tasks, fragment_map_value, heap)); let ctx = context_value .downcast_ref::() .expect("just allocated ConfigContext"); - // Evaluate each config file - for path in &config_paths { + // Evaluate each config file with its associated scope + for (scope, path, function_name) in &scoped_configs { + self.loader.module_stack.borrow_mut().push(scope.clone()); + let rel_path = path .strip_prefix(&scope.path) .map_err(|e| EvalError::UnknownError(anyhow!("Failed to strip prefix: {e}")))? 
@@ -87,8 +125,8 @@ impl<'l, 'p> ConfigEvaluator<'l, 'p> { // Get the config function let def = frozen - .get("config") - .map_err(|_| EvalError::MissingSymbol("config".into()))?; + .get(function_name) + .map_err(|_| EvalError::MissingSymbol(function_name.clone()))?; let func = def.value(); @@ -107,12 +145,31 @@ impl<'l, 'p> ConfigEvaluator<'l, 'p> { // Keep the frozen module alive for the duration ctx.add_config_module(frozen); + + self.loader.module_stack.borrow_mut().pop(); } // Clone tasks from the context to return let result_tasks: Vec = ctx.tasks().iter().map(|t| (*t).clone()).collect(); - self.loader.module_stack.borrow_mut().pop(); - Ok(result_tasks) + // Extract fragment data from the FragmentMap + let fmap = fragment_map_value + .downcast_ref::() + .expect("just allocated FragmentMap"); + let fragment_data: Vec<(u64, Value<'static>, Value<'static>)> = fmap + .entries() + .into_iter() + .map(|(id, tv, iv)| { + // SAFETY: These values live on context_module's leaked heap + let tv: Value<'static> = unsafe { std::mem::transmute(tv) }; + let iv: Value<'static> = unsafe { std::mem::transmute(iv) }; + (id, tv, iv) + }) + .collect(); + + Ok(ConfigResult { + tasks: result_tasks, + fragment_data, + }) } } diff --git a/crates/axl-runtime/src/eval/load.rs b/crates/axl-runtime/src/eval/load.rs index aadecb26f..0a00a334b 100644 --- a/crates/axl-runtime/src/eval/load.rs +++ b/crates/axl-runtime/src/eval/load.rs @@ -58,13 +58,20 @@ impl<'p> AxlLoader<'p> { AxlStore::new(self.cli_version.clone(), self.repo_root.clone(), path) } + /// Caches a frozen module by its absolute path so that subsequent `load()` calls + /// for the same path return the cached module instead of re-evaluating. 
+ pub fn cache_module(&self, path: PathBuf, module: FrozenModule) { + self.loaded_modules.borrow_mut().insert(path, module); + } + pub(super) fn eval_module(&self, path: &Path) -> Result { assert!(path.is_absolute()); // Push the script path onto the LOAD_STACK (used to detect circular loads) self.load_stack.borrow_mut().push(path.to_path_buf()); // Load and evaluate the script - let raw = fs::read_to_string(&path)?; + let raw = + fs::read_to_string(&path).map_err(|e| anyhow::anyhow!("{}: {}", path.display(), e))?; let ast = AstModule::parse(&path.to_string_lossy(), raw, &self.dialect)?; let module = Module::new(); @@ -124,15 +131,29 @@ impl<'p> FileLoader for AxlLoader<'p> { .last() .expect("module name stack should not be empty"); + // Track whether we need to push/pop a new module scope for dependency loads. + let new_module_scope = match &load_path { + LoadPath::ModuleSpecifier { module, .. } => Some(ModuleScope { + name: module.clone(), + path: self.deps_root.join(module), + }), + _ => None, + }; + let resolved_script_path = match &load_path { LoadPath::ModuleSpecifier { module, subpath } => { self.resolve_in_deps_root(&module, &subpath)? } LoadPath::ModuleSubpath(subpath) => self.resolve(&module_info.path, subpath)?, LoadPath::RelativePath(relpath) => { - let parent = parent_script_path - .strip_prefix(&module_info.path) - .expect("parent script path should have same prefix as current module"); + let parent = parent_script_path.strip_prefix(&module_info.path).expect( + format!( + "parent script path {} should have same prefix as current module {}", + parent_script_path.display(), + module_info.path.display(), + ) + .as_str(), + ); if let Some(parent) = parent.parent() { self.resolve(&module_info.path, &parent.join(relpath))? } else { @@ -167,8 +188,13 @@ impl<'p> FileLoader for AxlLoader<'p> { drop(load_stack); - // Push the resolved path to the stack so that relative imports from the file still works. 
- // load_stack.push(resolved_script_path.clone()); + // If loading a dependency module, push its scope so relative imports resolve correctly. + if let Some(scope) = &new_module_scope { + drop(module_stack); + self.module_stack.borrow_mut().push(scope.clone()); + } else { + drop(module_stack); + } // Read and parse the file content into an AST. let frozen_module = self @@ -176,6 +202,11 @@ impl<'p> FileLoader for AxlLoader<'p> { .map_err(|e| Into::::into(e))? .freeze()?; + // Pop the dependency module scope if we pushed one. + if new_module_scope.is_some() { + self.module_stack.borrow_mut().pop(); + } + // Pop the load stack after successful load // self.load_stack.borrow_mut().pop(); diff --git a/crates/axl-runtime/src/eval/mod.rs b/crates/axl-runtime/src/eval/mod.rs index cbbd3af95..7e4265242 100644 --- a/crates/axl-runtime/src/eval/mod.rs +++ b/crates/axl-runtime/src/eval/mod.rs @@ -13,5 +13,4 @@ pub(crate) use load_path::validate_module_name; // Task execution and introspection pub use task::FrozenTaskModuleLike; -pub use task::execute_task; pub use task::execute_task_with_args; diff --git a/crates/axl-runtime/src/eval/task.rs b/crates/axl-runtime/src/eval/task.rs index 50c31602f..7ef76e57e 100644 --- a/crates/axl-runtime/src/eval/task.rs +++ b/crates/axl-runtime/src/eval/task.rs @@ -4,11 +4,12 @@ use starlark::environment::Module; use starlark::eval::Evaluator; use starlark::values::Heap; use starlark::values::OwnedFrozenValue; +use starlark::values::Value; use starlark::values::ValueLike; -use std::collections::HashMap; use std::path::Path; use crate::engine::config::ConfiguredTask; +use crate::engine::config::fragment_map::FragmentMap; use crate::engine::store::AxlStore; use crate::engine::task::FrozenTask; use crate::engine::task_args::TaskArgs; @@ -62,58 +63,23 @@ impl FrozenTaskModuleLike for FrozenModule { } } -/// Executes a task using Buck2's temporary Module pattern. 
-/// -/// This creates a temporary Module for the execution heap, allowing us to: -/// 1. Keep task implementations frozen (immutable, thread-safe) -/// 2. Allocate execution-time values on a temporary heap -/// 3. Drop the temporary heap after execution -/// -/// The TaskContext is pre-frozen so WASM can access it directly via -/// `ctx.wasm` without needing runtime freezing. -pub fn execute_task( - task: &ConfiguredTask, - store: AxlStore, - args: HashMap, -) -> Result, EvalError> { - // Get config first - it needs to outlive the evaluator - let config = task.get_config(); - let config_value = config.value(); - - // Get the task implementation function - let task_impl = task - .implementation() - .ok_or_else(|| EvalError::UnknownError(anyhow!("task has no implementation")))?; - - // Create a module for TaskContext and freeze it immediately - // This allows WASM to access ctx directly without runtime freezing - let ctx_module = Module::new(); - let heap = ctx_module.heap(); - let task_args = TaskArgs::from_map(args, heap); - let task_info = TaskInfo { - name: task.get_name(), - group: task.get_group(), - }; - let context = heap.alloc(TaskContext::new(task_args, config_value, task_info)); - ctx_module.set("__ctx__", context); - - let frozen_ctx_module = ctx_module - .freeze() - .map_err(|e| EvalError::UnknownError(anyhow!("{:?}", e)))?; - // OwnedFrozenValue keeps the frozen heap alive for the duration of this function - let frozen_context = frozen_ctx_module - .get("__ctx__") - .map_err(|e| EvalError::UnknownError(anyhow!("failed to get frozen context: {:?}", e)))?; - - // Create execution module for the evaluator - let exec_module = Module::new(); - let mut eval = Evaluator::new(&exec_module); - eval.extra = Some(&store); - - // Call frozen task implementation with frozen context - let ret = eval.eval_function(task_impl.value(), &[frozen_context.value()], &[])?; - - Ok(ret.unpack_i32().map(|ex| ex as u8)) +/// Build a task-scoped FragmentMap on the given heap, 
containing only the +/// fragments the task opts into. +fn build_task_fragment_map<'v>( + fragment_data: &[(u64, Value<'static>, Value<'static>)], + task_fragment_ids: &[u64], + heap: &'v Heap, +) -> Value<'v> { + let map = FragmentMap::new(); + for (id, type_val, instance_val) in fragment_data { + if task_fragment_ids.contains(id) { + // SAFETY: fragment_data values live on a leaked heap that outlives this call + let tv: Value<'v> = unsafe { std::mem::transmute(*type_val) }; + let iv: Value<'v> = unsafe { std::mem::transmute(*instance_val) }; + map.insert(*id, tv, iv); + } + } + heap.alloc(map) } /// Executes a task with pre-built TaskArgs. @@ -123,12 +89,9 @@ pub fn execute_task( pub fn execute_task_with_args( task: &ConfiguredTask, store: AxlStore, + fragment_data: &[(u64, Value<'static>, Value<'static>)], args_builder: impl FnOnce(&Heap) -> TaskArgs, ) -> Result, EvalError> { - // Get config first - it needs to outlive the evaluator - let config = task.get_config(); - let config_value = config.value(); - // Get the task implementation function let task_impl = task .implementation() @@ -143,7 +106,11 @@ pub fn execute_task_with_args( name: task.get_name(), group: task.get_group(), }; - let context = heap.alloc(TaskContext::new(task_args, config_value, task_info)); + + // Build a task-scoped fragment map + let fragment_map = build_task_fragment_map(fragment_data, &task.fragment_type_ids, heap); + + let context = heap.alloc(TaskContext::new(task_args, fragment_map, task_info)); ctx_module.set("__ctx__", context); let frozen_ctx_module = ctx_module @@ -201,8 +168,15 @@ impl<'l, 'p> TaskEvaluator<'l, 'p> { .expect("just pushed a scope"); // Freeze immediately - module + let frozen = module .freeze() - .map_err(|e| EvalError::UnknownError(anyhow!(e))) + .map_err(|e| EvalError::UnknownError(anyhow!(e)))?; + + // Cache the frozen module so that subsequent load() calls for the same + // path (e.g., from config files) return this module instead of + // re-evaluating and 
creating new type instances with different IDs. + self.loader.cache_module(abs_path, frozen.clone()); + + Ok(frozen) } } diff --git a/crates/axl-runtime/src/lib.rs b/crates/axl-runtime/src/lib.rs index 3ee84e009..669dbdaa1 100644 --- a/crates/axl-runtime/src/lib.rs +++ b/crates/axl-runtime/src/lib.rs @@ -1,5 +1,4 @@ #![allow(clippy::new_without_default)] -mod builtins; pub mod engine; pub mod eval; pub mod module; diff --git a/crates/axl-runtime/src/module/disk_store.rs b/crates/axl-runtime/src/module/disk_store.rs index f7613062f..c1b17ecd5 100644 --- a/crates/axl-runtime/src/module/disk_store.rs +++ b/crates/axl-runtime/src/module/disk_store.rs @@ -12,8 +12,6 @@ use thiserror::Error; use tokio::fs::{self, File}; use tokio::io::AsyncWriteExt; -use crate::builtins; - use super::store::ModuleStore; use super::{AxlArchiveDep, AxlLocalDep, Dep}; @@ -223,34 +221,38 @@ impl DiskStore { fs::symlink(&dep.path, dep_path).await } + pub fn builtins_path(&self) -> PathBuf { + self.root().join("builtins") + } + pub async fn expand_store( &self, store: &ModuleStore, - ) -> Result, StoreError> { + builtins: Vec<(String, PathBuf)>, + ) -> Result, StoreError> { let root = self.root(); fs::create_dir_all(&root).await?; fs::create_dir_all(self.deps_path()).await?; fs::create_dir_all(&root.join("cas")).await?; fs::create_dir_all(&root.join("dl")).await?; - fs::create_dir_all(&root.join("builtins")).await?; let client = reqwest::Client::new(); - let mut all: HashMap = - builtins::expand_builtins(self.root.clone(), root.join("builtins"))? 
- .into_iter() - .map(|(name, path)| { - ( - name.clone(), - Dep::Local(AxlLocalDep { - name: name, - path: path, - // Builtins tasks are always auto used - auto_use_tasks: true, - }), - ) - }) - .collect(); + let mut all: HashMap = builtins + .into_iter() + .map(|(name, path)| { + ( + name.clone(), + Dep::Local(AxlLocalDep { + name: name, + path: path, + // Builtins tasks are always auto used + auto_use_tasks: true, + use_config: true, + }), + ) + }) + .collect(); all.extend(store.deps.take()); @@ -261,11 +263,11 @@ impl DiskStore { let dep_path = self.dep_path(dep.name()); match dep { - Dep::Local(local) if local.auto_use_tasks => { - module_roots.push((local.name.clone(), dep_path.clone())) + Dep::Local(local) if local.auto_use_tasks || local.use_config => { + module_roots.push((local.name.clone(), dep_path.clone(), local.use_config)) } - Dep::Remote(remote) if remote.auto_use_tasks => { - module_roots.push((remote.name.clone(), dep_path.clone())) + Dep::Remote(remote) if remote.auto_use_tasks || remote.use_config => { + module_roots.push((remote.name.clone(), dep_path.clone(), remote.use_config)) } _ => {} }; diff --git a/crates/axl-runtime/src/module/eval.rs b/crates/axl-runtime/src/module/eval.rs index 4e8fa55cf..34864c4e8 100644 --- a/crates/axl-runtime/src/module/eval.rs +++ b/crates/axl-runtime/src/module/eval.rs @@ -21,7 +21,7 @@ use crate::module::Dep; use super::super::eval::{EvalError, validate_module_name}; -use super::store::{AxlArchiveDep, ModuleStore}; +use super::store::{AxlArchiveDep, ModuleStore, UseConfigEntry}; #[starlark_module] pub fn register_globals(globals: &mut GlobalsBuilder) { @@ -69,6 +69,7 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { #[starlark(require = named)] urls: UnpackList, #[starlark(require = named)] dev: bool, #[starlark(require = named, default = false)] auto_use_tasks: bool, + #[starlark(require = named, default = false)] use_config: bool, #[starlark(require = named, default = String::new())] strip_prefix: 
String, eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { @@ -107,6 +108,7 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { integrity, dev: true, auto_use_tasks, + use_config, }), ); @@ -121,6 +123,7 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { #[starlark(require = named)] name: String, #[starlark(require = named)] path: String, #[starlark(require = named, default = false)] auto_use_tasks: bool, + #[starlark(require = named, default = false)] use_config: bool, eval: &mut Evaluator<'v, '_, '_>, ) -> anyhow::Result { if name == AXL_ROOT_MODULE_NAME { @@ -149,6 +152,7 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { name: name.clone(), path: abs_path, auto_use_tasks, + use_config, }), ); @@ -174,6 +178,59 @@ pub fn register_globals(globals: &mut GlobalsBuilder) { Ok(values::none::NoneType) } + + fn use_config<'v>( + #[starlark(require = pos)] path: String, + #[starlark(require = pos)] function: String, + #[starlark(require = named, default = UnpackList::default())] requires: UnpackList< + values::Value<'v>, + >, + #[starlark(require = named, default = UnpackList::default())] conflicts: UnpackList, + eval: &mut Evaluator<'v, '_, '_>, + ) -> anyhow::Result { + let store = ModuleStore::from_eval(eval)?; + let heap = eval.heap(); + + let mut parsed_requires = Vec::new(); + for req in requires.items { + if let Some(s) = req.unpack_str() { + parsed_requires.push((s.to_string(), None)); + } else if req.get_type() == "tuple" { + let len = req.length().map_err(|e| anyhow::anyhow!("{}", e))?; + if len != 2 { + anyhow::bail!( + "requires tuple must have exactly 2 elements (package, version_constraint)" + ); + } + let pkg = req + .at(heap.alloc(0), heap) + .map_err(|e| anyhow::anyhow!("{}", e))?; + let constraint = req + .at(heap.alloc(1), heap) + .map_err(|e| anyhow::anyhow!("{}", e))?; + let pkg = pkg.unpack_str().ok_or_else(|| { + anyhow::anyhow!("requires tuple first element must be a string") + })?; + let constraint = 
constraint.unpack_str().ok_or_else(|| { + anyhow::anyhow!("requires tuple second element must be a string") + })?; + parsed_requires.push((pkg.to_string(), Some(constraint.to_string()))); + } else { + anyhow::bail!( + "requires elements must be strings or tuples of (package, version_constraint)" + ); + } + } + + store.configs.borrow_mut().push(UseConfigEntry { + path, + function, + requires: parsed_requires, + conflicts: conflicts.items, + }); + + Ok(values::none::NoneType) + } } pub const AXL_MODULE_FILE: &str = "MODULE.aspect"; diff --git a/crates/axl-runtime/src/module/mod.rs b/crates/axl-runtime/src/module/mod.rs index 0dfc876b2..2c9298778 100644 --- a/crates/axl-runtime/src/module/mod.rs +++ b/crates/axl-runtime/src/module/mod.rs @@ -7,4 +7,4 @@ pub use eval::{ AXL_CONFIG_EXTENSION, AXL_MODULE_FILE, AXL_ROOT_MODULE_NAME, AXL_SCRIPT_EXTENSION, AXL_VERSION_EXTENSION, AxlModuleEvaluator, register_globals, }; -pub use store::{AxlArchiveDep, AxlLocalDep, Dep, ModuleStore}; +pub use store::{AxlArchiveDep, AxlLocalDep, Dep, ModuleStore, UseConfigEntry}; diff --git a/crates/axl-runtime/src/module/store.rs b/crates/axl-runtime/src/module/store.rs index 5b72d41ac..eeadd2501 100644 --- a/crates/axl-runtime/src/module/store.rs +++ b/crates/axl-runtime/src/module/store.rs @@ -14,6 +14,14 @@ use starlark::values::ProvidesStaticType; use starlark::values::StarlarkValue; use starlark::values::starlark_value; +#[derive(Clone, Debug)] +pub struct UseConfigEntry { + pub path: String, + pub function: String, + pub requires: Vec<(String, Option)>, + pub conflicts: Vec, +} + #[derive(Debug, ProvidesStaticType, Default)] pub struct ModuleStore { pub root_dir: PathBuf, @@ -21,6 +29,7 @@ pub struct ModuleStore { pub module_root: PathBuf, pub deps: Rc>>, pub tasks: Rc)>>>, + pub configs: Rc>>, } impl ModuleStore { @@ -31,6 +40,7 @@ impl ModuleStore { module_root, deps: Rc::new(RefCell::new(HashMap::new())), tasks: Rc::new(RefCell::new(HashMap::new())), + configs: 
Rc::new(RefCell::new(Vec::new())), } } @@ -46,6 +56,7 @@ impl ModuleStore { module_root: value.module_root.clone(), deps: Rc::clone(&value.deps), tasks: Rc::clone(&value.tasks), + configs: Rc::clone(&value.configs), }) } } @@ -63,6 +74,13 @@ impl Dep { Dep::Remote(remote) => &remote.name, } } + + pub fn use_config(&self) -> bool { + match self { + Dep::Local(local) => local.use_config, + Dep::Remote(remote) => remote.use_config, + } + } } #[derive(Clone, Debug, ProvidesStaticType, NoSerialize, Allocative, Display)] @@ -71,6 +89,7 @@ pub struct AxlLocalDep { pub name: String, pub path: PathBuf, pub auto_use_tasks: bool, + pub use_config: bool, } #[starlark_value(type = "AxlLocalDep")] @@ -88,6 +107,7 @@ pub struct AxlArchiveDep { pub name: String, pub strip_prefix: String, pub auto_use_tasks: bool, + pub use_config: bool, } #[starlark_value(type = "AxlArchiveDep")] From 17031e05840f47624cb0c006e488cf9da5104fcd Mon Sep 17 00:00:00 2001 From: thesayyn Date: Fri, 27 Feb 2026 15:09:49 -0800 Subject: [PATCH 2/6] separate rules_lint integration into axel-f-rules-lint branch Remove lint/format task stubs, linting strategy, SARIF utilities, and GitHub review helpers that depend on aspect_rules_lint. These require separate changes in aspect_rules_lint to land and are tracked on the axel-f-rules-lint branch. 
Co-Authored-By: Claude Sonnet 4.6 --- .../src/builtins/aspect/MODULE.aspect | 13 - .../src/builtins/aspect/config/lint.axl | 55 -- .../src/builtins/aspect/config/nolint.axl | 8 - .../src/builtins/aspect/lib/github.axl | 727 ------------------ .../src/builtins/aspect/lib/linting.axl | 393 ---------- .../src/builtins/aspect/lib/sarif.axl | 228 ------ .../builtins/aspect/tasks/dummy_format.axl | 22 - .../src/builtins/aspect/tasks/dummy_lint.axl | 22 - crates/aspect-cli/src/builtins/mod.rs | 16 - 9 files changed, 1484 deletions(-) delete mode 100644 crates/aspect-cli/src/builtins/aspect/config/lint.axl delete mode 100644 crates/aspect-cli/src/builtins/aspect/config/nolint.axl delete mode 100644 crates/aspect-cli/src/builtins/aspect/lib/github.axl delete mode 100644 crates/aspect-cli/src/builtins/aspect/lib/linting.axl delete mode 100644 crates/aspect-cli/src/builtins/aspect/lib/sarif.axl delete mode 100644 crates/aspect-cli/src/builtins/aspect/tasks/dummy_format.axl delete mode 100644 crates/aspect-cli/src/builtins/aspect/tasks/dummy_lint.axl diff --git a/crates/aspect-cli/src/builtins/aspect/MODULE.aspect b/crates/aspect-cli/src/builtins/aspect/MODULE.aspect index 87cfb746c..a904782b9 100644 --- a/crates/aspect-cli/src/builtins/aspect/MODULE.aspect +++ b/crates/aspect-cli/src/builtins/aspect/MODULE.aspect @@ -7,16 +7,3 @@ use_config("config/delivery.axl", "configure_delivery") # Configure builtins use_config("config/builtins.axl", "configure_builtins") - -# Configure rules_lint if its declared by user -use_config( - "config/lint.axl", - "configure_rules_lint", - requires = ["aspect_rules_lint"] -) - -use_config( - "config/nolint.axl", - "configure_dummy_lint", - conflicts = ["aspect_rules_lint"] -) diff --git a/crates/aspect-cli/src/builtins/aspect/config/lint.axl b/crates/aspect-cli/src/builtins/aspect/config/lint.axl deleted file mode 100644 index e62b47295..000000000 --- a/crates/aspect-cli/src/builtins/aspect/config/lint.axl +++ /dev/null @@ -1,55 +0,0 @@ 
-"""Configures rules_lint if its available""" - -load("../lib/platform.axl", - "read_platform_config", - "read_host_config", - "DEFAULT_PLATFORM_DIR" -) -load( - "../lib/github.axl", - "create_check_run", - "update_check_run", - "complete_check_run", - "build_output", - "build_annotation", - "create_review", - "build_suggestion", -) -load( - "../lib/sarif.axl", - "sarif_to_annotations", - "get_sarif_summary" -) -load( - "@aspect_rules_lint//lint/lint.axl", - "StrategyHoldTheLine", -) -load("../lib/linting.axl", "make_github_strategy", "make_github_changed_files_provider") - - - -def configure_rules_lint(ctx: ConfigContext): - - for task in ctx.tasks: - if task.name == "lint": - github_token = ctx.std.env.var("ASPECT_WORKFLOWS_PR_GITHUB_TOKEN") or ctx.std.env.var("GITHUB_TOKEN") - if github_token: - # CI mode: GitHub-aware strategy with hold-the-line - github_repository = ctx.std.env.var("GITHUB_REPOSITORY") or "" - repo_parts = github_repository.split("/") - gh_owner = repo_parts[0] if len(repo_parts) >= 2 else "" - gh_repo = repo_parts[1] if len(repo_parts) >= 2 else "" - - task.config.strategy = make_github_strategy( - StrategyHoldTheLine, - token = github_token, - owner = gh_owner, - repo = gh_repo, - mode = "streaming", - ) - task.config.changed_files_provider = make_github_changed_files_provider( - token = github_token, - owner = gh_owner, - repo = gh_repo, - ) - # else: local dev uses defaults (StrategyHoldTheLine + GitDiffProvider) diff --git a/crates/aspect-cli/src/builtins/aspect/config/nolint.axl b/crates/aspect-cli/src/builtins/aspect/config/nolint.axl deleted file mode 100644 index bf7ae89df..000000000 --- a/crates/aspect-cli/src/builtins/aspect/config/nolint.axl +++ /dev/null @@ -1,8 +0,0 @@ -"""Configures a dummy lint verb for migration.""" - -load("../tasks/dummy_lint.axl", "lint") -load("../tasks/dummy_format.axl", "format") - -def configure_dummy_lint(ctx: ConfigContext): - ctx.tasks.add(lint) - ctx.tasks.add(format) \ No newline at end of file 
diff --git a/crates/aspect-cli/src/builtins/aspect/lib/github.axl b/crates/aspect-cli/src/builtins/aspect/lib/github.axl deleted file mode 100644 index 2f86c050e..000000000 --- a/crates/aspect-cli/src/builtins/aspect/lib/github.axl +++ /dev/null @@ -1,727 +0,0 @@ -""" -GitHub Check Runs Client Library - -Client for creating and updating GitHub Check Runs via the GitHub API. -""" - -DEFAULT_GITHUB_API = "https://api.github.com" - - -def _normalize_output(output): - """ - Normalize output parameter to the required dict format. - - If output is a string, wraps it in a dict with title and summary. - If output is already a dict, returns it as-is. - """ - if output == None: - return None - if type(output) == "string": - return { - "title": "Check Run Output", - "summary": output, - } - return output - - -def _do_request(ctx, method, url, token, payload = None): - """ - Make an HTTP request to GitHub API. - - Args: - ctx: Context with http() - method: HTTP method ("POST" or "PATCH") - url: Full URL to request - token: GitHub token (PAT or Actions token) - payload: Optional dict to send as JSON body - - Returns: - (success: bool, status: int, body: dict or str) - """ - http = ctx.http() - - headers = { - "Authorization": "Bearer " + token, - "Accept": "application/vnd.github+json", - "Content-Type": "application/json", - "X-GitHub-Api-Version": "2022-11-28", - } - - if method == "POST": - response = http.post( - url = url, - headers = headers, - data = json.encode(payload) if payload else None, - ).block() - elif method == "PATCH": - response = http.patch( - url = url, - headers = headers, - data = json.encode(payload) if payload else None, - ).block() - else: - return (False, 0, "unsupported method: " + method) - - success = response.status >= 200 and response.status < 300 - - # Try to parse response as JSON - body = response.body - if body: - body = json.decode(body) - - return (success, response.status, body) - - -def _do_get_request(ctx, url, token): - """ - Make a GET 
request to GitHub API. - - Args: - ctx: Context with http() - url: Full URL to request - token: GitHub token (PAT or Actions token) - - Returns: - (success: bool, status: int, body: dict or str) - """ - http = ctx.http() - - headers = { - "Authorization": "Bearer " + token, - "Accept": "application/vnd.github+json", - "X-GitHub-Api-Version": "2022-11-28", - } - - response = http.get( - url = url, - headers = headers, - ).block() - - success = response.status >= 200 and response.status < 300 - - body = response.body - if body: - body = json.decode(body) - - return (success, response.status, body) - - -def _do_delete_request(ctx, url, token): - """ - Make a DELETE request to GitHub API. - - Args: - ctx: Context with http() - url: Full URL to request - token: GitHub token (PAT or Actions token) - - Returns: - (success: bool, status: int, body: str or None) - """ - http = ctx.http() - - headers = { - "Authorization": "Bearer " + token, - "Accept": "application/vnd.github+json", - "X-GitHub-Api-Version": "2022-11-28", - } - - response = http.delete( - url = url, - headers = headers, - ).block() - - success = response.status >= 200 and response.status < 300 - - return (success, response.status, response.body) - - -def get_pull_request(ctx, token, owner, repo, pull_number, api_base = DEFAULT_GITHUB_API): - """ - Get a pull request by number. 
- - Args: - ctx: Context with http() - token: GitHub token - owner: Repository owner - repo: Repository name - pull_number: The PR number - api_base: GitHub API base URL - - Returns: - dict with "success" (bool), "pull_request" (dict) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) - - success, status_code, body = _do_get_request(ctx, url, token) - - if success: - return { - "success": True, - "pull_request": body, - } - - error_msg = "request failed: " + str(status_code) - if body and type(body) == "dict" and body.get("message"): - error_msg = error_msg + " - " + body["message"] - - return {"success": False, "error": error_msg, "status": status_code} - - -def list_review_comments(ctx, token, owner, repo, pull_number, api_base = DEFAULT_GITHUB_API): - """ - List all review comments on a pull request. - - Handles pagination to retrieve all comments. - - Args: - ctx: Context with http() - token: GitHub token - owner: Repository owner - repo: Repository name - pull_number: The PR number - api_base: GitHub API base URL - - Returns: - dict with "success" (bool), "comments" (list) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - all_comments = [] - - for page in range(1, 101): # max 100 pages (10,000 comments) - url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) + "/comments?per_page=100&page=" + str(page) - - success, status_code, body = _do_get_request(ctx, url, token) - - if not success: - error_msg = "request failed: " + str(status_code) - if body and type(body) == "dict" and body.get("message"): - error_msg = error_msg + " - " + body["message"] - return {"success": False, "error": error_msg, "status": status_code} - - if not body or len(body) == 0: - break - - all_comments.extend(body) - - if len(body) < 100: - break - - return { - "success": True, - "comments": all_comments, - 
} - - -def delete_review_comment(ctx, token, owner, repo, comment_id, api_base = DEFAULT_GITHUB_API): - """ - Delete a review comment on a pull request. - - Args: - ctx: Context with http() - token: GitHub token - owner: Repository owner - repo: Repository name - comment_id: The comment ID to delete - api_base: GitHub API base URL - - Returns: - dict with "success" (bool) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - url = api_base + "/repos/" + owner + "/" + repo + "/pulls/comments/" + str(comment_id) - - success, status_code, body = _do_delete_request(ctx, url, token) - - if success: - return {"success": True} - - error_msg = "request failed: " + str(status_code) - return {"success": False, "error": error_msg, "status": status_code} - - -def list_pull_request_files(ctx, token, owner, repo, pull_number, api_base = DEFAULT_GITHUB_API): - """ - List files changed in a pull request. - - Handles pagination to retrieve all files. - - Args: - ctx: Context with http() - token: GitHub token - owner: Repository owner - repo: Repository name - pull_number: The PR number - api_base: GitHub API base URL - - Returns: - dict with "success" (bool), "files" (list) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - all_files = [] - - for page in range(1, 101): # max 100 pages (10,000 files) - url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) + "/files?per_page=100&page=" + str(page) - - success, status_code, body = _do_get_request(ctx, url, token) - - if not success: - error_msg = "request failed: " + str(status_code) - if body and type(body) == "dict" and body.get("message"): - error_msg = error_msg + " - " + body["message"] - return {"success": False, "error": error_msg, "status": status_code} - - if not body or len(body) == 0: - break - - all_files.extend(body) - - if len(body) < 100: - break - - return { - "success": True, - "files": all_files, - } - - -def 
create_check_run(ctx, token, owner, repo, name, head_sha, status = None, output = None, details_url = None, external_id = None, started_at = None, api_base = DEFAULT_GITHUB_API): - """ - Create a new check run on a commit. - - Args: - ctx: Context with http() - token: GitHub token (PAT or GITHUB_TOKEN from Actions) - owner: Repository owner - repo: Repository name - name: Name of the check run - head_sha: The SHA of the commit to create the check on - status: Optional status ("queued", "in_progress", "completed") - output: Optional dict with "title", "summary", and optionally "text", "annotations" - details_url: Optional URL for more details - external_id: Optional external identifier - started_at: Optional ISO 8601 timestamp - api_base: GitHub API base URL (default: https://api.github.com) - - Returns: - dict with "success" (bool), "check_run_id" (int), "html_url" (str) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - url = api_base + "/repos/" + owner + "/" + repo + "/check-runs" - - payload = { - "name": name, - "head_sha": head_sha, - } - - if status: - payload["status"] = status - if output: - payload["output"] = output - if details_url: - payload["details_url"] = details_url - if external_id: - payload["external_id"] = external_id - if started_at: - payload["started_at"] = started_at - - success, status_code, body = _do_request(ctx, "POST", url, token, payload) - - if success: - return { - "success": True, - "check_run_id": body.get("id"), - "html_url": body.get("html_url"), - "response": body, - } - - error_msg = "request failed: " + str(status_code) - if body and type(body) == "dict" and body.get("message"): - error_msg = error_msg + " - " + body["message"] - - return {"success": False, "error": error_msg, "status": status_code} - - -def update_check_run(ctx, token, owner, repo, check_run_id, status = None, conclusion = None, output = None, details_url = None, completed_at = None, api_base = DEFAULT_GITHUB_API): - 
""" - Update an existing check run. - - Args: - ctx: Context with http() - token: GitHub token - owner: Repository owner - repo: Repository name - check_run_id: The ID of the check run to update - status: Optional new status ("queued", "in_progress", "completed") - conclusion: Required if status is "completed". One of: - "action_required", "cancelled", "failure", "neutral", - "success", "skipped", "stale", "timed_out" - output: Optional dict with "title", "summary", and optionally "text", "annotations" - details_url: Optional URL for more details - completed_at: Optional ISO 8601 timestamp (required if conclusion is set) - api_base: GitHub API base URL - - Returns: - dict with "success" (bool), "check_run_id" (int) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - url = api_base + "/repos/" + owner + "/" + repo + "/check-runs/" + str(check_run_id) - - payload = {} - - if status: - payload["status"] = status - if conclusion: - payload["conclusion"] = conclusion - if output: - payload["output"] = output - if details_url: - payload["details_url"] = details_url - if completed_at: - payload["completed_at"] = completed_at - - success, status_code, body = _do_request(ctx, "PATCH", url, token, payload) - - if success: - return { - "success": True, - "check_run_id": body.get("id"), - "html_url": body.get("html_url"), - "response": body, - } - - error_msg = "request failed: " + str(status_code) - if body and type(body) == "dict" and body.get("message"): - error_msg = error_msg + " - " + body["message"] - - return {"success": False, "error": error_msg, "status": status_code} - - -def complete_check_run(ctx, token, owner, repo, check_run_id, conclusion, output = None, api_base = DEFAULT_GITHUB_API): - """ - Complete a check run with a conclusion. - - Convenience wrapper around update_check_run for completing checks. 
- - Args: - ctx: Context with http() - token: GitHub token - owner: Repository owner - repo: Repository name - check_run_id: The ID of the check run to complete - conclusion: One of: "action_required", "cancelled", "failure", - "neutral", "success", "skipped", "stale", "timed_out" - output: Optional dict with "title", "summary" - api_base: GitHub API base URL - - Returns: - dict with "success" (bool), "check_run_id" (int) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - return update_check_run( - ctx, - token, - owner, - repo, - check_run_id, - status = "completed", - conclusion = conclusion, - output = output, - api_base = api_base, - ) - - -def build_output(title, summary, text = None, annotations = None): - """ - Helper to build an output object for check runs. - - Args: - title: Title of the check run output - summary: Summary (supports markdown) - text: Optional detailed text (supports markdown) - annotations: Optional list of annotation dicts - - Returns: - dict suitable for the "output" parameter - """ - output = { - "title": title, - "summary": summary, - } - if text: - output["text"] = text - if annotations: - output["annotations"] = annotations - return output - - -def build_annotation(path, start_line, end_line, message, annotation_level = "warning", start_column = None, end_column = None, title = None, raw_details = None): - """ - Helper to build an annotation for check run output. 
- - Args: - path: Path of the file to annotate (relative to repo root) - start_line: Start line of the annotation - end_line: End line of the annotation - message: Short description of the feedback - annotation_level: "notice", "warning", or "failure" (default: "warning") - start_column: Optional start column - end_column: Optional end column - title: Optional title for the annotation - raw_details: Optional raw details string - - Returns: - dict suitable for the "annotations" list - """ - annotation = { - "path": path, - "start_line": start_line, - "end_line": end_line, - "annotation_level": annotation_level, - "message": message, - } - if start_column: - annotation["start_column"] = start_column - if end_column: - annotation["end_column"] = end_column - if title: - annotation["title"] = title - if raw_details: - annotation["raw_details"] = raw_details - return annotation - - -# ============================================================================= -# Pull Request Review Comments API -# ============================================================================= - -def create_review(ctx, token, owner, repo, pull_number, body = None, event = "COMMENT", comments = None, commit_id = None, api_base = DEFAULT_GITHUB_API): - """ - Create a pull request review with optional comments. - - This creates comments that appear directly on the PR diff page. 
- - Args: - ctx: Context with http() - token: GitHub token - owner: Repository owner - repo: Repository name - pull_number: The PR number - body: Optional review body text (shown at top of review) - event: Review action - "APPROVE", "REQUEST_CHANGES", or "COMMENT" (default) - comments: Optional list of review comment dicts (use build_review_comment) - commit_id: Optional commit SHA to review (defaults to PR head) - api_base: GitHub API base URL - - Returns: - dict with "success" (bool), "review_id" (int) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) + "/reviews" - - payload = { - "event": event, - } - - if body: - payload["body"] = body - if comments: - payload["comments"] = comments - if commit_id: - payload["commit_id"] = commit_id - - success, status_code, response_body = _do_request(ctx, "POST", url, token, payload) - - if success: - return { - "success": True, - "review_id": response_body.get("id"), - "html_url": response_body.get("html_url"), - "response": response_body, - } - - error_msg = "request failed: " + str(status_code) - if response_body and type(response_body) == "dict" and response_body.get("message"): - error_msg = error_msg + " - " + response_body["message"] - - return {"success": False, "error": error_msg, "status": status_code} - - -def create_review_comment(ctx, token, owner, repo, pull_number, body, path, line = None, commit_id = None, side = "RIGHT", start_line = None, start_side = None, subject_type = None, api_base = DEFAULT_GITHUB_API): - """ - Create a single review comment on a PR diff. 
- - Args: - ctx: Context with http() - token: GitHub token - owner: Repository owner - repo: Repository name - pull_number: The PR number - body: The comment text (supports markdown) - path: File path relative to repo root - line: Line number in the diff to comment on (required unless using subject_type="file") - commit_id: Optional commit SHA (defaults to PR head) - side: "LEFT" (deletion) or "RIGHT" (addition, default) - start_line: For multi-line comments, the first line - start_side: Side for start_line ("LEFT" or "RIGHT") - subject_type: "line" (default) or "file" for file-level comments - api_base: GitHub API base URL - - Returns: - dict with "success" (bool), "comment_id" (int) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - url = api_base + "/repos/" + owner + "/" + repo + "/pulls/" + str(pull_number) + "/comments" - - payload = { - "body": body, - "path": path, - } - - if subject_type: - payload["subject_type"] = subject_type - if line: - payload["line"] = line - payload["side"] = side - if commit_id: - payload["commit_id"] = commit_id - if start_line: - payload["start_line"] = start_line - if start_side: - payload["start_side"] = start_side - - success, status_code, response_body = _do_request(ctx, "POST", url, token, payload) - - if success: - return { - "success": True, - "comment_id": response_body.get("id"), - "html_url": response_body.get("html_url"), - "response": response_body, - } - - error_msg = "request failed: " + str(status_code) - if response_body and type(response_body) == "dict" and response_body.get("message"): - error_msg = error_msg + " - " + response_body["message"] - - return {"success": False, "error": error_msg, "status": status_code} - - -def build_review_comment(path, body, line = None, side = "RIGHT", start_line = None, start_side = None): - """ - Helper to build a review comment for use with create_review. 
- - Args: - path: File path relative to repo root - body: Comment text (supports markdown) - line: Line number in the diff (the ending line for multi-line) - side: "LEFT" (deletion) or "RIGHT" (addition, default) - start_line: For multi-line comments, the starting line - start_side: Side for start_line - - Returns: - dict suitable for the "comments" list in create_review - """ - comment = { - "path": path, - "body": body, - } - if line: - comment["line"] = line - comment["side"] = side - if start_line: - comment["start_line"] = start_line - if start_side: - comment["start_side"] = start_side - return comment - - -def build_suggestion(path, line, suggested_code, message = None, start_line = None): - """ - Helper to build a code suggestion comment that shows "Apply suggestion" button. - - Args: - path: File path relative to repo root - line: Line number to suggest replacement for (end line if multi-line) - suggested_code: The replacement code (what the line(s) should become) - message: Optional message to show above the suggestion - start_line: For multi-line suggestions, the starting line - - Returns: - dict suitable for the "comments" list in create_review - - Example: - # Single line suggestion - build_suggestion( - path = "src/main.py", - line = 42, - suggested_code = "const FOO = 'bar'", - message = "Use const instead of let for constants", - ) - - # Multi-line suggestion (replace lines 10-12 with new code) - build_suggestion( - path = "src/main.py", - start_line = 10, - line = 12, - suggested_code = "function foo() {\\n return bar\\n}", - ) - """ - body = "" - if message: - body = message + "\n\n" - body = body + "```suggestion\n" + suggested_code + "\n```" - - comment = { - "path": path, - "body": body, - "line": line, - "side": "RIGHT", - } - if start_line: - comment["start_line"] = start_line - comment["start_side"] = "RIGHT" - return comment - - -def create_suggestion(ctx, token, owner, repo, pull_number, path, line, suggested_code, message = None, 
start_line = None, commit_id = None, api_base = DEFAULT_GITHUB_API): - """ - Create a single code suggestion on a PR. - - This creates an "Apply suggestion" button on the PR diff. - - Args: - ctx: Context with http() - token: GitHub token - owner: Repository owner - repo: Repository name - pull_number: The PR number - path: File path relative to repo root - line: Line number to suggest replacement for - suggested_code: The replacement code - message: Optional message above the suggestion - start_line: For multi-line suggestions, the starting line - commit_id: Optional commit SHA (defaults to PR head) - api_base: GitHub API base URL - - Returns: - dict with "success" (bool), "comment_id" (int) on success - dict with "success" (False), "error" (str), "status" (int) on failure - """ - body = "" - if message: - body = message + "\n\n" - body = body + "```suggestion\n" + suggested_code + "\n```" - - return create_review_comment( - ctx, token, owner, repo, pull_number, - body = body, - path = path, - line = line, - commit_id = commit_id, - side = "RIGHT", - start_line = start_line, - start_side = "RIGHT" if start_line else None, - api_base = api_base, - ) diff --git a/crates/aspect-cli/src/builtins/aspect/lib/linting.axl b/crates/aspect-cli/src/builtins/aspect/lib/linting.axl deleted file mode 100644 index 155810725..000000000 --- a/crates/aspect-cli/src/builtins/aspect/lib/linting.axl +++ /dev/null @@ -1,393 +0,0 @@ -"""GitHub-aware lint strategy and changed files provider.""" - -load("./github.axl", "create_review", "create_review_comment", "get_pull_request", "list_review_comments", "delete_review_comment", "list_pull_request_files") -load("./sarif.axl", "parse_sarif", "sarif_to_review_comments") -load("@aspect_rules_lint//lint/lint.axl", "Strategy", "ChangedFilesProvider") - - -def _parse_github_diff_patch(patch): - """ - Parse a GitHub file patch string to extract added line numbers. 
- - Args: - patch: The patch string from GitHub's files API - - Returns: - List of 0-based line numbers of added lines - """ - if not patch: - return [] - - lines = [] - current_line = 0 - for line in patch.split("\n"): - if line.startswith("@@"): - # Parse hunk header: @@ -old,count +new,count @@ - parts = line.split(" ") - for part in parts: - if part.startswith("+") and part != "+++": - plus = part.removeprefix("+") - if "," in plus: - current_line = int(plus.split(",")[0]) - else: - current_line = int(plus) - break - elif line.startswith("+"): - # Added line (0-based) - lines.append(current_line - 1) - current_line += 1 - elif line.startswith("-"): - # Deleted line, don't increment current_line - pass - else: - # Context line - current_line += 1 - - return lines - - -def make_github_changed_files_provider(token, owner, repo): - """ - Create a ChangedFilesProvider that fetches changed files from the GitHub API. - - Args: - token: GitHub token - owner: Repository owner - repo: Repository name - - Returns: - ChangedFilesProvider instance - """ - def get_changed_files(ctx, state): - ref = ctx.std.env.var("GITHUB_REF") or "" - if not (ref.startswith("refs/pull/") and ref.endswith("/merge")): - return [] # not a PR build - - pr_number = int(ref.removeprefix("refs/pull/").removesuffix("/merge")) - state["pr_number"] = pr_number - - # Fetch changed files from GitHub API - result = list_pull_request_files(ctx, token, owner, repo, pr_number) - if not result["success"]: - return [] - - all_files = [] - for f in result["files"]: - if f.get("status", "") == "removed": - continue - filename = f.get("filename", "") - patch = f.get("patch", "") - added_lines = _parse_github_diff_patch(patch) - all_files.append({"file": filename, "lines": added_lines}) - - state["changed_lines"] = {f["file"]: f["lines"] for f in all_files} - return all_files - - return ChangedFilesProvider(get_changed_files = get_changed_files) - - -def _enrich_with_suggestions(ctx, comments): - """Read source 
files and append suggestion blocks for fixable comments.""" - file_cache = {} - for comment in comments: - fixes = comment.get("_fixes") - if not fixes: - continue - - path = comment["path"] - if path not in file_cache: - file_cache[path] = ctx.std.fs.read_to_string(path) - content = file_cache[path] - if not content: - continue - - lines = content.split("\n") - line_num = comment["line"] - if line_num < 1 or line_num > len(lines): - continue - - # Calculate byte offset of the target line start - line_byte_start = 0 - for i in range(line_num - 1): - line_byte_start += len(lines[i]) + 1 # +1 for \n - - original_line = lines[line_num - 1] - - # Convert absolute byte offsets to line-relative, filter to this line - applicable = [] - for f in fixes: - rel_start = f["byteOffset"] - line_byte_start - rel_end = rel_start + f["byteLength"] - if 0 <= rel_start and rel_end <= len(original_line): - applicable.append({ - "start": rel_start, - "end": rel_end, - "replacement": f["replacement"], - }) - - if not applicable: - continue - - # Apply in reverse position order to preserve earlier offsets - applicable = sorted(applicable, key = lambda f: f["start"], reverse = True) - fixed = original_line - for f in applicable: - fixed = fixed[:f["start"]] + f["replacement"] + fixed[f["end"]:] - - if fixed != original_line: - comment["body"] += "\n\n```suggestion\n" + fixed + "\n```" - - # Clean up internal metadata - comment.pop("_fixes", None) - - -# ============================================================================= -# GitHub Strategy Wrapper -# ============================================================================= - -def _build_comment_marker(tool, file, line, rule_id): - """Build a hidden HTML comment marker for identifying lint comments.""" - return "".format(tool, file, line, rule_id) - - -def _extract_comment_marker(body): - """Extract the aspect-lint marker from a comment body, or None.""" - prefix = "" - if not body: - return None - idx = body.find(prefix) - if 
idx < 0: - return None - end = body.find(suffix, idx) - if end < 0: - return None - return body[idx:end + len(suffix)] - - -def _check_staleness(ctx, state): - """ - Check if the current run is stale (PR HEAD has moved past our commit). - - Returns True if stale, False otherwise. - On API failure, assumes NOT stale. - """ - gh = state["github"] - pr_number = state.get("pr_number") - if not pr_number: - return False - - result = get_pull_request( - ctx, - token = gh["token"], - owner = gh["owner"], - repo = gh["repo"], - pull_number = pr_number, - ) - - if not result["success"]: - # API failure: assume not stale (better to post stale comments than lose results) - return False - - pr = result["pull_request"] - head_sha = pr.get("head", {}).get("sha", "") - return head_sha != gh["head_sha"] - - -def _filter_by_diff(comments, changed_lines): - """Keep only comments that target lines within the PR diff.""" - if not changed_lines: - return list(comments) - return [ - c for c in comments - if (c.get("line", 0) - 1) in (changed_lines.get(c.get("path", "")) or []) - ] - - -def _get_existing_markers(ctx, gh, pr_number): - """Fetch all aspect-lint markers currently on the PR. 
Returns {marker: True}.""" - result = list_review_comments( - ctx, token = gh["token"], owner = gh["owner"], - repo = gh["repo"], pull_number = pr_number, - ) - if not result["success"]: - return {} - markers = {} - for c in result["comments"]: - marker = _extract_comment_marker(c.get("body", "")) - if marker: - markers[marker] = True - return markers - - -def _post_as_review(ctx, gh, pr_number, comments, existing_markers): - """Post comments as a single grouped review, skipping duplicates.""" - to_post = [ - c for c in comments - if _extract_comment_marker(c.get("body", "")) not in existing_markers - ] - if not to_post: - return - create_review( - ctx, token = gh["token"], owner = gh["owner"], - repo = gh["repo"], pull_number = pr_number, - body = "Lint findings", event = "COMMENT", - comments = to_post, commit_id = gh["head_sha"], - ) - - -def _post_individually(ctx, gh, pr_number, comments, existing_markers): - """Post comments one at a time, skipping duplicates.""" - for c in comments: - marker = _extract_comment_marker(c.get("body", "")) - if marker and marker in existing_markers: - continue - result = create_review_comment( - ctx, token = gh["token"], owner = gh["owner"], - repo = gh["repo"], pull_number = pr_number, - body = c["body"], path = c["path"], - line = c.get("line"), commit_id = gh["head_sha"], - side = c.get("side", "RIGHT"), - start_line = c.get("start_line"), - start_side = c.get("start_side"), - ) - if result["success"] and marker: - existing_markers[marker] = True - - -def _cleanup_comments(ctx, state): - """Delete stale comments and deduplicate.""" - gh = state["github"] - pr_number = state.get("pr_number") - if not pr_number: - return - - # Desired markers: diagnostics that are within the diff - changed_lines = state.get("changed_lines", {}) - desired = {} - for diag in state.get("diagnostics", []): - lines = changed_lines.get(diag["file"]) - if lines and (diag["line"] - 1) in lines: - marker = _build_comment_marker(diag["tool"], 
diag["file"], diag["line"], diag["rule_id"]) - desired[marker] = True - - # Fetch fresh state of comments on PR - result = list_review_comments( - ctx, token = gh["token"], owner = gh["owner"], - repo = gh["repo"], pull_number = pr_number, - ) - if not result["success"]: - return - - # Group by marker - by_marker = {} - for c in result["comments"]: - marker = _extract_comment_marker(c.get("body", "")) - if not marker: - continue - if marker not in by_marker: - by_marker[marker] = [] - by_marker[marker].append(c) - - # Delete stale (not desired) and duplicates (keep newest) - for marker, comments in by_marker.items(): - if marker not in desired: - for c in comments: - delete_review_comment( - ctx, token = gh["token"], owner = gh["owner"], - repo = gh["repo"], comment_id = c["id"], - ) - elif len(comments) > 1: - by_id = sorted(comments, key = lambda c: c["id"]) - for c in by_id[:-1]: - delete_review_comment( - ctx, token = gh["token"], owner = gh["owner"], - repo = gh["repo"], comment_id = c["id"], - ) - - -def make_github_strategy(base_strategy, token, owner, repo, mode = "grouped"): - """ - Create a GitHub-aware strategy that wraps a base strategy with GitHub reporting. 
- - Args: - base_strategy: The underlying Strategy to delegate to - token: GitHub token - owner: Repository owner - repo: Repository name - mode: "grouped" posts one review at the end, - "streaming" posts comments individually as linters finish - - Returns: - Strategy instance with GitHub integration - """ - def setup(ctx, state): - base_strategy.setup(ctx, state) - state["github"] = { - "token": token, - "owner": owner, - "repo": repo, - "head_sha": ctx.std.env.var("GITHUB_SHA") or "", - "pending_comments": [], - "stale": False, - } - - def process(ctx, state, filepath): - # Accumulate diagnostics and build review comments - diag_count_before = len(state.get("diagnostics", [])) - base_strategy.process(ctx, state, filepath) - - gh = state["github"] - if gh["stale"]: - return - - content = ctx.std.fs.read_to_string(filepath) - sarif = parse_sarif(content) - comments = sarif_to_review_comments(sarif) - _enrich_with_suggestions(ctx, comments) - - # Stamp each comment with a hidden marker for identity tracking - new_diagnostics = state.get("diagnostics", [])[diag_count_before:] - for i, comment in enumerate(comments): - if i < len(new_diagnostics): - diag = new_diagnostics[i] - marker = _build_comment_marker( - diag["tool"], diag["file"], diag["line"], diag["rule_id"]) - comment["body"] = marker + "\n" + comment["body"] - - gh["pending_comments"].extend(comments) - - # In streaming mode, post comments as they arrive - if mode == "streaming": - pr_number = state.get("pr_number") - if not pr_number: - return - if "existing_markers" not in gh: - gh["existing_markers"] = _get_existing_markers(ctx, gh, pr_number) - ready = _filter_by_diff(gh["pending_comments"], state.get("changed_lines", {})) - _post_individually(ctx, gh, pr_number, ready, gh["existing_markers"]) - gh["pending_comments"] = [] - - def finish(ctx, state): - gh = state["github"] - - if gh["stale"] or _check_staleness(ctx, state): - gh["stale"] = True - return base_strategy.finish(ctx, state) - - pr_number = 
state.get("pr_number") - if pr_number: - if mode == "grouped": - existing = _get_existing_markers(ctx, gh, pr_number) - ready = _filter_by_diff(gh["pending_comments"], state.get("changed_lines", {})) - _post_as_review(ctx, gh, pr_number, ready, existing) - _cleanup_comments(ctx, state) - - return base_strategy.finish(ctx, state) - - return Strategy( - needs_machine = base_strategy.needs_machine, - setup = setup, - process = process, - finish = finish, - ) diff --git a/crates/aspect-cli/src/builtins/aspect/lib/sarif.axl b/crates/aspect-cli/src/builtins/aspect/lib/sarif.axl deleted file mode 100644 index 2acdb2941..000000000 --- a/crates/aspect-cli/src/builtins/aspect/lib/sarif.axl +++ /dev/null @@ -1,228 +0,0 @@ -""" -SARIF (Static Analysis Results Interchange Format) GitHub Translation - -Converts SARIF output from linters into GitHub PR review comments and annotations. -Base parsing utilities (parse_sarif, get_sarif_summary) are loaded from rules_lint. -""" - -load("@aspect_rules_lint//lint/sarif.axl", "parse_sarif", "get_sarif_summary") - - -def _get_level_emoji(level): - """Map SARIF level to display text.""" - if level == "error": - return "error" - elif level == "warning": - return "warning" - elif level == "note": - return "note" - return level or "warning" - - -def sarif_result_to_comment(result, tool_name): - """ - Convert a single SARIF result to a GitHub review comment dict. 
- - Args: - result: A single result from runs[].results[] - tool_name: Name of the tool (from runs[].tool.driver.name) - - Returns: - dict suitable for create_review comments list, or None if invalid - """ - locations = result.get("locations", []) - if not locations: - return None - - location = locations[0] - physical = location.get("physicalLocation") - if not physical: - return None - - artifact = physical.get("artifactLocation", {}) - path = artifact.get("uri") - if not path: - return None - - region = physical.get("region", {}) - start_line = region.get("startLine") - end_line = region.get("endLine", start_line) - - if not start_line: - return None - - # Build comment body - level = _get_level_emoji(result.get("level", "warning")) - message_obj = result.get("message", {}) - message = message_obj.get("text", "") - - body = "**{}** ({})".format(tool_name, level) - if message: - body = body + "\n\n" + message - - comment = { - "path": path, - "line": end_line, - "side": "RIGHT", - "body": body, - } - - # Multi-line comment if start != end - if start_line != end_line: - comment["start_line"] = start_line - comment["start_side"] = "RIGHT" - - # Extract fix hints from relatedLocations - related = result.get("relatedLocations", []) - fixes = [] - for loc in related: - msg = loc.get("message", {}).get("text", "") - if not msg.startswith("try"): - continue - region = loc.get("physicalLocation", {}).get("region", {}) - byte_offset = region.get("byteOffset") - byte_length = region.get("byteLength") - if byte_offset == None or byte_length == None: - continue - # Parse replacement text from "try" message - if msg == "try": - replacement = "" - else: - text = msg[4:] # strip "try " - # Strip decorative outer quotes (clippy wraps replacements in quotes) - if len(text) >= 2 and text[0] == '"' and text[-1] == '"': - text = text[1:-1] - replacement = text - fixes.append({ - "byteOffset": byte_offset, - "byteLength": byte_length, - "replacement": replacement, - }) - if fixes: - 
comment["_fixes"] = fixes - - return comment - - -def sarif_to_review_comments(sarif): - """ - Convert SARIF output to GitHub review comments. - - Args: - sarif: Parsed SARIF dict (or JSON string) - - Returns: - List of comment dicts suitable for create_review - """ - if type(sarif) == "string": - sarif = json.decode(sarif) - - comments = [] - runs = sarif.get("runs", []) - - for run in runs: - tool = run.get("tool", {}) - driver = tool.get("driver", {}) - tool_name = driver.get("name", "Linter") - - results = run.get("results", []) - for result in results: - comment = sarif_result_to_comment(result, tool_name) - if comment: - comments.append(comment) - - return comments - - -def sarif_to_annotations(sarif): - """ - Convert SARIF output to GitHub Check Run annotations. - - Args: - sarif: Parsed SARIF dict (or JSON string) - - Returns: - List of annotation dicts suitable for build_output - """ - if type(sarif) == "string": - sarif = json.decode(sarif) - - annotations = [] - runs = sarif.get("runs", []) - - for run in runs: - tool = run.get("tool", {}) - driver = tool.get("driver", {}) - tool_name = driver.get("name", "Linter") - - results = run.get("results", []) - for result in results: - annotation = sarif_result_to_annotation(result, tool_name) - if annotation: - annotations.append(annotation) - - return annotations - - -def sarif_result_to_annotation(result, tool_name): - """ - Convert a single SARIF result to a GitHub Check Run annotation. 
- - Args: - result: A single result from runs[].results[] - tool_name: Name of the tool - - Returns: - dict suitable for check run annotations list, or None if invalid - """ - locations = result.get("locations", []) - if not locations: - return None - - location = locations[0] - physical = location.get("physicalLocation") - if not physical: - return None - - artifact = physical.get("artifactLocation", {}) - path = artifact.get("uri") - if not path: - return None - - region = physical.get("region", {}) - start_line = region.get("startLine") - end_line = region.get("endLine", start_line) - - if not start_line: - return None - - # Map SARIF level to GitHub annotation level - sarif_level = result.get("level", "warning") - if sarif_level == "error": - annotation_level = "failure" - elif sarif_level == "warning": - annotation_level = "warning" - else: - annotation_level = "notice" - - message_obj = result.get("message", {}) - message = message_obj.get("text", "") - - annotation = { - "path": path, - "start_line": start_line, - "end_line": end_line, - "annotation_level": annotation_level, - "message": message, - "title": tool_name, - } - - # Add column info if available - start_column = region.get("startColumn") - end_column = region.get("endColumn") - if start_column: - annotation["start_column"] = start_column - if end_column: - annotation["end_column"] = end_column - - return annotation diff --git a/crates/aspect-cli/src/builtins/aspect/tasks/dummy_format.axl b/crates/aspect-cli/src/builtins/aspect/tasks/dummy_format.axl deleted file mode 100644 index 5d6470063..000000000 --- a/crates/aspect-cli/src/builtins/aspect/tasks/dummy_format.axl +++ /dev/null @@ -1,22 +0,0 @@ -""" -A stub 'format' task registered when aspect_rules_lint is not installed. -Prints a helpful message directing the user to install the lint package. 
-""" - -def _format_impl(ctx: TaskContext) -> int: - ctx.std.io.stderr.write("Error: The format task requires the aspect_rules_lint package.\n") - ctx.std.io.stderr.write("\n") - ctx.std.io.stderr.write("Install it by running:\n") - ctx.std.io.stderr.write("\n") - ctx.std.io.stderr.write(" aspect axl add gh:aspect-build/rules_lint\n") - ctx.std.io.stderr.write("\n") - return 1 - -format = task( - name = "format", - implementation = _format_impl, - description = "Format source code (requires aspect_rules_lint)", - args = { - "all": args.positional(minimum = 0, maximum = 1000) - } -) diff --git a/crates/aspect-cli/src/builtins/aspect/tasks/dummy_lint.axl b/crates/aspect-cli/src/builtins/aspect/tasks/dummy_lint.axl deleted file mode 100644 index 345211053..000000000 --- a/crates/aspect-cli/src/builtins/aspect/tasks/dummy_lint.axl +++ /dev/null @@ -1,22 +0,0 @@ -""" -A stub 'lint' task registered when aspect_rules_lint is not installed. -Prints a helpful message directing the user to install the lint package. 
-""" - -def _lint_impl(ctx: TaskContext) -> int: - ctx.std.io.stderr.write("Error: The lint task requires the aspect_rules_lint package.\n") - ctx.std.io.stderr.write("\n") - ctx.std.io.stderr.write("Install it by running:\n") - ctx.std.io.stderr.write("\n") - ctx.std.io.stderr.write(" aspect axl add gh:aspect-build/rules_lint\n") - ctx.std.io.stderr.write("\n") - return 1 - -lint = task( - name = "lint", - implementation = _lint_impl, - description = "Run linters (requires aspect_rules_lint)", - args = { - "all": args.positional(minimum = 0, maximum = 1000) - } -) diff --git a/crates/aspect-cli/src/builtins/mod.rs b/crates/aspect-cli/src/builtins/mod.rs index 03dd6533c..8f9873dbc 100644 --- a/crates/aspect-cli/src/builtins/mod.rs +++ b/crates/aspect-cli/src/builtins/mod.rs @@ -26,11 +26,6 @@ const ASPECT: Builtin = Builtin { "config/delivery.axl", include_str!("./aspect/config/delivery.axl"), ), - ("config/lint.axl", include_str!("./aspect/config/lint.axl")), - ( - "config/nolint.axl", - include_str!("./aspect/config/nolint.axl"), - ), ( "config/artifacts.axl", include_str!("./aspect/config/artifacts.axl"), @@ -40,26 +35,15 @@ const ASPECT: Builtin = Builtin { "tasks/delivery.axl", include_str!("./aspect/tasks/delivery.axl"), ), - ( - "tasks/dummy_lint.axl", - include_str!("./aspect/tasks/dummy_lint.axl"), - ), - ( - "tasks/dummy_format.axl", - include_str!("./aspect/tasks/dummy_format.axl"), - ), // lib/ ( "lib/deliveryd.axl", include_str!("./aspect/lib/deliveryd.axl"), ), - ("lib/github.axl", include_str!("./aspect/lib/github.axl")), - ("lib/linting.axl", include_str!("./aspect/lib/linting.axl")), ( "lib/platform.axl", include_str!("./aspect/lib/platform.axl"), ), - ("lib/sarif.axl", include_str!("./aspect/lib/sarif.axl")), ( "lib/health_check.axl", include_str!("./aspect/lib/health_check.axl"), From bf16c48b028a0d27cbdefdcb95bc6d7288ef56e0 Mon Sep 17 00:00:00 2001 From: thesayyn Date: Fri, 27 Feb 2026 15:37:12 -0800 Subject: [PATCH 3/6] healthcheck fragment --- 
crates/aspect-cli/src/builtins/aspect/build.axl | 17 ++++++++++------- .../src/builtins/aspect/config/builtins.axl | 17 +++++++++-------- .../src/builtins/aspect/config/delivery.axl | 2 +- .../builtins/aspect/{tasks => }/delivery.axl | 16 +++++++++++++--- .../src/builtins/aspect/fragments.axl | 6 +++++- .../src/builtins/aspect/lib/environment.axl | 2 +- .../src/builtins/aspect/lib/health_check.axl | 13 ++++++++++--- crates/aspect-cli/src/builtins/aspect/test.axl | 17 ++++++++++------- crates/aspect-cli/src/builtins/mod.rs | 6 +----- 9 files changed, 60 insertions(+), 36 deletions(-) rename crates/aspect-cli/src/builtins/aspect/{tasks => }/delivery.axl (95%) diff --git a/crates/aspect-cli/src/builtins/aspect/build.axl b/crates/aspect-cli/src/builtins/aspect/build.axl index f85838d21..2dedf4245 100644 --- a/crates/aspect-cli/src/builtins/aspect/build.axl +++ b/crates/aspect-cli/src/builtins/aspect/build.axl @@ -1,7 +1,7 @@ """ A default 'build' task that wraps a 'bazel build' command. """ -load("./fragments.axl", "BazelFragment") +load("./fragments.axl", "BazelFragment", "HealthCheckFragment") def _collect_bes_from_args(ctx): """Collect BES sinks from CLI args (--bes_backend/--bes_header).""" @@ -20,14 +20,16 @@ def _collect_bes_from_args(ctx): return sinks def impl(ctx: TaskContext) -> int: - health = ctx.bazel.health_check() + hc_fragment = ctx.fragments[HealthCheckFragment] fragment = ctx.fragments[BazelFragment] - if fragment.post_health_check: - fragment.post_health_check(ctx, health) + for hook in hc_fragment.pre_health_check: + hook(ctx) - if health.outcome == "unhealthy": - fail("Bazel server is unhealthy: " + health.message) + for hook in hc_fragment.post_health_check: + result = hook(ctx) + if result != None: + fail(result) # Flags: accumulate data, then optionally transform flags = ["--isatty=" + str(int(ctx.std.io.stdout.is_tty))] @@ -89,7 +91,8 @@ def impl(ctx: TaskContext) -> int: build = task( implementation = impl, fragments = [ - BazelFragment + 
BazelFragment, + HealthCheckFragment, ], args = { "target_pattern": args.positional(minimum = 1, maximum = 512, default = ["..."]), diff --git a/crates/aspect-cli/src/builtins/aspect/config/builtins.axl b/crates/aspect-cli/src/builtins/aspect/config/builtins.axl index b9699c01d..8ffb3a041 100644 --- a/crates/aspect-cli/src/builtins/aspect/config/builtins.axl +++ b/crates/aspect-cli/src/builtins/aspect/config/builtins.axl @@ -7,14 +7,14 @@ load("../lib/platform.axl", "DEFAULT_PLATFORM_DIR" ) load("../lib/health_check.axl", "agent_health_check") -load("../lib/environment.axl", "configure_workflows_env") +load("../lib/environment.axl", "workflows_runner_env") load("../lib/build_metadata.axl", "get_build_metadata_flags") -load("../fragments.axl", "BazelFragment") +load("../fragments.axl", "BazelFragment", "HealthCheckFragment") load("./artifacts.axl", "configure_artifacts") -def _post_health_check(ctx, health): +def _post_health_check(ctx): print("--- :aspect: Agent Health Check") - agent_health_check(ctx, health) + return agent_health_check(ctx) def configure_builtins(ctx: ConfigContext): UNDER_WORKFLOWS = ctx.std.env.var("ASPECT_WORKFLOWS_RUNNER_VERSION") @@ -22,14 +22,15 @@ def configure_builtins(ctx: ConfigContext): if UNDER_WORKFLOWS: if is_buildkite: print("--- :aspect: Workflows Runner Environment") - configure_workflows_env(ctx.std.fs) + workflows_runner_env(ctx.std.fs) # Read platform config from disk platform_config = read_platform_config(ctx.std.fs) # Read host config from environment host_config = read_host_config(ctx.std.env, ctx.std.io) - # Configure BazelFragment globally + # Configure fragments globally bazel_fragment = ctx.fragments[BazelFragment] + hc_fragment = ctx.fragments[HealthCheckFragment] if UNDER_WORKFLOWS: # Generate bazelrc content @@ -59,9 +60,9 @@ def configure_builtins(ctx: ConfigContext): bazel_fragment.build_start.append(lambda ctx, state: print("+++ :bazel: Building")) bazel_fragment.build_event_sinks.extend(bessie_sinks) if 
is_buildkite: - bazel_fragment.post_health_check = _post_health_check + hc_fragment.post_health_check.append(_post_health_check) else: - bazel_fragment.post_health_check = agent_health_check + hc_fragment.post_health_check.append(agent_health_check) # Wire artifact uploads last so it wraps existing hooks configure_artifacts(ctx) diff --git a/crates/aspect-cli/src/builtins/aspect/config/delivery.axl b/crates/aspect-cli/src/builtins/aspect/config/delivery.axl index 8ba1bb9bb..420b48dd1 100644 --- a/crates/aspect-cli/src/builtins/aspect/config/delivery.axl +++ b/crates/aspect-cli/src/builtins/aspect/config/delivery.axl @@ -1,6 +1,6 @@ """Configures delivery task for Workflows""" -load("../tasks/delivery.axl", "delivery") +load("../delivery.axl", "delivery") load("../fragments.axl", "DeliveryFragment") load("../lib/deliveryd.axl", deliveryd_health = "health") diff --git a/crates/aspect-cli/src/builtins/aspect/tasks/delivery.axl b/crates/aspect-cli/src/builtins/aspect/delivery.axl similarity index 95% rename from crates/aspect-cli/src/builtins/aspect/tasks/delivery.axl rename to crates/aspect-cli/src/builtins/aspect/delivery.axl index 4c5d77696..a77cf629a 100644 --- a/crates/aspect-cli/src/builtins/aspect/tasks/delivery.axl +++ b/crates/aspect-cli/src/builtins/aspect/delivery.axl @@ -7,10 +7,10 @@ to prevent re-delivery. Uses deliveryd (Unix socket HTTP server) for all delivery state operations. 
""" -load("../fragments.axl", "DeliveryFragment") +load("./fragments.axl", "DeliveryFragment", "HealthCheckFragment") load( - "../lib/deliveryd.axl", + "./lib/deliveryd.axl", deliveryd_query = "query", deliveryd_deliver = "deliver", deliveryd_record = "record", @@ -89,6 +89,16 @@ def _deliver_target(ctx, endpoint, ci_host, workspace, build_url, bazel_flags, l return ("success", "Delivered successfully") def _delivery_impl(ctx): + hc_fragment = ctx.fragments[HealthCheckFragment] + + for hook in hc_fragment.pre_health_check: + hook(ctx) + + for hook in hc_fragment.post_health_check: + result = hook(ctx) + if result != None: + fail(result) + fragment = ctx.fragments[DeliveryFragment] fragment.delivery_start() @@ -224,7 +234,7 @@ def _delivery_impl(ctx): delivery = task( name = "delivery", implementation = _delivery_impl, - fragments = [DeliveryFragment], + fragments = [DeliveryFragment, HealthCheckFragment], args = { "ci_host": args.string(default = "bk"), "commit_sha": args.string(), diff --git a/crates/aspect-cli/src/builtins/aspect/fragments.axl b/crates/aspect-cli/src/builtins/aspect/fragments.axl index 152409a36..7eeee36a1 100644 --- a/crates/aspect-cli/src/builtins/aspect/fragments.axl +++ b/crates/aspect-cli/src/builtins/aspect/fragments.axl @@ -14,13 +14,17 @@ BazelFragment = fragment( startup_flags = attr(typing.Callable[[list[str]], list[str]] | None, None), # Lifecycle hooks — lists of callables, all receive (ctx, state, ...) 
- post_health_check = attr(typing.Callable | None, None), build_start = attr(list[typing.Callable[[TaskContext, dict], None]], []), build_event = attr(list[typing.Callable[[TaskContext, dict, dict], None]], []), build_retry = attr(typing.Callable[[int], bool], default_retry), build_end = attr(list[typing.Callable[[TaskContext, dict, int], None]], []), ) +HealthCheckFragment = fragment( + pre_health_check = attr(list[typing.Callable[[TaskContext], None]], []), + post_health_check = attr(list[typing.Callable[[TaskContext], str | None]], []), +) + DeliveryFragment = fragment( delivery_start = attr(typing.Callable[[], None], lambda: None), delivery_end = attr(typing.Callable[[], None], lambda: None), diff --git a/crates/aspect-cli/src/builtins/aspect/lib/environment.axl b/crates/aspect-cli/src/builtins/aspect/lib/environment.axl index 18613fa60..bae40eb92 100644 --- a/crates/aspect-cli/src/builtins/aspect/lib/environment.axl +++ b/crates/aspect-cli/src/builtins/aspect/lib/environment.axl @@ -89,7 +89,7 @@ def _print_gcp_info(meta): print("\tCLI: 'gcloud logging read --project " + meta["account_id"] + " \"resource.type=gce_instance resource.labels.instance_id=" + meta["instance_id"] + " log_name=projects/" + meta["account_id"] + "/logs/google_metadata_script_runner\" --format=\"value(jsonPayload.message)\" --freshness=30d | tac'") -def configure_workflows_env(fs): +def workflows_runner_env(fs): """ Print debug/diagnostic information about the runner environment. diff --git a/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl b/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl index 632561777..63d066b05 100644 --- a/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl +++ b/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl @@ -157,20 +157,27 @@ def _display_runner_health(fs): print(data["output"]) -def agent_health_check(ctx, health): +def agent_health_check(ctx): """ - Post health check hook for BazelFragment. 
+ Post health check hook for HealthCheckFragment. Runs the agent health check at the start of every job: 1. Waits for warming to complete 2. Displays the last runner health check + 3. Runs the Bazel health check and displays the result Args: ctx: TaskContext - health: bazel.HealthCheckResult from the built-in health check + + Returns: + None if healthy, or a str error message if the Bazel server is unhealthy. """ fs = ctx.std.fs config = read_platform_config(fs) _wait_for_warming(fs, config) _display_runner_health(fs) + health = ctx.bazel.health_check() _display_bazel_health(health) + if health.outcome == "unhealthy": + return health.message or "Bazel server is unhealthy" + return None diff --git a/crates/aspect-cli/src/builtins/aspect/test.axl b/crates/aspect-cli/src/builtins/aspect/test.axl index 61cb59746..4c112e42e 100644 --- a/crates/aspect-cli/src/builtins/aspect/test.axl +++ b/crates/aspect-cli/src/builtins/aspect/test.axl @@ -1,7 +1,7 @@ """ A default 'test' task that wraps a 'bazel test' command. 
""" -load("./fragments.axl", "BazelFragment") +load("./fragments.axl", "BazelFragment", "HealthCheckFragment") def _collect_bes_from_args(ctx): """Collect BES sinks from CLI args (--bes_backend/--bes_header).""" @@ -20,14 +20,16 @@ def _collect_bes_from_args(ctx): return sinks def _test_impl(ctx: TaskContext) -> int: - health = ctx.bazel.health_check() + hc_fragment = ctx.fragments[HealthCheckFragment] fragment = ctx.fragments[BazelFragment] - if fragment.post_health_check: - fragment.post_health_check(ctx, health) + for hook in hc_fragment.pre_health_check: + hook(ctx) - if health.outcome == "unhealthy": - fail("Bazel server is unhealthy: " + health.message) + for hook in hc_fragment.post_health_check: + result = hook(ctx) + if result != None: + fail(result) # Flags: accumulate data, then optionally transform flags = ["--isatty=" + str(int(ctx.std.io.stdout.is_tty))] @@ -89,7 +91,8 @@ def _test_impl(ctx: TaskContext) -> int: test = task( implementation = _test_impl, fragments = [ - BazelFragment + BazelFragment, + HealthCheckFragment, ], args = { # TODO: Support a long --pattern_file like bazel does (@./targets) diff --git a/crates/aspect-cli/src/builtins/mod.rs b/crates/aspect-cli/src/builtins/mod.rs index 8f9873dbc..64fdc462d 100644 --- a/crates/aspect-cli/src/builtins/mod.rs +++ b/crates/aspect-cli/src/builtins/mod.rs @@ -16,6 +16,7 @@ const ASPECT: Builtin = Builtin { ("fragments.axl", include_str!("./aspect/fragments.axl")), ("test.axl", include_str!("./aspect/test.axl")), ("axl_add.axl", include_str!("./aspect/axl_add.axl")), + ("delivery.axl", include_str!("./aspect/delivery.axl")), ("MODULE.aspect", include_str!("./aspect/MODULE.aspect")), // config/ ( @@ -30,11 +31,6 @@ const ASPECT: Builtin = Builtin { "config/artifacts.axl", include_str!("./aspect/config/artifacts.axl"), ), - // tasks/ - ( - "tasks/delivery.axl", - include_str!("./aspect/tasks/delivery.axl"), - ), // lib/ ( "lib/deliveryd.axl", From 498b534b295d1e90cc61d4b15989613b39dcca39 Mon Sep 17 
00:00:00 2001 From: thesayyn Date: Fri, 27 Feb 2026 19:20:01 -0800 Subject: [PATCH 4/6] cut dependency on platform files --- .buildkite/hooks/pre-command | 17 +- .../src/builtins/aspect/MODULE.aspect | 3 + .../src/builtins/aspect/config/builtins.axl | 50 +-- .../builtins/aspect/lib/build_metadata.axl | 36 +- .../src/builtins/aspect/lib/environment.axl | 393 +++++++++++++++--- .../src/builtins/aspect/lib/health_check.axl | 81 ++-- .../src/builtins/aspect/lib/platform.axl | 234 ----------- crates/aspect-cli/src/builtins/mod.rs | 4 - crates/axl-runtime/Cargo.toml | 1 - 9 files changed, 407 insertions(+), 412 deletions(-) delete mode 100644 crates/aspect-cli/src/builtins/aspect/lib/platform.axl diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index ef99d8ef7..195c884d1 100755 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -20,26 +20,13 @@ if ! command -v bazel >/dev/null 2>&1; then exit 0 fi -# If the bazel server is busy, the runner is in a bad state — reap it so the autoscaler -# replaces it with a fresh instance. -reap_runner() { - echo "--- Bazel server is unhealthy, reaping runner" - REGION=$(cat /etc/aspect/workflows/platform/region) - INSTANCE_ID=$(cat /etc/aspect/workflows/platform/instance_id) - /etc/aspect/workflows/lib/aws.sh autoscaling set-instance-health \ - --region "${REGION}" \ - --instance-id "${INSTANCE_ID}" \ - --health-status Unhealthy \ - --no-should-respect-grace-period -} - # We use a short timeout so we capture the "Another command (pid=X)" message without # blocking indefinitely. 
LOCK_OUTPUT=$(timeout 5 bazel $BAZEL_STARTUP_OPTS info 2>&1) || true BUSY_PID=$(echo "$LOCK_OUTPUT" | grep -o '(pid=[0-9]*)' | grep -o '[0-9]*') || true if [ -n "$BUSY_PID" ]; then - reap_runner - exit 78 + /etc/aspect/workflows/bin/signal_instance_unhealthy + exit 78 fi # Build aspect-cli so version.axl can pick it up from bazel-bin/ diff --git a/crates/aspect-cli/src/builtins/aspect/MODULE.aspect b/crates/aspect-cli/src/builtins/aspect/MODULE.aspect index a904782b9..4082814bc 100644 --- a/crates/aspect-cli/src/builtins/aspect/MODULE.aspect +++ b/crates/aspect-cli/src/builtins/aspect/MODULE.aspect @@ -7,3 +7,6 @@ use_config("config/delivery.axl", "configure_delivery") # Configure builtins use_config("config/builtins.axl", "configure_builtins") + +# Configure artifact uploads +use_config("config/artifacts.axl", "configure_artifacts") diff --git a/crates/aspect-cli/src/builtins/aspect/config/builtins.axl b/crates/aspect-cli/src/builtins/aspect/config/builtins.axl index 8ffb3a041..be9d86a1c 100644 --- a/crates/aspect-cli/src/builtins/aspect/config/builtins.axl +++ b/crates/aspect-cli/src/builtins/aspect/config/builtins.axl @@ -1,47 +1,34 @@ """Configures builtin tasks for Workflows""" -load("../lib/platform.axl", - "read_platform_config", - "read_host_config", +load("../lib/environment.axl", + "get_environment", "get_bazelrc_flags", - "DEFAULT_PLATFORM_DIR" + "print_environment_info", ) load("../lib/health_check.axl", "agent_health_check") -load("../lib/environment.axl", "workflows_runner_env") load("../lib/build_metadata.axl", "get_build_metadata_flags") load("../fragments.axl", "BazelFragment", "HealthCheckFragment") -load("./artifacts.axl", "configure_artifacts") - -def _post_health_check(ctx): - print("--- :aspect: Agent Health Check") - return agent_health_check(ctx) def configure_builtins(ctx: ConfigContext): - UNDER_WORKFLOWS = ctx.std.env.var("ASPECT_WORKFLOWS_RUNNER_VERSION") - is_buildkite = bool(ctx.std.env.var("BUILDKITE")) - if UNDER_WORKFLOWS: - if 
is_buildkite: - print("--- :aspect: Workflows Runner Environment") - workflows_runner_env(ctx.std.fs) - # Read platform config from disk - platform_config = read_platform_config(ctx.std.fs) - # Read host config from environment - host_config = read_host_config(ctx.std.env, ctx.std.io) + environment = get_environment(ctx.std) # Configure fragments globally bazel_fragment = ctx.fragments[BazelFragment] hc_fragment = ctx.fragments[HealthCheckFragment] - if UNDER_WORKFLOWS: - # Generate bazelrc content + if environment: + hc_fragment.pre_health_check.append(lambda ctx: print_environment_info(environment)) + hc_fragment.pre_health_check.append(lambda ctx: agent_health_check(ctx, environment)) + + # Flags to optimize build & test (startup_flags, build_flags) = get_bazelrc_flags( - platform_config = platform_config, - host_config = host_config, - bazel_version = "9.0.0", - root_dir = ctx.std.env.root_dir, + environment = environment, + root_dir = ctx.std.env.root_dir(), ) - bessie_endpoint = platform_config.get("bessie_endpoint", None) + metadata_flags = get_build_metadata_flags(ctx.std) + + bessie_endpoint = environment.build_events.backend bessie_sinks = [] if bessie_endpoint: bessie_sinks.append(bazel.build_events.grpc( @@ -55,14 +42,5 @@ def configure_builtins(ctx: ConfigContext): bazel_fragment.extra_startup_flags.extend(startup_flags) bazel_fragment.extra_flags.extend(build_flags) - metadata_flags = get_build_metadata_flags(ctx.std.env, ctx.std.process, workspace = ".") bazel_fragment.extra_flags.extend(metadata_flags) - bazel_fragment.build_start.append(lambda ctx, state: print("+++ :bazel: Building")) bazel_fragment.build_event_sinks.extend(bessie_sinks) - if is_buildkite: - hc_fragment.post_health_check.append(_post_health_check) - else: - hc_fragment.post_health_check.append(agent_health_check) - - # Wire artifact uploads last so it wraps existing hooks - configure_artifacts(ctx) diff --git a/crates/aspect-cli/src/builtins/aspect/lib/build_metadata.axl 
b/crates/aspect-cli/src/builtins/aspect/lib/build_metadata.axl index 6f921fec1..7056ff439 100644 --- a/crates/aspect-cli/src/builtins/aspect/lib/build_metadata.axl +++ b/crates/aspect-cli/src/builtins/aspect/lib/build_metadata.axl @@ -219,16 +219,12 @@ def _collect_gitlab(env, meta): meta[k] = v -def get_build_metadata_flags(env, process, workspace, task_id = None, task_name = None): +def get_build_metadata_flags(std): """ Generate --build_metadata=KEY=VALUE flags for Bazel invocations. Args: - env: Environment interface (ctx.std.env) - process: Process interface (ctx.std.process) - workspace: Workspace name (e.g. ".") - task_id: Optional task ID string - task_name: Optional task name string + std: Standard context (ctx.std) Returns: list of --build_metadata=KEY=VALUE strings (empty values are skipped) @@ -236,17 +232,17 @@ def get_build_metadata_flags(env, process, workspace, task_id = None, task_name meta = {} # Step 1: Collect CI host metadata (provides CI-specific fields as fallback) - if env.var("GITHUB_ACTIONS"): - _collect_github(env, meta) - elif env.var("BUILDKITE"): - _collect_buildkite(env, meta) - elif env.var("CIRCLECI"): - _collect_circleci(env, meta) - elif env.var("GITLAB_CI"): - _collect_gitlab(env, meta) + if std.env.var("GITHUB_ACTIONS"): + _collect_github(std.env, meta) + elif std.env.var("BUILDKITE"): + _collect_buildkite(std.env, meta) + elif std.env.var("CIRCLECI"): + _collect_circleci(std.env, meta) + elif std.env.var("GITLAB_CI"): + _collect_gitlab(std.env, meta) # Step 2: Run git show and overwrite commit fields (git is the primary source) - git_data = _git_show(process) + git_data = _git_show(std.process) for k, v in git_data.items(): meta[k] = v @@ -254,15 +250,7 @@ def get_build_metadata_flags(env, process, workspace, task_id = None, task_name if not meta.get("USER") and meta.get("COMMIT_AUTHOR"): meta["USER"] = meta["COMMIT_AUTHOR"] - # Step 4: Set workspace and task context if provided - if workspace: - meta["WORKSPACE"] = workspace - 
if task_id: - meta["ASPECT_TASK_ID"] = task_id - if task_name: - meta["ASPECT_TASK_NAME"] = task_name - - # Step 5: Build --build_metadata=KEY=VALUE strings, skipping empty values + # Step 4: Build --build_metadata=KEY=VALUE strings, skipping empty values flags = [] for key, value in meta.items(): if value: diff --git a/crates/aspect-cli/src/builtins/aspect/lib/environment.axl b/crates/aspect-cli/src/builtins/aspect/lib/environment.axl index bae40eb92..0a1bc3678 100644 --- a/crates/aspect-cli/src/builtins/aspect/lib/environment.axl +++ b/crates/aspect-cli/src/builtins/aspect/lib/environment.axl @@ -1,20 +1,315 @@ """ -Configure Workflows Environment Library +Workflows Environment Library -Prints debug/diagnostic information about the runner: the Workflows product -version, whether warming is enabled, and cloud platform metadata (region, -instance type, spot/preemptible status, bootstrap log URLs, etc.). - -This is purely informational output for debugging — no side effects. +Reads runner environment from environment variables and exposes it as typed +records. Also provides bazelrc flag generation and host/CI detection. 
""" -load("./platform.axl", "DEFAULT_PLATFORM_DIR") +DEFAULT_WORKFLOWS_DIR = "/etc/aspect/workflows" +DEFAULT_STORAGE_PATH = "/mnt/ephemeral" +DEFAULT_PLATFORM_DIR = DEFAULT_WORKFLOWS_DIR + "/platform" +DEFAULT_BIN_DIR = DEFAULT_WORKFLOWS_DIR + "/bin" # AWS CloudWatch log group AWS_LOG_GROUP = "/aw/runner/cloud-init/output" -def _url_encode(s): +_ENV_VARS = { + "bessie_endpoint": "ASPECT_WORKFLOWS_BES_BACKEND", + "build_result_ui_base_url": "ASPECT_WORKFLOWS_BES_RESULTS_URL", + "remote_cache_address": "ASPECT_WORKFLOWS_REMOTE_BYTESTREAM_URI_PREFIX", + "remote_cache_endpoint": "ASPECT_WORKFLOWS_REMOTE_CACHE", + "storage_path": "ASPECT_WORKFLOWS_RUNNER_STORAGE_PATH", + "instance_id": "ASPECT_WORKFLOWS_RUNNER_INSTANCE_ID", + "instance_name": "ASPECT_WORKFLOWS_RUNNER_INSTANCE_NAME", + "instance_type": "ASPECT_WORKFLOWS_RUNNER_INSTANCE_TYPE", + "account": "ASPECT_WORKFLOWS_RUNNER_CLOUD_ACCOUNT", + "region": "ASPECT_WORKFLOWS_RUNNER_REGION", + "az": "ASPECT_WORKFLOWS_RUNNER_AZ", + "product_version": "ASPECT_WORKFLOWS_RUNNER_VERSION", +} + +_MARKER_ENV_VARS = { + "preemptible": "ASPECT_WORKFLOWS_RUNNER_PREEMPTIBLE", + "warming_enabled": "ASPECT_WORKFLOWS_RUNNER_WARMING_ENABLED", +} + +_CLOUD_PROVIDER_ENV = "ASPECT_WORKFLOWS_RUNNER_CLOUD_PROVIDER" + +RemoteCache = record( + endpoint = field(str, default = ""), + address = field(str, default = ""), +) + +BuildEvents = record( + backend = field(str, default = ""), + results_url = field(str, default = ""), +) + +Runner = record( + storage_path = field(str, default = DEFAULT_STORAGE_PATH), + product_version = field(str, default = ""), + instance_id = field(str, default = ""), + instance_name = field(str, default = ""), + instance_type = field(str, default = ""), + account = field(str, default = ""), + region = field(str, default = ""), + az = field(str, default = ""), + preemptible = field(bool, default = False), + warming_enabled = field(bool, default = False), + cloud_provider = field(str, default = ""), + # TODO: replace 
with agent http api call once available + warming_complete = field(bool, default = False), + warming_current_cache = field(str, default = ""), + runner_job_history = field(str, default = ""), + last_health_check = field(str, default = ""), +) + +CI = record( + host = field(str, default = ""), + scm_repo_name = field(str, default = ""), + supports_curses = field(bool, default = False), +) + +Environment = record( + remote_cache = RemoteCache, + build_events = BuildEvents, + runner = Runner, + ci = CI, +) + +# --- Reading environment --- + +def _read_ci(std) -> CI: + ci_host = "" + ci_scm_repo_name = "" + ci_supports_curses = std.io.stdout.is_tty + if std.env.var("BUILDKITE_REPO"): + ci_host = "buildkite" + ci_scm_repo_name = _parse_git_url_name(std.env.var("BUILDKITE_REPO")) + ci_supports_curses = True + elif std.env.var("GITHUB_REPOSITORY"): + ci_host = "github" + repo = std.env.var("GITHUB_REPOSITORY") + ci_scm_repo_name = repo.split("/")[-1] if "/" in repo else repo + elif std.env.var("CIRCLE_PROJECT_REPONAME"): + ci_host = "circleci" + ci_scm_repo_name = std.env.var("CIRCLE_PROJECT_REPONAME") + elif std.env.var("CI_PROJECT_NAME"): + ci_host = "gitlab" + ci_scm_repo_name = std.env.var("CI_PROJECT_NAME") + return CI( + host = ci_host, + scm_repo_name = ci_scm_repo_name, + supports_curses = ci_supports_curses, + ) + + +def _build_environment(config: dict, ci: CI = CI()) -> Environment: + return Environment( + remote_cache = RemoteCache( + endpoint = config.get("remote_cache_endpoint", ""), + address = config.get("remote_cache_address", ""), + ), + build_events = BuildEvents( + backend = config.get("bessie_endpoint", ""), + results_url = config.get("build_result_ui_base_url", ""), + ), + runner = Runner( + storage_path = config.get("storage_path", DEFAULT_STORAGE_PATH), + product_version = config.get("product_version", ""), + instance_id = config.get("instance_id", ""), + instance_name = config.get("instance_name", ""), + instance_type = config.get("instance_type", 
""), + account = config.get("account", ""), + region = config.get("region", ""), + az = config.get("az", ""), + preemptible = bool(config.get("preemptible")), + warming_enabled = bool(config.get("warming_enabled")), + cloud_provider = config.get("cloud_provider", ""), + warming_complete = bool(config.get("warming_complete")), + warming_current_cache = config.get("warming_current_cache", ""), + runner_job_history = config.get("runner_job_history"), + last_health_check = config.get("last_health_check", ""), + ), + ci = ci, + ) + + +# TODO: flip legacy default to False once all deployments expose ASPECT_WORKFLOWS_* env vars. +def get_environment(std, legacy: bool = True, platform_dir: str = DEFAULT_PLATFORM_DIR) -> Environment | None: + """ + Build an Environment from environment variables, or from the platform + directory when legacy = True. + + Args: + std: Standard context (ctx.std) + legacy: when True, reads from platform directory files instead of env vars + platform_dir: Path to platform config directory; only used when legacy = True + + Returns: + Environment record + """ + if not std.env.var("ASPECT_WORKFLOWS_RUNNER_VERSION"): + return None + + if legacy: + return legacy_get_environment(std, platform_dir) + + config = {} + + for key, env_var in _ENV_VARS.items(): + value = std.env.var(env_var) + if value: + config[key] = value + + for key, env_var in _MARKER_ENV_VARS.items(): + if std.env.var(env_var): + config[key] = "1" + + cloud_provider = std.env.var(_CLOUD_PROVIDER_ENV) + if cloud_provider: + config["cloud_provider"] = cloud_provider + + if "storage_path" not in config: + config["storage_path"] = DEFAULT_STORAGE_PATH + + return _build_environment(config, _read_ci(std)) + + +def legacy_get_environment(std, platform_dir: str = DEFAULT_PLATFORM_DIR) -> Environment: + """ + Build an Environment by reading from the platform directory. + + Deprecated: prefer get_environment(legacy = False) which reads from + environment variables instead. 
+ + Args: + std: Standard context (ctx.std) + platform_dir: Path to platform config directory + + Returns: + Environment record + """ + config = {} + + for key in _ENV_VARS: + path = platform_dir + "/" + key + if std.fs.exists(path): + content = std.fs.read_to_string(path) + if content: + config[key] = content.strip() + + if "storage_path" not in config: + config["storage_path"] = DEFAULT_STORAGE_PATH + + for key in _MARKER_ENV_VARS: + if std.fs.exists(platform_dir + "/" + key): + config[key] = "1" + + if std.fs.exists(platform_dir + "/aws"): + config["cloud_provider"] = "aws" + elif std.fs.exists(platform_dir + "/gcp"): + config["cloud_provider"] = "gcp" + + # TODO: replace with agent http api call once available + warming_complete = std.fs.exists(platform_dir + "/warming_complete") + if warming_complete: + config["warming_complete"] = True + warming_current_cache_path = DEFAULT_WORKFLOWS_DIR + "/warming_current_cache" + if std.fs.exists(warming_current_cache_path): + config["warming_current_cache"] = std.fs.read_to_string(warming_current_cache_path).strip() + runner_job_history_path = platform_dir + "/runner_job_history" + if std.fs.exists(runner_job_history_path): + config["runner_job_history"] = std.fs.read_to_string(runner_job_history_path) + last_health_check_path = platform_dir + "/last_health_check" + if std.fs.exists(last_health_check_path): + config["last_health_check"] = std.fs.read_to_string(last_health_check_path) + + return _build_environment(config, _read_ci(std)) + + +def is_warming_complete(std, platform_dir: str = DEFAULT_PLATFORM_DIR) -> bool: + """Check whether cache warming has completed, without reading full environment.""" + return std.fs.exists(platform_dir + "/warming_complete") + + +def _parse_git_url_name(url: str) -> str: + if not url: + return None + name = url.rstrip("/") + if name.endswith(".git"): + name = name[:-4] + return name.split("/")[-1].split(":")[-1] + + +def _sanitize_filename(name: str) -> str: + if not name: + return "" + 
result = "" + for c in name.elems(): + if c.isalnum() or c in "-_.": + result += c + else: + result += "_" + return result + + +def get_bazelrc_flags(environment: Environment, root_dir: str) -> (list, list): + """ + Generate bazelrc flags from platform configuration. + + Args: + environment: Environment from read_environment() + root_dir: absolute path to the workspace root directory + + Returns: + (startup_flags, build_flags): two lists of flag strings + """ + storage_path = environment.runner.storage_path + repo_name = environment.ci.scm_repo_name + subdir = _sanitize_filename(root_dir.rstrip("/").split("/")[-1]) if root_dir else "__main__" + + build_flags = [] + + build_flags.append("--remote_upload_local_results") + build_flags.append("--heap_dump_on_oom") + build_flags.append("--generate_json_trace_profile") + build_flags.append("--experimental_repository_cache_hardlinks") + build_flags.append("--remote_accept_cached") + build_flags.append("--disk_cache=") + build_flags.append("--remote_timeout=3600") + build_flags.append("--remote_retries=360") + build_flags.append("--grpc_keepalive_timeout=30s") + build_flags.append(("--noexperimental_remote_cache_compression", "<8.0.0")) + build_flags.append(("--noremote_cache_compression", ">=8.0.0")) + build_flags.append(("--incompatible_remote_results_ignore_disk", "<7.0.0")) + + if environment.remote_cache.endpoint: + build_flags.append("--remote_cache=" + environment.remote_cache.endpoint) + + if environment.remote_cache.address: + build_flags.append("--remote_bytestream_uri_prefix=" + environment.remote_cache.address) + + build_flags.append("--repository_cache=" + storage_path + "/caches/repository") + + startup_flags = [] + + if repo_name: + sanitized = _sanitize_filename(repo_name) + startup_flags.append("--output_user_root=" + storage_path + "/bazel/" + sanitized + "/" + subdir) + startup_flags.append("--output_base=" + storage_path + "/output/" + sanitized + "/" + subdir) + else: + 
startup_flags.append("--output_user_root=" + storage_path + "/bazel/" + subdir) + startup_flags.append("--output_base=" + storage_path + "/output/" + subdir) + + return (startup_flags, build_flags) + + + +# --- Display helpers --- + +def _url_encode(s: str) -> str: """Percent-encode a string for use in URLs.""" result = "" for c in s.elems(): @@ -33,73 +328,51 @@ def _url_encode(s): return result -def _read_file(fs, path): - """Read a text file and strip whitespace, or return empty string if missing.""" - if fs.exists(path): - return fs.read_to_string(path).strip() - return "" - - -def _read_platform_metadata(fs): - """Read all platform metadata from files.""" - return { - "region": _read_file(fs, DEFAULT_PLATFORM_DIR + "/region"), - "az": _read_file(fs, DEFAULT_PLATFORM_DIR + "/az"), - "instance_id": _read_file(fs, DEFAULT_PLATFORM_DIR + "/instance_id"), - "instance_name": _read_file(fs, DEFAULT_PLATFORM_DIR + "/instance_name"), - "instance_type": _read_file(fs, DEFAULT_PLATFORM_DIR + "/instance_type"), - "account_id": _read_file(fs, DEFAULT_PLATFORM_DIR + "/account"), - "preemptible": fs.exists(DEFAULT_PLATFORM_DIR + "/preemptible"), - } - - -def _print_workflows_info(fs): +def _print_workflows_info(env: Environment) -> None: """Print Workflows version and warming status.""" - version = _read_file(fs, DEFAULT_PLATFORM_DIR + "/product_version") - warming_enabled = fs.exists(DEFAULT_PLATFORM_DIR + "/warming_enabled") - + if env.ci.host == "buildkite": + print("--- :aspect: Workflows Runner Environment") print("Workflows Information") - print("\tVersion: " + version) - print("\tWarming enabled: " + ("true" if warming_enabled else "false")) + print("\tVersion: " + env.runner.product_version) + print("\tWarming enabled: " + ("true" if env.runner.warming_enabled else "false")) -def _print_aws_info(meta): +def _print_aws_info(env: Environment) -> None: """Print AWS-specific runner information.""" print("AWS Information") - print("\tRegion: " + meta["region"]) - 
print("\tAvailability Zone: " + meta["az"]) - print("\tAccount ID: " + meta["account_id"]) - print("\tInstance ID: " + meta["instance_id"]) - print("\tInstance Name: " + meta["instance_name"]) - print("\tInstance Type: " + meta["instance_type"]) - print("\tSpot Instance: " + ("yes" if meta["preemptible"] else "no")) - print("\tCLI: 'aws logs tail \"/aw/runner/cloud-init/output\" --log-stream-names \"" + meta["instance_id"] + "\" --since=30d'") + print("\tRegion: " + env.runner.region) + print("\tAvailability Zone: " + env.runner.az) + print("\tAccount ID: " + env.runner.account) + print("\tInstance ID: " + env.runner.instance_id) + print("\tInstance Name: " + env.runner.instance_name) + print("\tInstance Type: " + env.runner.instance_type) + print("\tSpot Instance: " + ("yes" if env.runner.preemptible else "no")) + print("\tCLI: 'aws logs tail \"/aw/runner/cloud-init/output\" --log-stream-names \"" + env.runner.instance_id + "\" --since=30d'") -def _print_gcp_info(meta): +def _print_gcp_info(env: Environment) -> None: """Print GCP-specific runner information.""" print("GCP Information") - print("\tRegion: " + meta["region"]) - print("\tAvailability Zone: " + meta["az"]) - print("\tProject ID: " + meta["account_id"]) - print("\tInstance ID: " + meta["instance_id"]) - print("\tInstance Name: " + meta["instance_name"]) - print("\tInstance Type: " + meta["instance_type"]) - print("\tPreemptible: " + ("yes" if meta["preemptible"] else "no")) - print("\tCLI: 'gcloud logging read --project " + meta["account_id"] + " \"resource.type=gce_instance resource.labels.instance_id=" + meta["instance_id"] + " log_name=projects/" + meta["account_id"] + "/logs/google_metadata_script_runner\" --format=\"value(jsonPayload.message)\" --freshness=30d | tac'") + print("\tRegion: " + env.runner.region) + print("\tAvailability Zone: " + env.runner.az) + print("\tProject ID: " + env.runner.account) + print("\tInstance ID: " + env.runner.instance_id) + print("\tInstance Name: " + 
env.runner.instance_name) + print("\tInstance Type: " + env.runner.instance_type) + print("\tPreemptible: " + ("yes" if env.runner.preemptible else "no")) + print("\tCLI: 'gcloud logging read --project " + env.runner.account + " \"resource.type=gce_instance resource.labels.instance_id=" + env.runner.instance_id + " log_name=projects/" + env.runner.account + "/logs/google_metadata_script_runner\" --format=\"value(jsonPayload.message)\" --freshness=30d | tac'") -def workflows_runner_env(fs): +def print_environment_info(env: Environment) -> None: """ Print debug/diagnostic information about the runner environment. Args: - fs: Filesystem interface (ctx.std.fs) + env: Environment """ - _print_workflows_info(fs) - meta = _read_platform_metadata(fs) + _print_workflows_info(env) - if fs.exists(DEFAULT_PLATFORM_DIR + "/aws"): - _print_aws_info(meta) - elif fs.exists(DEFAULT_PLATFORM_DIR + "/gcp"): - _print_gcp_info(meta) + if env.runner.cloud_provider == "aws": + _print_aws_info(env) + elif env.runner.cloud_provider == "gcp": + _print_gcp_info(env) diff --git a/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl b/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl index 63d066b05..0ebe5700e 100644 --- a/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl +++ b/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl @@ -10,10 +10,7 @@ job. It does two things: fleet service between jobs and prints its contents. """ -load("./platform.axl", "DEFAULT_WORKFLOWS_DIR", "DEFAULT_PLATFORM_DIR", "read_platform_config") - -# AWS CloudWatch log group -AWS_LOG_GROUP = "/aw/runner/cloud-init/output" +load("./environment.axl", "AWS_LOG_GROUP", "DEFAULT_BIN_DIR", "get_environment", "is_warming_complete") def _url_encode(s): @@ -35,23 +32,23 @@ def _url_encode(s): return result -def _bootstrap_log_url(fs, config): +def _bootstrap_log_url(environment): """ Construct the bootstrap log URL based on the cloud provider. 
Returns the URL string, or None if the provider cannot be determined. """ - if fs.exists(DEFAULT_PLATFORM_DIR + "/aws"): - return _aws_bootstrap_log_url(config) - elif fs.exists(DEFAULT_PLATFORM_DIR + "/gcp"): - return _gcp_bootstrap_log_url(config) + if environment.cloud_provider == "aws": + return _aws_bootstrap_log_url(environment) + elif environment.cloud_provider == "gcp": + return _gcp_bootstrap_log_url(environment) return None -def _aws_bootstrap_log_url(config): +def _aws_bootstrap_log_url(environment): """Construct AWS CloudWatch Logs URL for bootstrap logs.""" - region = config["region"] - instance_id = config["instance_id"] + region = environment.region + instance_id = environment.instance_id # URL-encode the log group: replace "/" with "%252F" (double-encoded) escaped_log_group = AWS_LOG_GROUP.replace("/", "%252F") @@ -64,10 +61,10 @@ def _aws_bootstrap_log_url(config): ) -def _gcp_bootstrap_log_url(config): +def _gcp_bootstrap_log_url(environment): """Construct GCP Cloud Logging URL for bootstrap logs.""" - instance_id = config["instance_id"] - project_id = config["account"] + instance_id = environment.instance_id + project_id = environment.account query = ( 'resource.type="gce_instance"\n' @@ -83,42 +80,44 @@ def _gcp_bootstrap_log_url(config): ) -def _wait_for_warming(fs, config): +def _wait_for_warming(std, environment): """ Wait for warming to complete and report the result. If warming is not configured, returns immediately. """ # If warming is not configured, skip entirely - if not fs.exists(DEFAULT_PLATFORM_DIR + "/warming_enabled"): - return + if not environment.runner.warming_enabled: + return environment # If warming hasn't completed yet, block until it does. # The bootstrap process runs concurrently. If it hits a critical error, # it terminates the runner — so this loop will not hang indefinitely. 
- if not fs.exists(DEFAULT_PLATFORM_DIR + "/warming_complete"): + if not environment.runner.warming_complete: print("Waiting for warming to complete...") for _ in forever(1000): - if fs.exists(DEFAULT_PLATFORM_DIR + "/warming_complete"): + if is_warming_complete(std): break - # Report warming result - cache_version_file = DEFAULT_WORKFLOWS_DIR + "/warming_current_cache" - job_history_file = DEFAULT_PLATFORM_DIR + "/runner_job_history" + # Re-read environment to pick up warming result fields. + # TODO: replace with agent http api call once available + environment = get_environment(std) - if fs.exists(cache_version_file): - version = fs.read_to_string(cache_version_file).strip() - print("Runner warmed from cache version: " + version + "\n") - elif fs.exists(job_history_file) and fs.metadata(job_history_file).size == 0: + # Report warming result + if environment.runner.warming_current_cache: + print("Runner warmed from cache version: " + environment.runner.warming_current_cache + "\n") + elif environment.runner.runner_job_history == "": # This is the first job on the runner and warming failed. # On subsequent jobs, previous work has already populated caches, # so no warning is needed. print("Warming was unsuccessful. This first build on this runner will be cold.") - url = _bootstrap_log_url(fs, config) + url = _bootstrap_log_url(environment.runner) if url: print("See bootstrap logs for more details:") print(url + "\n") + return environment + def _display_bazel_health(health): """ @@ -135,7 +134,7 @@ def _display_bazel_health(health): print("\t? bazel health check inconclusive: " + (health.message or "unknown")) -def _display_runner_health(fs): +def _display_runner_health(environment): """ Display the last runner health check results. 
@@ -143,13 +142,11 @@ def _display_runner_health(fs): """ print("\x1b[1;4;34mRunner Health\x1b[0m") - last_health_check_file = DEFAULT_PLATFORM_DIR + "/last_health_check" - if not fs.exists(last_health_check_file): + if not environment.runner.last_health_check: print("Health check has not yet been run on this runner") return - content = fs.read_to_string(last_health_check_file) - data = json.decode(content) + data = json.decode(environment.runner.last_health_check) # data.timestamp is a unix epoch integer # data.output is a pre-formatted multi-line string @@ -157,7 +154,7 @@ def _display_runner_health(fs): print(data["output"]) -def agent_health_check(ctx): +def agent_health_check(ctx, environment): """ Post health check hook for HealthCheckFragment. @@ -172,12 +169,20 @@ def agent_health_check(ctx): Returns: None if healthy, or a str error message if the Bazel server is unhealthy. """ - fs = ctx.std.fs - config = read_platform_config(fs) - _wait_for_warming(fs, config) - _display_runner_health(fs) + if environment.ci.host == "buildkite": + print("--- :aspect: Runner Health Check") + + environment = _wait_for_warming(ctx.std, environment) + _display_runner_health(environment) health = ctx.bazel.health_check() _display_bazel_health(health) + + if environment.ci.host == "buildkite": + print("--- :bazel: Running %s" % ctx.task.name) + if health.outcome == "unhealthy": + signal_bin = DEFAULT_BIN_DIR + "/signal_instance_unhealthy" + if ctx.std.fs.exists(signal_bin): + ctx.std.process.command(signal_bin).spawn().wait() return health.message or "Bazel server is unhealthy" return None diff --git a/crates/aspect-cli/src/builtins/aspect/lib/platform.axl b/crates/aspect-cli/src/builtins/aspect/lib/platform.axl deleted file mode 100644 index 9dd1a711e..000000000 --- a/crates/aspect-cli/src/builtins/aspect/lib/platform.axl +++ /dev/null @@ -1,234 +0,0 @@ -""" -Platform Configuration and Bazelrc Generation Library - -Pure functions for reading platform/host configuration and 
generating bazelrc flags. -""" - -DEFAULT_WORKFLOWS_DIR = "/etc/aspect/workflows" -DEFAULT_STORAGE_PATH = "/mnt/ephemeral" -DEFAULT_PLATFORM_DIR = DEFAULT_WORKFLOWS_DIR + "/platform" -DEFAULT_BIN_DIR = DEFAULT_WORKFLOWS_DIR + "/bin" - -PLATFORM_CONFIG_KEYS = { - "remote_cache_endpoint": "remote_cache_endpoint", - "remote_cache_address": "remote_cache_address", - "storage_path": "storage_path", - "bessie_endpoint": "bessie_endpoint", - "build_result_ui_base_url": "build_result_ui_base_url", - "instance_id": "instance_id", - "instance_name": "instance_name", - "account": "account", - "region": "region", -} - - -def read_platform_config(fs, platform_dir = DEFAULT_PLATFORM_DIR): - """ - Read platform configuration from disk. - - Args: - fs: Filesystem interface (ctx.std.fs) - platform_dir: Path to platform config directory - - Returns: - dict with platform config keys - """ - config = {} - - for key, filename in PLATFORM_CONFIG_KEYS.items(): - path = platform_dir + "/" + filename - if fs.exists(path): - content = fs.read_to_string(path) - if content: - config[key] = content.strip() - - tokens_path = platform_dir + "/rosetta_api_tokens" - if fs.exists(tokens_path): - content = fs.read_to_string(tokens_path) - if content: - config["rosetta_api_tokens"] = json.decode(content) - - if "storage_path" not in config: - config["storage_path"] = DEFAULT_STORAGE_PATH - - return config - - -def read_warming_config(fs, platform_dir = DEFAULT_PLATFORM_DIR): - """ - Read warming-specific configuration from platform config files. 
- - Args: - fs: Filesystem interface (ctx.std.fs) - platform_dir: Path to platform config directory - - Returns: - dict with optional keys: warming_bucket, warming_additional_paths - """ - config = {} - - bucket_path = platform_dir + "/warming_bucket" - if fs.exists(bucket_path): - content = fs.read_to_string(bucket_path) - if content: - config["warming_bucket"] = content.strip() - - paths_path = platform_dir + "/warming_additional_paths" - if fs.exists(paths_path): - content = fs.read_to_string(paths_path) - if content: - config["warming_additional_paths"] = content.strip() - - return config - - -def read_host_config(env, io): - """ - Read host/CI configuration from environment. - - Args: - env: Environment interface (ctx.std.env) - io: IO interface (ctx.std.io) - - Returns: - dict with keys: supports_curses, scm_repo_name, ci_host - """ - config = { - "supports_curses": io.stdout.is_tty, - "scm_repo_name": None, - "ci_host": None, - } - - if env.var("BUILDKITE_REPO"): - config["ci_host"] = "buildkite" - config["scm_repo_name"] = _parse_git_url_name(env.var("BUILDKITE_REPO")) - config["supports_curses"] = True - elif env.var("GITHUB_REPOSITORY"): - config["ci_host"] = "github" - repo = env.var("GITHUB_REPOSITORY") - config["scm_repo_name"] = repo.split("/")[-1] if "/" in repo else repo - elif env.var("CIRCLE_PROJECT_REPONAME"): - config["ci_host"] = "circleci" - config["scm_repo_name"] = env.var("CIRCLE_PROJECT_REPONAME") - elif env.var("CI_PROJECT_NAME"): - config["ci_host"] = "gitlab" - config["scm_repo_name"] = env.var("CI_PROJECT_NAME") - - return config - - -def _parse_git_url_name(url): - if not url: - return None - name = url.rstrip("/") - if name.endswith(".git"): - name = name[:-4] - return name.split("/")[-1].split(":")[-1] - - -def parse_version(version_str): - parts = version_str.split(".") - major = int(parts[0]) if len(parts) > 0 else 0 - minor = int(parts[1]) if len(parts) > 1 else 0 - patch_str = parts[2].split("-")[0] if len(parts) > 2 else "0" - 
patch = int(patch_str) if patch_str else 0 - return (major, minor, patch) - - -def version_satisfies(version, constraint): - if not version or constraint == "*": - return True - - v = parse_version(version) - parts = constraint.split() - for i in range(0, len(parts), 2): - if i + 1 >= len(parts): - break - op = parts[i] - target = parse_version(parts[i + 1]) - - if op == "<" and not (v < target): - return False - elif op == "<=" and not (v <= target): - return False - elif op == ">" and not (v > target): - return False - elif op == ">=" and not (v >= target): - return False - elif op == "=" and v != target: - return False - - return True - - -def _sanitize_filename(name): - if not name: - return "" - result = "" - for c in name.elems(): - if c.isalnum() or c in "-_.": - result += c - else: - result += "_" - return result - - -def get_bazelrc_flags(platform_config, host_config, bazel_version = None, root_dir = None): - """ - Generate bazelrc flags from platform and host configuration. - - Args: - platform_config: dict from read_platform_config() - host_config: dict from read_host_config() - bazel_version: str like "7.0.0" or None - root_dir: absolute path to the workspace root directory - - Returns: - (startup_flags, build_flags): two lists of flag strings - """ - storage_path = platform_config.get("storage_path", DEFAULT_STORAGE_PATH) - repo_name = host_config.get("scm_repo_name") - subdir = _sanitize_filename(root_dir.rstrip("/").split("/")[-1]) if root_dir else "__main__" - - build_flags = [] - - build_flags.append("--remote_upload_local_results") - build_flags.append("--heap_dump_on_oom") - build_flags.append("--generate_json_trace_profile") - build_flags.append("--experimental_repository_cache_hardlinks") - build_flags.append("--remote_accept_cached") - - if version_satisfies(bazel_version, "< 7"): - build_flags.append("--incompatible_remote_results_ignore_disk") - - build_flags.append("--disk_cache=") - build_flags.append("--remote_timeout=3600") - 
build_flags.append("--remote_retries=360") - build_flags.append("--grpc_keepalive_timeout=30s") - - if version_satisfies(bazel_version, "< 8"): - build_flags.append("--noexperimental_remote_cache_compression") - else: - build_flags.append("--noremote_cache_compression") - - remote_cache_endpoint = platform_config.get("remote_cache_endpoint") - if remote_cache_endpoint: - build_flags.append("--remote_cache=" + remote_cache_endpoint) - - remote_cache_address = platform_config.get("remote_cache_address") - if remote_cache_address: - build_flags.append("--remote_bytestream_uri_prefix=" + remote_cache_address) - - build_flags.append("--repository_cache=" + storage_path + "/caches/repository") - - startup_flags = [] - - if repo_name: - sanitized = _sanitize_filename(repo_name) - startup_flags.append("--output_user_root=" + storage_path + "/bazel/" + sanitized + "/" + subdir) - startup_flags.append("--output_base=" + storage_path + "/output/" + sanitized + "/" + subdir) - else: - startup_flags.append("--output_user_root=" + storage_path + "/bazel/" + subdir) - startup_flags.append("--output_base=" + storage_path + "/output/" + subdir) - - return (startup_flags, build_flags) diff --git a/crates/aspect-cli/src/builtins/mod.rs b/crates/aspect-cli/src/builtins/mod.rs index 64fdc462d..c80651c4e 100644 --- a/crates/aspect-cli/src/builtins/mod.rs +++ b/crates/aspect-cli/src/builtins/mod.rs @@ -36,10 +36,6 @@ const ASPECT: Builtin = Builtin { "lib/deliveryd.axl", include_str!("./aspect/lib/deliveryd.axl"), ), - ( - "lib/platform.axl", - include_str!("./aspect/lib/platform.axl"), - ), ( "lib/health_check.axl", include_str!("./aspect/lib/health_check.axl"), diff --git a/crates/axl-runtime/Cargo.toml b/crates/axl-runtime/Cargo.toml index cd790570b..6c1e1693e 100644 --- a/crates/axl-runtime/Cargo.toml +++ b/crates/axl-runtime/Cargo.toml @@ -61,7 +61,6 @@ dirs = "6.0.0" fibre = "0.5.0" flate2 = "1.1.2" rand = "0.8.5" -semver = "1" sha256 = "1.6.0" ssri = "9.2.0" base64 = "0.22.1" From 
ea7312c6c4f089a206c7aa2ebc085907f7240e80 Mon Sep 17 00:00:00 2001 From: thesayyn Date: Sun, 1 Mar 2026 13:03:24 -0800 Subject: [PATCH 5/6] cut platform directory dependency completely --- .../src/builtins/aspect/lib/environment.axl | 200 ++++++------------ .../src/builtins/aspect/lib/health_check.axl | 6 +- 2 files changed, 65 insertions(+), 141 deletions(-) diff --git a/crates/aspect-cli/src/builtins/aspect/lib/environment.axl b/crates/aspect-cli/src/builtins/aspect/lib/environment.axl index 0a1bc3678..9c6c7b98b 100644 --- a/crates/aspect-cli/src/builtins/aspect/lib/environment.axl +++ b/crates/aspect-cli/src/builtins/aspect/lib/environment.axl @@ -5,37 +5,13 @@ Reads runner environment from environment variables and exposes it as typed records. Also provides bazelrc flag generation and host/CI detection. """ -DEFAULT_WORKFLOWS_DIR = "/etc/aspect/workflows" +# Default storage path on Workflows Runners DEFAULT_STORAGE_PATH = "/mnt/ephemeral" -DEFAULT_PLATFORM_DIR = DEFAULT_WORKFLOWS_DIR + "/platform" -DEFAULT_BIN_DIR = DEFAULT_WORKFLOWS_DIR + "/bin" # AWS CloudWatch log group AWS_LOG_GROUP = "/aw/runner/cloud-init/output" -_ENV_VARS = { - "bessie_endpoint": "ASPECT_WORKFLOWS_BES_BACKEND", - "build_result_ui_base_url": "ASPECT_WORKFLOWS_BES_RESULTS_URL", - "remote_cache_address": "ASPECT_WORKFLOWS_REMOTE_BYTESTREAM_URI_PREFIX", - "remote_cache_endpoint": "ASPECT_WORKFLOWS_REMOTE_CACHE", - "storage_path": "ASPECT_WORKFLOWS_RUNNER_STORAGE_PATH", - "instance_id": "ASPECT_WORKFLOWS_RUNNER_INSTANCE_ID", - "instance_name": "ASPECT_WORKFLOWS_RUNNER_INSTANCE_NAME", - "instance_type": "ASPECT_WORKFLOWS_RUNNER_INSTANCE_TYPE", - "account": "ASPECT_WORKFLOWS_RUNNER_CLOUD_ACCOUNT", - "region": "ASPECT_WORKFLOWS_RUNNER_REGION", - "az": "ASPECT_WORKFLOWS_RUNNER_AZ", - "product_version": "ASPECT_WORKFLOWS_RUNNER_VERSION", -} - -_MARKER_ENV_VARS = { - "preemptible": "ASPECT_WORKFLOWS_RUNNER_PREEMPTIBLE", - "warming_enabled": "ASPECT_WORKFLOWS_RUNNER_WARMING_ENABLED", -} - 
-_CLOUD_PROVIDER_ENV = "ASPECT_WORKFLOWS_RUNNER_CLOUD_PROVIDER" - RemoteCache = record( endpoint = field(str, default = ""), address = field(str, default = ""), @@ -58,6 +34,8 @@ Runner = record( preemptible = field(bool, default = False), warming_enabled = field(bool, default = False), cloud_provider = field(str, default = ""), + data_dir = field(str, default = ""), + has_nvme_storage = field(bool, default = False), # TODO: replace with agent http api call once available warming_complete = field(bool, default = False), warming_current_cache = field(str, default = ""), @@ -105,133 +83,77 @@ def _read_ci(std) -> CI: ) -def _build_environment(config: dict, ci: CI = CI()) -> Environment: - return Environment( - remote_cache = RemoteCache( - endpoint = config.get("remote_cache_endpoint", ""), - address = config.get("remote_cache_address", ""), - ), - build_events = BuildEvents( - backend = config.get("bessie_endpoint", ""), - results_url = config.get("build_result_ui_base_url", ""), - ), - runner = Runner( - storage_path = config.get("storage_path", DEFAULT_STORAGE_PATH), - product_version = config.get("product_version", ""), - instance_id = config.get("instance_id", ""), - instance_name = config.get("instance_name", ""), - instance_type = config.get("instance_type", ""), - account = config.get("account", ""), - region = config.get("region", ""), - az = config.get("az", ""), - preemptible = bool(config.get("preemptible")), - warming_enabled = bool(config.get("warming_enabled")), - cloud_provider = config.get("cloud_provider", ""), - warming_complete = bool(config.get("warming_complete")), - warming_current_cache = config.get("warming_current_cache", ""), - runner_job_history = config.get("runner_job_history"), - last_health_check = config.get("last_health_check", ""), - ), - ci = ci, - ) - - -# TODO: flip legacy default to False once all deployments expose ASPECT_WORKFLOWS_* env vars. 
-def get_environment(std, legacy: bool = True, platform_dir: str = DEFAULT_PLATFORM_DIR) -> Environment | None: +def get_environment(std) -> Environment | None: """ - Build an Environment from environment variables, or from the platform - directory when legacy = True. + Build an Environment from environment variables. Args: std: Standard context (ctx.std) - legacy: when True, reads from platform directory files instead of env vars - platform_dir: Path to platform config directory; only used when legacy = True Returns: - Environment record + Environment record, or None if not running on a Workflows runner """ - if not std.env.var("ASPECT_WORKFLOWS_RUNNER_VERSION"): + if not std.env.var("ASPECT_WORKFLOWS_RUNNER"): return None - if legacy: - return legacy_get_environment(std, platform_dir) - - config = {} - - for key, env_var in _ENV_VARS.items(): - value = std.env.var(env_var) - if value: - config[key] = value - - for key, env_var in _MARKER_ENV_VARS.items(): - if std.env.var(env_var): - config[key] = "1" - - cloud_provider = std.env.var(_CLOUD_PROVIDER_ENV) - if cloud_provider: - config["cloud_provider"] = cloud_provider - - if "storage_path" not in config: - config["storage_path"] = DEFAULT_STORAGE_PATH - - return _build_environment(config, _read_ci(std)) - - -def legacy_get_environment(std, platform_dir: str = DEFAULT_PLATFORM_DIR) -> Environment: - """ - Build an Environment by reading from the platform directory. - - Deprecated: prefer get_environment(legacy = False) which reads from - environment variables instead. 
- - Args: - std: Standard context (ctx.std) - platform_dir: Path to platform config directory - - Returns: - Environment record - """ - config = {} - - for key in _ENV_VARS: - path = platform_dir + "/" + key - if std.fs.exists(path): - content = std.fs.read_to_string(path) - if content: - config[key] = content.strip() - - if "storage_path" not in config: - config["storage_path"] = DEFAULT_STORAGE_PATH + # Read dynamic state from files at env-var-provided paths + # TODO: replace with agent http api call once available + warming_complete = False + warming_complete_path = std.env.var("ASPECT_WORKFLOWS_RUNNER_WARMING_COMPLETE_MARKER_FILE") + if warming_complete_path and std.fs.exists(warming_complete_path): + warming_complete = True + + warming_current_cache = "" + cache_path = std.env.var("ASPECT_WORKFLOWS_RUNNER_WARMING_CACHE_VERSION_FILE") + if cache_path and std.fs.exists(cache_path): + warming_current_cache = std.fs.read_to_string(cache_path).strip() + + runner_job_history = "" + job_history_path = std.env.var("ASPECT_WORKFLOWS_RUNNER_JOB_HISTORY_FILE") + if job_history_path and std.fs.exists(job_history_path): + runner_job_history = std.fs.read_to_string(job_history_path) + + last_health_check = "" + last_hc_path = std.env.var("ASPECT_WORKFLOWS_RUNNER_LAST_HEALTH_CHECK_FILE") + if last_hc_path and std.fs.exists(last_hc_path): + last_health_check = std.fs.read_to_string(last_hc_path) - for key in _MARKER_ENV_VARS: - if std.fs.exists(platform_dir + "/" + key): - config[key] = "1" + return Environment( + remote_cache = RemoteCache( + endpoint = std.env.var("ASPECT_WORKFLOWS_REMOTE_CACHE") or "", + address = std.env.var("ASPECT_WORKFLOWS_REMOTE_BYTESTREAM_URI_PREFIX") or "", + ), + build_events = BuildEvents( + backend = std.env.var("ASPECT_WORKFLOWS_BES_BACKEND") or "", + results_url = std.env.var("ASPECT_WORKFLOWS_BES_RESULTS_URL") or "", + ), + runner = Runner( + storage_path = std.env.var("ASPECT_WORKFLOWS_RUNNER_STORAGE_PATH") or DEFAULT_STORAGE_PATH, + 
product_version = std.env.var("ASPECT_WORKFLOWS_RUNNER_VERSION") or "", + instance_id = std.env.var("ASPECT_WORKFLOWS_RUNNER_INSTANCE_ID") or "", + instance_name = std.env.var("ASPECT_WORKFLOWS_RUNNER_INSTANCE_NAME") or "", + instance_type = std.env.var("ASPECT_WORKFLOWS_RUNNER_INSTANCE_TYPE") or "", + account = std.env.var("ASPECT_WORKFLOWS_RUNNER_CLOUD_ACCOUNT") or "", + region = std.env.var("ASPECT_WORKFLOWS_RUNNER_REGION") or "", + az = std.env.var("ASPECT_WORKFLOWS_RUNNER_AZ") or "", + preemptible = bool(std.env.var("ASPECT_WORKFLOWS_RUNNER_PREEMPTIBLE")), + warming_enabled = bool(std.env.var("ASPECT_WORKFLOWS_RUNNER_WARMING_ENABLED")), + cloud_provider = std.env.var("ASPECT_WORKFLOWS_RUNNER_CLOUD_PROVIDER") or "", + data_dir = std.env.var("ASPECT_WORKFLOWS_RUNNER_DATA_DIR") or "", + has_nvme_storage = bool(std.env.var("ASPECT_WORKFLOWS_RUNNER_HAS_NVME_STORAGE")), + warming_complete = warming_complete, + warming_current_cache = warming_current_cache, + runner_job_history = runner_job_history, + last_health_check = last_health_check, + ), + ci = _read_ci(std), + ) - if std.fs.exists(platform_dir + "/aws"): - config["cloud_provider"] = "aws" - elif std.fs.exists(platform_dir + "/gcp"): - config["cloud_provider"] = "gcp" - # TODO: replace with agent http api call once available - warming_complete = std.fs.exists(platform_dir + "/warming_complete") - if warming_complete: - config["warming_complete"] = True - warming_current_cache_path = DEFAULT_WORKFLOWS_DIR + "/warming_current_cache" - if std.fs.exists(warming_current_cache_path): - config["warming_current_cache"] = std.fs.read_to_string(warming_current_cache_path).strip() - runner_job_history_path = platform_dir + "/runner_job_history" - if std.fs.exists(runner_job_history_path): - config["runner_job_history"] = std.fs.read_to_string(runner_job_history_path) - last_health_check_path = platform_dir + "/last_health_check" - if std.fs.exists(last_health_check_path): - config["last_health_check"] = 
std.fs.read_to_string(last_health_check_path) - - return _build_environment(config, _read_ci(std)) - - -def is_warming_complete(std, platform_dir: str = DEFAULT_PLATFORM_DIR) -> bool: +def is_warming_complete(std) -> bool: """Check whether cache warming has completed, without reading full environment.""" - return std.fs.exists(platform_dir + "/warming_complete") + path = std.env.var("ASPECT_WORKFLOWS_RUNNER_WARMING_COMPLETE_MARKER_FILE") + return bool(path) and std.fs.exists(path) def _parse_git_url_name(url: str) -> str: diff --git a/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl b/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl index 0ebe5700e..83963f410 100644 --- a/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl +++ b/crates/aspect-cli/src/builtins/aspect/lib/health_check.axl @@ -10,7 +10,9 @@ job. It does two things: fleet service between jobs and prints its contents. """ -load("./environment.axl", "AWS_LOG_GROUP", "DEFAULT_BIN_DIR", "get_environment", "is_warming_complete") +load("./environment.axl", "AWS_LOG_GROUP", "get_environment", "is_warming_complete") + +_DEFAULT_BIN_DIR = "/etc/aspect/workflows/bin" def _url_encode(s): @@ -181,7 +183,7 @@ def agent_health_check(ctx, environment): print("--- :bazel: Running %s" % ctx.task.name) if health.outcome == "unhealthy": - signal_bin = DEFAULT_BIN_DIR + "/signal_instance_unhealthy" + signal_bin = _DEFAULT_BIN_DIR + "/signal_instance_unhealthy" if ctx.std.fs.exists(signal_bin): ctx.std.process.command(signal_bin).spawn().wait() return health.message or "Bazel server is unhealthy" From 9cd4b47a92f764683898cb5c12690af20130ced7 Mon Sep 17 00:00:00 2001 From: thesayyn Date: Sun, 1 Mar 2026 13:26:29 -0800 Subject: [PATCH 6/6] Update artifacts.axl --- crates/aspect-cli/src/builtins/aspect/config/artifacts.axl | 2 +- crates/aspect-cli/src/builtins/aspect/lib/artifacts.axl | 4 +++- crates/axl-runtime/src/module/disk_store.rs | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff 
--git a/crates/aspect-cli/src/builtins/aspect/config/artifacts.axl b/crates/aspect-cli/src/builtins/aspect/config/artifacts.axl index be4b40526..d9be4b2f8 100644 --- a/crates/aspect-cli/src/builtins/aspect/config/artifacts.axl +++ b/crates/aspect-cli/src/builtins/aspect/config/artifacts.axl @@ -157,7 +157,7 @@ def _upload_testlogs_buildkite(ctx, group, entries): ctx.std.process.command("ln").args(["-s", entry["src"], dest_path]).spawn().wait() child = ctx.std.process.command("buildkite-agent") \ - .args(["artifact", "upload", "**/*"]) \ + .args(["artifact", "upload", "**/*", "--log-level=warn"]) \ .current_dir(tmp_dir) \ .stdout("inherit") \ .stderr("inherit") \ diff --git a/crates/aspect-cli/src/builtins/aspect/lib/artifacts.axl b/crates/aspect-cli/src/builtins/aspect/lib/artifacts.axl index 219ec5ba7..1f3cdc7d1 100644 --- a/crates/aspect-cli/src/builtins/aspect/lib/artifacts.axl +++ b/crates/aspect-cli/src/builtins/aspect/lib/artifacts.axl @@ -239,8 +239,10 @@ def _upload_file_buildkite(ctx, path, name): if needs_copy: ctx.std.process.command("cp").args([path, dir + "/" + name]).spawn().wait() + # --literal: Do not treat paths are globs + # --log-level=warn: Do not spam stdout with garbage. only real failures. child = ctx.std.process.command("buildkite-agent") \ - .args(["artifact", "upload", name]) \ + .args(["artifact", "upload", name, "--literal", "--log-level=warn"]) \ .current_dir(dir) \ .stdout("inherit") \ .stderr("inherit") \ diff --git a/crates/axl-runtime/src/module/disk_store.rs b/crates/axl-runtime/src/module/disk_store.rs index c1b17ecd5..8100ab33d 100644 --- a/crates/axl-runtime/src/module/disk_store.rs +++ b/crates/axl-runtime/src/module/disk_store.rs @@ -375,6 +375,7 @@ impl DiskStore { } } + module_roots.sort_by(|a, b| a.0.cmp(&b.0)); Ok(module_roots) } }