diff --git a/.github/workflows/windows-render.yml b/.github/workflows/windows-render.yml index f5e8f19f1..6c6be113d 100644 --- a/.github/workflows/windows-render.yml +++ b/.github/workflows/windows-render.yml @@ -353,6 +353,11 @@ jobs: # invocations, CRLF, file URLs, etc.) in existing vitest suites. # The producer package is skipped because its tests require Docker / # Linux-only tooling (Dockerfile.test, LFS golden MP4 baselines). + # The aws-lambda package is skipped because it targets the AWS Lambda + # Linux runtime exclusively (`@sparticuz/chromium` is Linux-only; the + # ZIP layout is for `/var/task` on AL2023). Its handler.test.ts also + # trips a bun-on-Windows workspace-symlink quirk reading the + # producer's transitive `hono` dep. # ------------------------------------------------------------------- test-windows: name: Tests on windows-latest @@ -407,9 +412,15 @@ jobs: shell: pwsh run: bun run build - - name: Run tests (all packages except producer) + - name: Run tests (all packages except producer + aws-lambda) shell: pwsh - run: bun run --filter "!@hyperframes/producer" test + # Enumerate the packages we want to test instead of negating — + # `bun run --filter "!a" --filter "!b"` composes as a union (any + # package matching either negation runs), not an intersection. + # That meant `@hyperframes/producer` was effectively still being + # tested on Windows, which is what we explicitly want skipped + # (Docker / LFS-baseline tooling). 
+ run: bun run --filter @hyperframes/core --filter @hyperframes/engine --filter @hyperframes/player --filter @hyperframes/cli --filter @hyperframes/studio --filter @hyperframes/shader-transitions test - name: Run runtime contract test shell: pwsh diff --git a/Dockerfile.test b/Dockerfile.test index e0b04dcab..a20d795b1 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -73,6 +73,7 @@ COPY packages/producer/package.json packages/producer/package.json COPY packages/cli/package.json packages/cli/package.json COPY packages/studio/package.json packages/studio/package.json COPY packages/shader-transitions/package.json packages/shader-transitions/package.json +COPY packages/aws-lambda/package.json packages/aws-lambda/package.json RUN bun install --frozen-lockfile # Copy source diff --git a/bun.lock b/bun.lock index 086091b54..f4d49111f 100644 --- a/bun.lock +++ b/bun.lock @@ -19,9 +19,30 @@ "typescript": "^5.0.0", }, }, + "packages/aws-lambda": { + "name": "@hyperframes/aws-lambda", + "version": "0.0.1", + "dependencies": { + "@aws-sdk/client-s3": "^3.700.0", + "@hyperframes/producer": "workspace:^", + "@sparticuz/chromium": "148.0.0", + "ffmpeg-static": "^5.2.0", + "ffprobe-static": "^3.1.0", + "puppeteer-core": "^24.39.1", + "tar": "^7.4.3", + }, + "devDependencies": { + "@types/aws-lambda": "^8.10.146", + "@types/node": "^25.0.10", + "@types/tar": "^6.1.13", + "esbuild": "^0.25.12", + "tsx": "^4.21.0", + "typescript": "^5.7.2", + }, + }, "packages/cli": { "name": "@hyperframes/cli", - "version": "0.6.2", + "version": "0.6.7", "bin": { "hyperframes": "./dist/cli.js", }, @@ -64,7 +85,7 @@ }, "packages/core": { "name": "@hyperframes/core", - "version": "0.6.2", + "version": "0.6.7", "dependencies": { "@chenglou/pretext": "^0.0.5", "postcss": "^8.5.8", @@ -91,7 +112,7 @@ }, "packages/engine": { "name": "@hyperframes/engine", - "version": "0.6.2", + "version": "0.6.7", "dependencies": { "@hono/node-server": "^1.13.0", "@hyperframes/core": "workspace:^", @@ -109,7 +130,7 
@@ }, "packages/player": { "name": "@hyperframes/player", - "version": "0.6.2", + "version": "0.6.7", "devDependencies": { "@types/bun": "^1.1.0", "gsap": "^3.12.5", @@ -121,7 +142,7 @@ }, "packages/producer": { "name": "@hyperframes/producer", - "version": "0.6.2", + "version": "0.6.7", "dependencies": { "@fontsource/archivo-black": "^5.2.8", "@fontsource/eb-garamond": "^5.2.7", @@ -160,7 +181,7 @@ }, "packages/shader-transitions": { "name": "@hyperframes/shader-transitions", - "version": "0.6.2", + "version": "0.6.7", "dependencies": { "html2canvas": "^1.4.1", }, @@ -172,7 +193,7 @@ }, "packages/studio": { "name": "@hyperframes/studio", - "version": "0.6.2", + "version": "0.6.7", "dependencies": { "@codemirror/autocomplete": "^6.20.1", "@codemirror/commands": "^6.10.3", @@ -230,6 +251,68 @@ "@asamuzakjp/nwsapi": ["@asamuzakjp/nwsapi@2.3.9", "", {}, "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q=="], + "@aws-crypto/crc32": ["@aws-crypto/crc32@5.2.0", "", { "dependencies": { "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", "tslib": "^2.6.2" } }, "sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg=="], + + "@aws-crypto/crc32c": ["@aws-crypto/crc32c@5.2.0", "", { "dependencies": { "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", "tslib": "^2.6.2" } }, "sha512-+iWb8qaHLYKrNvGRbiYRHSdKRWhto5XlZUEBwDjYNf+ly5SVYG6zEoYIdxvf5R3zyeP16w4PLBn3rH1xc74Rag=="], + + "@aws-crypto/sha1-browser": ["@aws-crypto/sha1-browser@5.2.0", "", { "dependencies": { "@aws-crypto/supports-web-crypto": "^5.2.0", "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", "@aws-sdk/util-locate-window": "^3.0.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-OH6lveCFfcDjX4dbAvCFSYUjJZjDr/3XJ3xHtjn3Oj5b9RjojQo8npoLeA/bNwkOkrSQ0wgrHzXk4tDRxGKJeg=="], + + "@aws-crypto/sha256-browser": ["@aws-crypto/sha256-browser@5.2.0", "", { "dependencies": { "@aws-crypto/sha256-js": 
"^5.2.0", "@aws-crypto/supports-web-crypto": "^5.2.0", "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", "@aws-sdk/util-locate-window": "^3.0.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-AXfN/lGotSQwu6HNcEsIASo7kWXZ5HYWvfOmSNKDsEqC4OashTp8alTmaz+F7TC2L083SFv5RdB+qU3Vs1kZqw=="], + + "@aws-crypto/sha256-js": ["@aws-crypto/sha256-js@5.2.0", "", { "dependencies": { "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", "tslib": "^2.6.2" } }, "sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA=="], + + "@aws-crypto/supports-web-crypto": ["@aws-crypto/supports-web-crypto@5.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-iAvUotm021kM33eCdNfwIN//F77/IADDSs58i+MDaOqFrVjZo9bAal0NK7HurRuWLLpF1iLX7gbWrjHjeo+YFg=="], + + "@aws-crypto/util": ["@aws-crypto/util@5.2.0", "", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="], + + "@aws-sdk/client-s3": ["@aws-sdk/client-s3@3.1048.0", "", { "dependencies": { "@aws-crypto/sha1-browser": "5.2.0", "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.974.11", "@aws-sdk/credential-provider-node": "^3.972.42", "@aws-sdk/middleware-bucket-endpoint": "^3.972.13", "@aws-sdk/middleware-expect-continue": "^3.972.12", "@aws-sdk/middleware-flexible-checksums": "^3.974.19", "@aws-sdk/middleware-location-constraint": "^3.972.10", "@aws-sdk/middleware-sdk-s3": "^3.972.40", "@aws-sdk/middleware-ssec": "^3.972.10", "@aws-sdk/signature-v4-multi-region": "^3.996.27", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/fetch-http-handler": "^5.4.2", "@smithy/node-http-handler": "^4.7.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-SrJn5FteqqtcDBgQIvqLKk3Qn/2vSsi5XR03I53EDDR4CbCdLysVSNgUnjVncEECMua9Pz+nxO0/lEx3TP+6mA=="], + + "@aws-sdk/core": 
["@aws-sdk/core@3.974.11", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@aws-sdk/xml-builder": "^3.972.24", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/core": "^3.24.2", "@smithy/signature-v4": "^5.4.2", "@smithy/types": "^4.14.1", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-QpnINq5FZH6EOaDEkmHdT7eUunbvD27pDNQypaWjFyYz7Zl1q3UCMQErBZxpmfGfI7MvI2TlK8KTkgNpv8b1ug=="], + + "@aws-sdk/crc64-nvme": ["@aws-sdk/crc64-nvme@3.972.8", "", { "dependencies": { "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-fVfUCL/Xh2zINYMPZvj+iBn6XWouQf0DAnjaWCI9MkmqXzL2Iy5FoQB8O7syFe6gN6AH1ecDDU58T51Ou0kFkA=="], + + "@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.37", "", { "dependencies": { "@aws-sdk/core": "^3.974.11", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-/jpPvEh6f7ntmIzf7dNxoNX6Q8vt8UpesCjbW6mFfk4V1NW6bIy9qxcQ6WbA8As5yQhsZOe+xeNd4xHX8kdY2Q=="], + + "@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.39", "", { "dependencies": { "@aws-sdk/core": "^3.974.11", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/fetch-http-handler": "^5.4.2", "@smithy/node-http-handler": "^4.7.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-pIgTpisWyWg7X1bUbzSjuUYosYTD0Ghz2M0hkSTmb3a6i3qV3uU+NYJPI/E2XSC0HcsZh5rsLPzeXrkb2DS0Cg=="], + + "@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.41", "", { "dependencies": { "@aws-sdk/core": "^3.974.11", "@aws-sdk/credential-provider-env": "^3.972.37", "@aws-sdk/credential-provider-http": "^3.972.39", "@aws-sdk/credential-provider-login": "^3.972.41", "@aws-sdk/credential-provider-process": "^3.972.37", "@aws-sdk/credential-provider-sso": "^3.972.41", "@aws-sdk/credential-provider-web-identity": "^3.972.41", "@aws-sdk/nested-clients": "^3.997.9", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/credential-provider-imds": "^4.3.2", 
"@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-u2tyjaxJJzW8UtW4SM1ZcPMDwO6y+kV+llvou+Adts0FAKyzes5jG4izQN+KX3yE8ZROpS5y1LJ//xL2iSf76w=="], + + "@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.41", "", { "dependencies": { "@aws-sdk/core": "^3.974.11", "@aws-sdk/nested-clients": "^3.997.9", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-0LBitxXiAiaE5nlFPfpNIww/8FRY/I7WIndWsc9GmNFOM7cE1wNpVNQEGEk9Outg5l8xl+3vybxFyUy4l9q/LQ=="], + + "@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.42", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.37", "@aws-sdk/credential-provider-http": "^3.972.39", "@aws-sdk/credential-provider-ini": "^3.972.41", "@aws-sdk/credential-provider-process": "^3.972.37", "@aws-sdk/credential-provider-sso": "^3.972.41", "@aws-sdk/credential-provider-web-identity": "^3.972.41", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/credential-provider-imds": "^4.3.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-D4oon2zbqqsWOJUM99Gm3/ZyJ0IJvTXVN3PyloGb3kQEyI36fjCZheZj422lAgTWWd6TSHgiImLt3RIaLdv3dQ=="], + + "@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.37", "", { "dependencies": { "@aws-sdk/core": "^3.974.11", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-7nVaHBUaWIddASYfVaA9O4D5ZVjewU3sCol9WqZPGfW0nR+0WqE0xHZnD/U2L33PlOB8KNXGKZ6wOES/QijKzg=="], + + "@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.41", "", { "dependencies": { "@aws-sdk/core": "^3.974.11", "@aws-sdk/nested-clients": "^3.997.9", "@aws-sdk/token-providers": "3.1048.0", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-IOWAWEHe5LkjSKkkUUX9ciV6Y1scHTsnfEkdt5yyC4Slrc7AGbkLPrpntjqh18ksJAMOaVhoBsO8p2WyTcY2wQ=="], 
+ + "@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.41", "", { "dependencies": { "@aws-sdk/core": "^3.974.11", "@aws-sdk/nested-clients": "^3.997.9", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-mbACk9Yypa8nm4iGZLs0PofOXEcTDOUw6wDnsPXNDNSd2WNXs1tSo+6nc/fh0jLYdfVZThhBL98PHW4aXFsG5A=="], + + "@aws-sdk/middleware-bucket-endpoint": ["@aws-sdk/middleware-bucket-endpoint@3.972.13", "", { "dependencies": { "@aws-sdk/core": "^3.974.11", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-JDaukix+kt5KwF7FzNSkfZHpqiPJajVkKJLJexF6z5B44+CN70BXGiQaCEAiCtKtRZNvC16eF3SY9L0bDJPlbA=="], + + "@aws-sdk/middleware-expect-continue": ["@aws-sdk/middleware-expect-continue@3.972.12", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-dA5pKTom/Ls9mgeyeaRBNQrRIVOLVjv4AmKOB0/e4yaiXEUy0gSz2d3liP8JHtYoCAEWySU1jWnyzwLOREN+4g=="], + + "@aws-sdk/middleware-flexible-checksums": ["@aws-sdk/middleware-flexible-checksums@3.974.19", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@aws-crypto/crc32c": "5.2.0", "@aws-crypto/util": "5.2.0", "@aws-sdk/core": "^3.974.11", "@aws-sdk/crc64-nvme": "^3.972.8", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-GLciZVIvWM3C+ffuqnUqlAZwRjQdLt+KXiqr9+aRwZyKVyF2J5lrJAzzSqwweNl9hUWBN00BhilWXdMI5DjNcw=="], + + "@aws-sdk/middleware-location-constraint": ["@aws-sdk/middleware-location-constraint@3.972.10", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-rI3NZvJcEvjoD0+0PI0iUAwlPw2IlSlhyvgBK/3WkKJQE/YiKFedd9dMN2lVacdNxPNhxL/jzQaKQdrGtQagjQ=="], + + "@aws-sdk/middleware-sdk-s3": ["@aws-sdk/middleware-sdk-s3@3.972.40", "", { "dependencies": { "@aws-sdk/core": 
"^3.974.11", "@aws-sdk/signature-v4-multi-region": "^3.996.27", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/signature-v4": "^5.4.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-vyFY4EsAGySqqd87Z7n4qcCYXJO3QArB8VIJzuupY5XuLHIp579HTZldIUGGABvAOzLptfPb9+lJBJcB+3/cvA=="], + + "@aws-sdk/middleware-ssec": ["@aws-sdk/middleware-ssec@3.972.10", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-Gli9A0u8EVVb+5bFDGS/QbSVg28w/wpEidg1ggVcSj65BDTdGR6punsOcVjqdiu1i42WHWo51MCvARPIIz9juw=="], + + "@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.997.9", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.974.11", "@aws-sdk/signature-v4-multi-region": "^3.996.27", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/fetch-http-handler": "^5.4.2", "@smithy/node-http-handler": "^4.7.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-jPR3rnmRI4hWYyzfmTGBr7NblMp8QYYeflHXba1H6+7CGrWVqWKQzaXFQ4qbExqPRsXN3T3L3JxFhr6aouXUGQ=="], + + "@aws-sdk/signature-v4-multi-region": ["@aws-sdk/signature-v4-multi-region@3.996.27", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/signature-v4": "^5.4.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-0Phbz4t6HI3D3skxvG2uI+VWU034/nSIw1T8d+FPzzQG9EQTrw94o9mOKO2Gv3n3Oc8P7JD7RAUxkoneLWv5Eg=="], + + "@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.1048.0", "", { "dependencies": { "@aws-sdk/core": "^3.974.11", "@aws-sdk/nested-clients": "^3.997.9", "@aws-sdk/types": "^3.973.8", "@smithy/core": "^3.24.2", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-k0y/GcuesuSfWyUM0WamrGyeZmltRYaPbHO82UDA6mZ/doB+FOHKutikPAtSXMn/hDz970cF+iRuuiYO9VEbAA=="], + + "@aws-sdk/types": ["@aws-sdk/types@3.973.8", "", { "dependencies": { "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, 
"sha512-gjlAdtHMbtR9X5iIhVUvbVcy55KnznpC6bkDUWW9z915bi0ckdUr5cjf16Kp6xq0bP5HBD2xzgbL9F9Quv5vUw=="], + + "@aws-sdk/util-locate-window": ["@aws-sdk/util-locate-window@3.965.5", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-WhlJNNINQB+9qtLtZJcpQdgZw3SCDCpXdUJP7cToGwHbCWCnRckGlc6Bx/OhWwIYFNAn+FIydY8SZ0QmVu3xTQ=="], + + "@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.24", "", { "dependencies": { "@nodable/entities": "2.1.0", "@smithy/types": "^4.14.1", "fast-xml-parser": "5.7.3", "tslib": "^2.6.2" } }, "sha512-V8z5YcDPfsvzrBlj0xR1vhRtocblhYbqdreCJB/voGd4Sr5zjNAeWxexbnqVtskTJe0vFb5KMqbSL++ePl+zRw=="], + + "@aws/lambda-invoke-store": ["@aws/lambda-invoke-store@0.2.4", "", {}, "sha512-iY8yvjE0y651BixKNPgmv1WrQc+GZ142sb0z4gYnChDDY2YqI4P/jsSopBWrKfAt7LOJAkOXt7rC/hms+WclQQ=="], + "@babel/code-frame": ["@babel/code-frame@7.29.0", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw=="], "@babel/compat-data": ["@babel/compat-data@7.29.3", "", {}, "sha512-LIVqM46zQWZhj17qA8wb4nW/ixr2y1Nw+r1etiAWgRM6U1IqP+LNhL1yg440jYZR72jCWcWbLWzIosH+uP1fqg=="], @@ -350,6 +433,8 @@ "@csstools/css-tokenizer": ["@csstools/css-tokenizer@4.0.0", "", {}, "sha512-QxULHAm7cNu72w97JUNCBFODFaXpbDg+dP8b/oWFAZ2MTRppA3U00Y2L1HqaS4J6yBqxwa/Y3nMBaxVKbB/NsA=="], + "@derhuerst/http-basic": ["@derhuerst/http-basic@8.2.4", "", { "dependencies": { "caseless": "^0.12.0", "concat-stream": "^2.0.0", "http-response-object": "^3.0.1", "parse-cache-control": "^1.0.1" } }, "sha512-F9rL9k9Xjf5blCz8HsJRO4diy111cayL2vkY2XE4r4t3n0yPXVYy3KD3nJ1qbrSn9743UWSXH4IwuCa/HWlGFw=="], + "@emnapi/core": ["@emnapi/core@1.10.0", "", { "dependencies": { "@emnapi/wasi-threads": "1.2.1", "tslib": "^2.4.0" } }, "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw=="], "@emnapi/runtime": ["@emnapi/runtime@1.10.0", "", { 
"dependencies": { "tslib": "^2.4.0" } }, "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA=="], @@ -450,6 +535,8 @@ "@hono/node-server": ["@hono/node-server@1.19.14", "", { "peerDependencies": { "hono": "^4" } }, "sha512-GwtvgtXxnWsucXvbQXkRgqksiH2Qed37H9xHZocE5sA3N8O8O8/8FA3uclQXxXVzc9XBZuEOMK7+r02FmSpHtw=="], + "@hyperframes/aws-lambda": ["@hyperframes/aws-lambda@workspace:packages/aws-lambda"], + "@hyperframes/cli": ["@hyperframes/cli@workspace:packages/cli"], "@hyperframes/core": ["@hyperframes/core@workspace:packages/core"], @@ -516,6 +603,8 @@ "@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + "@isaacs/fs-minipass": ["@isaacs/fs-minipass@4.0.1", "", { "dependencies": { "minipass": "^7.0.4" } }, "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w=="], + "@istanbuljs/schema": ["@istanbuljs/schema@0.1.6", "", {}, "sha512-+Sg6GCR/wy1oSmQDFq4LQDAhm3ETKnorxN+y5nbLULOR3P0c14f2Wurzj3/xqPXtasLFfHd5iRFQ7AJt4KH2cw=="], "@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.13", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA=="], @@ -544,6 +633,8 @@ "@napi-rs/wasm-runtime": ["@napi-rs/wasm-runtime@1.1.4", "", { "dependencies": { "@tybys/wasm-util": "^0.10.1" }, "peerDependencies": { "@emnapi/core": "^1.7.1", "@emnapi/runtime": "^1.7.1" } }, "sha512-3NQNNgA1YSlJb/kMH1ildASP9HW7/7kYnRI2szWJaofaS1hWmbGI4H+d3+22aGzXXN9IJ+n+GiFVcGipJP18ow=="], + "@nodable/entities": ["@nodable/entities@2.1.0", "", {}, 
"sha512-nyT7T3nbMyBI/lvr6L5TyWbFJAI9FTgVRakNoBqCD+PmID8DzFrrNdLLtHMwMszOtqZa8PAOV24ZqDnQrhQINA=="], + "@nodelib/fs.scandir": ["@nodelib/fs.scandir@2.1.5", "", { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g=="], "@nodelib/fs.stat": ["@nodelib/fs.stat@2.0.5", "", {}, "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="], @@ -790,12 +881,34 @@ "@simple-libs/stream-utils": ["@simple-libs/stream-utils@1.2.0", "", {}, "sha512-KxXvfapcixpz6rVEB6HPjOUZT22yN6v0vI0urQSk1L8MlEWPDFCZkhw2xmkyoTGYeFw7tWTZd7e3lVzRZRN/EA=="], + "@smithy/core": ["@smithy/core@3.24.3", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@smithy/types": "^4.14.2", "tslib": "^2.6.2" } }, "sha512-Ep/7tPamGY8mgESE3LyLKtxJyy6U52WWAqr/3wial47Sj4u3PiIF73AOGI27UyLy9duTkhZbgzodOfLV4TduZg=="], + + "@smithy/credential-provider-imds": ["@smithy/credential-provider-imds@4.3.3", "", { "dependencies": { "@smithy/core": "^3.24.3", "@smithy/types": "^4.14.2", "tslib": "^2.6.2" } }, "sha512-I2Bti0DKFo2IJyN28ijCsx51BAumEYR4/1yZ1FXyBygy9MqbnMqCev4JPth/MbpRfBSRAX35hITSnAdJRo1u5w=="], + + "@smithy/fetch-http-handler": ["@smithy/fetch-http-handler@5.4.3", "", { "dependencies": { "@smithy/core": "^3.24.3", "@smithy/types": "^4.14.2", "tslib": "^2.6.2" } }, "sha512-F+DRf8IJazRJgYog2A/yJK7eYVc0rqTlRzO+5ZxjJd4WkZoKz0IJRncf7G6t1pdVT3kryJcwuTFhN1c5m6N47A=="], + + "@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="], + + "@smithy/node-http-handler": ["@smithy/node-http-handler@4.7.3", "", { "dependencies": { "@smithy/core": "^3.24.3", "@smithy/types": "^4.14.2", "tslib": "^2.6.2" } }, "sha512-/jPhevcTFPMVl6KNjbaI47iOg1zxC7IsnX4PQDGVZKMFceOXtB8IEYaB7a9VvkP/3oC60WzTeKocvSI7vLT0vA=="], + + 
"@smithy/signature-v4": ["@smithy/signature-v4@5.4.3", "", { "dependencies": { "@smithy/core": "^3.24.3", "@smithy/types": "^4.14.2", "tslib": "^2.6.2" } }, "sha512-53+75QuPl6DL+ct6vVEB51FDO5oulXr20TPV46VvJZg76lIlXNWfxi8j+G2V/t0I2qxCBOa3vX/8bmjrpFVo9g=="], + + "@smithy/types": ["@smithy/types@4.14.2", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-P+otAxbV4CqBybp7EkcJCrig63yE2E7PuNVOmilVMRcx/O+QDzGULTrKsq4DV13gSfak9ObPrWaHl/9bL5YcWw=="], + + "@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="], + + "@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="], + + "@sparticuz/chromium": ["@sparticuz/chromium@148.0.0", "", { "dependencies": { "tar-fs": "^3.1.2" } }, "sha512-na5beDSZkrlcEWEMt+eHu4Xe+MLUgCtHBjHaXGsNaQu5tJWwXE+McxAcMtyumEM/JzXrxGpkO5vAPD9TWhil3g=="], + "@tootallnate/quickjs-emscripten": ["@tootallnate/quickjs-emscripten@0.23.0", "", {}, "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA=="], "@tybys/wasm-util": ["@tybys/wasm-util@0.10.2", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-RoBvJ2X0wuKlWFIjrwffGw1IqZHKQqzIchKaadZZfnNpsAYp2mM0h36JtPCjNDAHGgYez/15uMBpfGwchhiMgg=="], "@types/adm-zip": ["@types/adm-zip@0.5.8", "", { "dependencies": { "@types/node": "*" } }, "sha512-RVVH7QvZYbN+ihqZ4kX/dMiowf6o+Jk1fNwiSdx0NahBJLU787zkULhGhJM8mf/obmLGmgdMM0bXsQTmyfbR7Q=="], + "@types/aws-lambda": ["@types/aws-lambda@8.10.161", "", {}, "sha512-rUYdp+MQwSFocxIOcSsYSF3YYYC/uUpMbCY/mbO21vGqfrEYvNSoPyKYDj6RhXXpPfS0KstW9RwG3qXh9sL7FQ=="], + "@types/babel__core": ["@types/babel__core@7.20.5", "", { "dependencies": { "@babel/parser": "^7.20.7", 
"@babel/types": "^7.20.7", "@types/babel__generator": "*", "@types/babel__template": "*", "@types/babel__traverse": "*" } }, "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA=="], "@types/babel__generator": ["@types/babel__generator@7.27.0", "", { "dependencies": { "@babel/types": "^7.0.0" } }, "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg=="], @@ -824,6 +937,8 @@ "@types/retry": ["@types/retry@0.12.0", "", {}, "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA=="], + "@types/tar": ["@types/tar@6.1.13", "", { "dependencies": { "@types/node": "*", "minipass": "^4.0.0" } }, "sha512-IznnlmU5f4WcGTh2ltRu/Ijpmk8wiWXfF0VA4s+HPjHZgvFggk1YaIkbo5krX/zUCzWF8N/l4+W/LNxnvAJ8nw=="], + "@types/tough-cookie": ["@types/tough-cookie@4.0.5", "", {}, "sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA=="], "@types/whatwg-mimetype": ["@types/whatwg-mimetype@3.0.2", "", {}, "sha512-c2AKvDT8ToxLIOUlN51gTiHXflsfIFisS4pO7pDPoKouJCESkhZnEy623gwP9laCy5lnLDAw1vAzu2vM2YLOrA=="], @@ -856,7 +971,7 @@ "adm-zip": ["adm-zip@0.5.17", "", {}, "sha512-+Ut8d9LLqwEvHHJl1+PIHqoyDxFgVN847JTVM3Izi3xHDWPE4UtzzXysMZQs64DMcrJfBeS/uoEP4AD3HQHnQQ=="], - "agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], + "agent-base": ["agent-base@6.0.2", "", { "dependencies": { "debug": "4" } }, "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ=="], "ajv": ["ajv@8.20.0", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-Thbli+OlOj+iMPYFBVBfJ3OmCAnaSyNn4M1vz9T6Gka5Jt9ba/HIR56joy65tY6kx/FCF5VXNB819Y7/GUrBGA=="], @@ -914,6 +1029,8 @@ "boolbase": ["boolbase@1.0.0", "", {}, 
"sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="], + "bowser": ["bowser@2.14.1", "", {}, "sha512-tzPjzCxygAKWFOJP011oxFHs57HzIhOEracIgAePE4pqB3LikALKnSzUyU4MGs9/iCEUuHlAJTjTc5M+u7YEGg=="], + "brace-expansion": ["brace-expansion@5.0.6", "", { "dependencies": { "balanced-match": "^4.0.2" } }, "sha512-kLpxurY4Z4r9sgMsyG0Z9uzsBlgiU/EFKhj/h91/8yHu0edo7XuixOIH3VcJ8kkxs6/jPzoI6U9Vj3WqbMQ94g=="], "braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="], @@ -924,6 +1041,8 @@ "buffer-equal-constant-time": ["buffer-equal-constant-time@1.0.1", "", {}, "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="], + "buffer-from": ["buffer-from@1.1.2", "", {}, "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ=="], + "bun-types": ["bun-types@1.3.13", "", { "dependencies": { "@types/node": "*" } }, "sha512-QXKeHLlOLqQX9LgYaHJfzdBaV21T63HhFJnvuRCcjZiaUDpbs5ED1MgxbMra71CsryN/1dAoXuJJJwIv/2drVA=="], "bundle-name": ["bundle-name@4.1.0", "", { "dependencies": { "run-applescript": "^7.0.0" } }, "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q=="], @@ -938,6 +1057,8 @@ "caniuse-lite": ["caniuse-lite@1.0.30001792", "", {}, "sha512-hVLMUZFgR4JJ6ACt1uEESvQN1/dBVqPAKY0hgrV70eN3391K6juAfTjKZLKvOMsx8PxA7gsY1/tLMMTcfFLLpw=="], + "caseless": ["caseless@0.12.0", "", {}, "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw=="], + "chai": ["chai@5.3.3", "", { "dependencies": { "assertion-error": "^2.0.1", "check-error": "^2.1.1", "deep-eql": "^5.0.1", "loupe": "^3.1.0", "pathval": "^2.0.0" } }, "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw=="], "chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": 
"^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], @@ -946,6 +1067,8 @@ "chokidar": ["chokidar@4.0.3", "", { "dependencies": { "readdirp": "^4.0.1" } }, "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA=="], + "chownr": ["chownr@3.0.0", "", {}, "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g=="], + "chromium-bidi": ["chromium-bidi@14.0.0", "", { "dependencies": { "mitt": "^3.0.1", "zod": "^3.24.1" }, "peerDependencies": { "devtools-protocol": "*" } }, "sha512-9gYlLtS6tStdRWzrtXaTMnqcM4dudNegMXJxkR0I/CXObHalYeYcAMPrL19eroNZHtJ8DQmu1E+ZNOYu/IXMXw=="], "citty": ["citty@0.2.2", "", {}, "sha512-+6vJA3L98yv+IdfKGZHBNiGW5KHn22e/JwID0Strsz8h4S/csAu/OuICwxrg44k5MRiZHWIo8XXuJgQTriRP4w=="], @@ -964,6 +1087,8 @@ "compare-versions": ["compare-versions@6.1.1", "", {}, "sha512-4hm4VPpIecmlg59CHXnRDnqGplJFrbLG4aFEl5vl6cK1u76ws3LLvX7ikFnTDl5vo39sjWD6AaDPYodJp/NNHg=="], + "concat-stream": ["concat-stream@2.0.0", "", { "dependencies": { "buffer-from": "^1.0.0", "inherits": "^2.0.3", "readable-stream": "^3.0.2", "typedarray": "^0.0.6" } }, "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A=="], + "concurrently": ["concurrently@8.2.2", "", { "dependencies": { "chalk": "^4.1.2", "date-fns": "^2.30.0", "lodash": "^4.17.21", "rxjs": "^7.8.1", "shell-quote": "^1.8.1", "spawn-command": "0.0.2", "supports-color": "^8.1.1", "tree-kill": "^1.2.2", "yargs": "^17.7.2" }, "bin": { "conc": "dist/bin/concurrently.js", "concurrently": "dist/bin/concurrently.js" } }, "sha512-1dP4gpXFhei8IOtlXRE/T/4H88ElHgTiUzh71YUmtjTEHMSRS2Z/fgOxHSxxusGHogsRfxNq1vyAwxSC+EVyDg=="], "confbox": ["confbox@0.1.8", "", {}, "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w=="], @@ -1104,6 +1229,10 @@ "fast-wrap-ansi": ["fast-wrap-ansi@0.2.0", "", { 
"dependencies": { "fast-string-width": "^3.0.2" } }, "sha512-rLV8JHxTyhVmFYhBJuMujcrHqOT2cnO5Zxj37qROj23CP39GXubJRBUFF0z8KFK77Uc0SukZUf7JZhsVEQ6n8w=="], + "fast-xml-builder": ["fast-xml-builder@1.2.0", "", { "dependencies": { "path-expression-matcher": "^1.5.0", "xml-naming": "^0.1.0" } }, "sha512-00aAWieqff+ZJhsXA4g1g7M8k+7AYoMUUHF+/zFb5U6Uv/P0Vl4QZo84/IcufzYalLuEj9928bXN9PbbFzMF0Q=="], + + "fast-xml-parser": ["fast-xml-parser@5.7.3", "", { "dependencies": { "@nodable/entities": "^2.1.0", "fast-xml-builder": "^1.1.7", "path-expression-matcher": "^1.5.0", "strnum": "^2.2.3" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-C0AaNuC+mscy6vrAQKAc/rMq+zAPHodfHGZu4sGVehvAQt/JLG1O5zEcYcXSY5zSqr4YVgxsB+pHXTq0i7eDlg=="], + "fastq": ["fastq@1.20.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw=="], "fd-package-json": ["fd-package-json@2.0.0", "", { "dependencies": { "walk-up-path": "^4.0.0" } }, "sha512-jKmm9YtsNXN789RS/0mSzOC1NUq9mkVd65vbSSVsKdjGvYXBuE4oWe2QOEoFeRmJg+lPuZxpmrfFclNhoRMneQ=="], @@ -1114,6 +1243,10 @@ "fetch-blob": ["fetch-blob@3.2.0", "", { "dependencies": { "node-domexception": "^1.0.0", "web-streams-polyfill": "^3.0.3" } }, "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ=="], + "ffmpeg-static": ["ffmpeg-static@5.3.0", "", { "dependencies": { "@derhuerst/http-basic": "^8.2.0", "env-paths": "^2.2.0", "https-proxy-agent": "^5.0.0", "progress": "^2.0.3" } }, "sha512-H+K6sW6TiIX6VGend0KQwthe+kaceeH/luE8dIZyOP35ik7ahYojDuqlTV1bOrtEwl01sy2HFNGQfi5IDJvotg=="], + + "ffprobe-static": ["ffprobe-static@3.1.0", "", {}, "sha512-Dvpa9uhVMOYivhHKWLGDoa512J751qN1WZAIO+Xw4L/mrUSPxS4DApzSUDbCFE/LUq2+xYnznEahTd63AqBSpA=="], + "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], 
"fix-dts-default-cjs-exports": ["fix-dts-default-cjs-exports@1.0.1", "", { "dependencies": { "magic-string": "^0.30.17", "mlly": "^1.7.4", "rollup": "^4.34.8" } }, "sha512-pVIECanWFC61Hzl2+oOCtoJ3F17kglZC/6N94eRWycFgBH35hHx0Li604ZIzhseh97mf2p0cv7vVrOZGoqhlEg=="], @@ -1188,12 +1321,16 @@ "http-proxy-agent": ["http-proxy-agent@7.0.2", "", { "dependencies": { "agent-base": "^7.1.0", "debug": "^4.3.4" } }, "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig=="], - "https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "http-response-object": ["http-response-object@3.0.2", "", { "dependencies": { "@types/node": "^10.0.3" } }, "sha512-bqX0XTF6fnXSQcEJ2Iuyr75yVakyjIDCqroJQ/aHfSdlM743Cwqoi2nDYMzLGWUcuTWGWy8AAvOKXTfiv6q9RA=="], + + "https-proxy-agent": ["https-proxy-agent@5.0.1", "", { "dependencies": { "agent-base": "6", "debug": "4" } }, "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA=="], "import-fresh": ["import-fresh@3.3.1", "", { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ=="], "import-meta-resolve": ["import-meta-resolve@4.2.0", "", {}, "sha512-Iqv2fzaTQN28s/FwZAoFq0ZSs/7hMAHJVX+w8PZl3cY19Pxk6jFFalxQoIfW2826i/fDLXv8IiEZRIT0lDuWcg=="], + "inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="], + "ini": ["ini@6.0.0", "", {}, "sha512-IBTdIkzZNOpqm7q3dRqJvMaldXjDHWkEDfrwGEQTs5eaQMWV+djAhR+wahyNNMAa+qpbDUhBMVt4ZKNwpPm7xQ=="], "ip-address": ["ip-address@10.2.0", "", {}, "sha512-/+S6j4E9AHvW9SWMSEY9Xfy66O5PWvVEJ08O0y5JGyEKQpojb0K0GKpz/v5HJ/G0vi3D2sjGK78119oXZeE0qA=="], @@ -1324,7 +1461,9 @@ "minimist": ["minimist@1.2.8", 
"", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], - "minipass": ["minipass@7.1.3", "", {}, "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A=="], + "minipass": ["minipass@4.2.8", "", {}, "sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ=="], + + "minizlib": ["minizlib@3.1.0", "", { "dependencies": { "minipass": "^7.1.2" } }, "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw=="], "mitt": ["mitt@3.0.1", "", {}, "sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw=="], @@ -1386,10 +1525,14 @@ "parent-module": ["parent-module@1.0.1", "", { "dependencies": { "callsites": "^3.0.0" } }, "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g=="], + "parse-cache-control": ["parse-cache-control@1.0.1", "", {}, "sha512-60zvsJReQPX5/QP0Kzfd/VrpjScIQ7SHBW6bFCYfEP+fp0Eppr1SHhIO5nd1PjZtvclzSzES9D/p5nFJurwfWg=="], + "parse-json": ["parse-json@5.2.0", "", { "dependencies": { "@babel/code-frame": "^7.0.0", "error-ex": "^1.3.1", "json-parse-even-better-errors": "^2.3.0", "lines-and-columns": "^1.1.6" } }, "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg=="], "parse5": ["parse5@8.0.1", "", { "dependencies": { "entities": "^8.0.0" } }, "sha512-z1e/HMG90obSGeidlli3hj7cbocou0/wa5HacvI3ASx34PecNjNQeaHNo5WIZpWofN9kgkqV1q5YvXe3F0FoPw=="], + "path-expression-matcher": ["path-expression-matcher@1.5.0", "", {}, "sha512-cbrerZV+6rvdQrrD+iGMcZFEiiSrbv9Tfdkvnusy6y0x0GKBXREFg/Y65GhIfm0tnLntThhzCnfKwp1WRjeCyQ=="], + "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], "path-parse": ["path-parse@1.0.7", "", {}, 
"sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="], @@ -1454,6 +1597,8 @@ "read-cache": ["read-cache@1.0.0", "", { "dependencies": { "pify": "^2.3.0" } }, "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA=="], + "readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="], + "readdirp": ["readdirp@4.1.2", "", {}, "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg=="], "require-directory": ["require-directory@2.1.1", "", {}, "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="], @@ -1526,6 +1671,8 @@ "string-width-cjs": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + "string_decoder": ["string_decoder@1.3.0", "", { "dependencies": { "safe-buffer": "~5.2.0" } }, "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA=="], + "strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], "strip-ansi-cjs": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], @@ -1534,6 +1681,8 @@ "strip-literal": ["strip-literal@3.1.0", "", { "dependencies": { "js-tokens": "^9.0.1" } }, "sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg=="], + "strnum": ["strnum@2.3.0", "", {}, 
"sha512-ums3KNd42PGyx5xaoVTO1mjU1bH3NpY4vsrVlnv9PNGqQj8wd7rJ6nEypLrJ7z5vxK5RP0yMLo6J/Gsm62DI5Q=="], + "style-mod": ["style-mod@4.1.3", "", {}, "sha512-i/n8VsZydrugj3Iuzll8+x/00GH2vnYsk1eomD8QiRrSAeW6ItbCQDtfXCeJHd0iwiNagqjQkvpvREEPtW3IoQ=="], "sucrase": ["sucrase@3.35.1", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.2", "commander": "^4.0.0", "lines-and-columns": "^1.1.6", "mz": "^2.7.0", "pirates": "^4.0.1", "tinyglobby": "^0.2.11", "ts-interface-checker": "^0.1.9" }, "bin": { "sucrase": "bin/sucrase", "sucrase-node": "bin/sucrase-node" } }, "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw=="], @@ -1546,6 +1695,8 @@ "tailwindcss": ["tailwindcss@3.4.19", "", { "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", "chokidar": "^3.6.0", "didyoumean": "^1.2.2", "dlv": "^1.1.3", "fast-glob": "^3.3.2", "glob-parent": "^6.0.2", "is-glob": "^4.0.3", "jiti": "^1.21.7", "lilconfig": "^3.1.3", "micromatch": "^4.0.8", "normalize-path": "^3.0.0", "object-hash": "^3.0.0", "picocolors": "^1.1.1", "postcss": "^8.4.47", "postcss-import": "^15.1.0", "postcss-js": "^4.0.1", "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", "postcss-nested": "^6.2.0", "postcss-selector-parser": "^6.1.2", "resolve": "^1.22.8", "sucrase": "^3.35.0" }, "bin": { "tailwind": "lib/cli.js", "tailwindcss": "lib/cli.js" } }, "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ=="], + "tar": ["tar@7.5.15", "", { "dependencies": { "@isaacs/fs-minipass": "^4.0.0", "chownr": "^3.0.0", "minipass": "^7.1.2", "minizlib": "^3.1.0", "yallist": "^5.0.0" } }, "sha512-dzGK0boVlC4W5QFuQN1EFSl3bIDYsk7Tj40U6eIBnK2k/8ml7TZ5agbI5j5+qnoVcAA+rNtBml8SEiLxZpNqRQ=="], + "tar-fs": ["tar-fs@3.1.2", "", { "dependencies": { "pump": "^3.0.0", "tar-stream": "^3.1.5" }, "optionalDependencies": { "bare-fs": "^4.0.1", "bare-path": "^3.0.0" } }, 
"sha512-QGxxTxxyleAdyM3kpFs14ymbYmNFrfY+pHj7Z8FgtbZ7w2//VAgLMac7sT6nRpIHjppXO2AwwEOg0bPFVRcmXw=="], "tar-stream": ["tar-stream@3.2.0", "", { "dependencies": { "b4a": "^1.6.4", "bare-fs": "^4.5.5", "fast-fifo": "^1.2.0", "streamx": "^2.15.0" } }, "sha512-ojzvCvVaNp6aOTFmG7jaRD0meowIAuPc3cMMhSgKiVWws1GyHbGd/xvnyuRKcKlMpt3qvxx6r0hreCNITP9hIg=="], @@ -1598,6 +1749,8 @@ "typed-query-selector": ["typed-query-selector@2.12.2", "", {}, "sha512-EOPFbyIub4ngnEdqi2yOcNeDLaX/0jcE1JoAXQDDMIthap7FoN795lc/SHfIq2d416VufXpM8z/lD+WRm2gfOQ=="], + "typedarray": ["typedarray@0.0.6", "", {}, "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA=="], + "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], "ufo": ["ufo@1.6.4", "", {}, "sha512-JFNbkD1Svwe0KvGi8GOeLcP4kAWQ609twvCdcHxq1oSL8svv39ZuSvajcD8B+5D0eL4+s1Is2D/O6KN3qcTeRA=="], @@ -1654,11 +1807,13 @@ "xml-name-validator": ["xml-name-validator@5.0.0", "", {}, "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg=="], + "xml-naming": ["xml-naming@0.1.0", "", {}, "sha512-k8KO9hrMyNk6tUWqUfkTEZbezRRpONVOzUTnc97VnCvyj6Tf9lyUR9EDAIeiVLv56jsMcoXEwjW8Kv5yPY52lw=="], + "xmlchars": ["xmlchars@2.2.0", "", {}, "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw=="], "y18n": ["y18n@5.0.8", "", {}, "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="], - "yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], + "yallist": ["yallist@5.0.0", "", {}, "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw=="], "yaml": ["yaml@2.9.0", "", { "bin": { "yaml": "bin.mjs" } }, 
"sha512-2AvhNX3mb8zd6Zy7INTtSpl1F15HW6Wnqj0srWlkKLcpYl/gMIMJiyuGq2KeI2YFxUPjdlB+3Lc10seMLtL4cA=="], @@ -1688,6 +1843,8 @@ "@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + "@isaacs/fs-minipass/minipass": ["minipass@7.1.3", "", {}, "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A=="], + "anymatch/picomatch": ["picomatch@2.3.2", "", {}, "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA=="], "chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], @@ -1704,10 +1861,18 @@ "fast-glob/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], + "gaxios/https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "get-uri/data-uri-to-buffer": ["data-uri-to-buffer@6.0.2", "", {}, "sha512-7hvf7/GW8e86rW0ptuwS3OcBGDjIi6SZva7hCyWC0yYry2cOPmLIjXAUHI6DK2HsnwJd9ifmt57i8eV2n4YNpw=="], "glob/minimatch": ["minimatch@9.0.9", "", { "dependencies": { "brace-expansion": "^2.0.2" } }, "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg=="], + "glob/minipass": ["minipass@7.1.3", "", {}, "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A=="], + + "http-proxy-agent/agent-base": ["agent-base@7.1.4", "", {}, 
"sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], + + "http-response-object/@types/node": ["@types/node@10.17.60", "", {}, "sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw=="], + "import-fresh/resolve-from": ["resolve-from@4.0.0", "", {}, "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g=="], "istanbul-lib-report/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], @@ -1718,18 +1883,34 @@ "micromatch/picomatch": ["picomatch@2.3.2", "", {}, "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA=="], + "minizlib/minipass": ["minipass@7.1.3", "", {}, "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A=="], + + "pac-proxy-agent/agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], + + "pac-proxy-agent/https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "parse5/entities": ["entities@8.0.0", "", {}, "sha512-zwfzJecQ/Uej6tusMqwAqU/6KL2XaB2VZ2Jg54Je6ahNBGNH6Ek6g3jjNCF0fG9EWQKGZNddNjU5F1ZQn/sBnA=="], "path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], + "path-scurry/minipass": ["minipass@7.1.3", "", {}, "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A=="], + + "proxy-agent/agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], + + 
"proxy-agent/https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "proxy-agent/lru-cache": ["lru-cache@7.18.3", "", {}, "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA=="], + "socks-proxy-agent/agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], + "strip-literal/js-tokens": ["js-tokens@9.0.1", "", {}, "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ=="], "tailwindcss/chokidar": ["chokidar@3.6.0", "", { "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", "readdirp": "~3.6.0" }, "optionalDependencies": { "fsevents": "~2.3.2" } }, "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw=="], "tailwindcss/jiti": ["jiti@1.21.7", "", { "bin": { "jiti": "bin/jiti.js" } }, "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A=="], + "tar/minipass": ["minipass@7.1.3", "", {}, "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A=="], + "tsup/esbuild": ["esbuild@0.27.7", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.7", "@esbuild/android-arm": "0.27.7", "@esbuild/android-arm64": "0.27.7", "@esbuild/android-x64": "0.27.7", "@esbuild/darwin-arm64": "0.27.7", "@esbuild/darwin-x64": "0.27.7", "@esbuild/freebsd-arm64": "0.27.7", "@esbuild/freebsd-x64": "0.27.7", "@esbuild/linux-arm": "0.27.7", "@esbuild/linux-arm64": "0.27.7", "@esbuild/linux-ia32": "0.27.7", "@esbuild/linux-loong64": "0.27.7", "@esbuild/linux-mips64el": "0.27.7", "@esbuild/linux-ppc64": "0.27.7", "@esbuild/linux-riscv64": "0.27.7", 
"@esbuild/linux-s390x": "0.27.7", "@esbuild/linux-x64": "0.27.7", "@esbuild/netbsd-arm64": "0.27.7", "@esbuild/netbsd-x64": "0.27.7", "@esbuild/openbsd-arm64": "0.27.7", "@esbuild/openbsd-x64": "0.27.7", "@esbuild/openharmony-arm64": "0.27.7", "@esbuild/sunos-x64": "0.27.7", "@esbuild/win32-arm64": "0.27.7", "@esbuild/win32-ia32": "0.27.7", "@esbuild/win32-x64": "0.27.7" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w=="], "tsup/tinyexec": ["tinyexec@0.3.2", "", {}, "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA=="], @@ -1740,12 +1921,16 @@ "vitest/tinypool": ["tinypool@1.1.1", "", {}, "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg=="], + "@babel/helper-compilation-targets/lru-cache/yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], + "@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], "@isaacs/cliui/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], "@isaacs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], + "gaxios/https-proxy-agent/agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], + "glob/minimatch/brace-expansion": ["brace-expansion@2.1.0", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-TN1kCZAgdgweJhWWpgKYrQaMNHcDULHkWwQIspdtjV4Y5aurRdZpjAqn6yX3FPqTA9ngHCc4hJxMAMgGfve85w=="], "tailwindcss/chokidar/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { 
"is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], diff --git a/packages/aws-lambda/README.md b/packages/aws-lambda/README.md new file mode 100644 index 000000000..d32e5da20 --- /dev/null +++ b/packages/aws-lambda/README.md @@ -0,0 +1,113 @@ +# @hyperframes/aws-lambda + +AWS Lambda adapter for HyperFrames distributed rendering. Wraps the OSS +`plan` / `renderChunk` / `assemble` primitives into a single Lambda handler +that Step Functions can dispatch on, plus a build pipeline that bundles +the handler + Chrome runtime + ffmpeg into a deployable ZIP. + +The Lambda adapter ships in two parts: the foundation (this package + the +SAM example) validates the architecture end-to-end on real AWS; the +user-facing surface (CLI, CDK construct, migration guide) lands in +follow-up PRs. + +## Architecture + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ Step Functions state machine │ +│ Plan → Map(N) RenderChunk → Assemble │ +└──────────────────────────────────────────────────────────────────┘ + │ dispatches by event.Action + ▼ +┌──────────────────────────────────────────────────────────────────┐ +│ One Lambda function (this package's `dist/handler.zip`) │ +│ handler.mjs │ +│ ├─ Action="plan" → @hyperframes/producer/distributed │ +│ ├─ Action="renderChunk" → @hyperframes/producer/distributed │ +│ └─ Action="assemble" → @hyperframes/producer/distributed │ +│ bin/ffmpeg — ffmpeg-static │ +│ node_modules/@sparticuz/chromium/ — Lambda-optimised Chromium │ +└──────────────────────────────────────────────────────────────────┘ + │ pure functions over local paths + ▼ +┌──────────────────────────────────────────────────────────────────┐ +│ S3 bucket — plan tarball + per-chunk outputs + final mp4 │ +└──────────────────────────────────────────────────────────────────┘ +``` + +The handler downloads inputs from S3 into `/tmp`, calls the OSS primitive, +uploads outputs back to S3, and 
returns a small JSON result that fits +inside Step Functions' history budget (under 200 bytes per chunk). + +## Chrome runtime + +The package supports two Chromium sources: + +| Source | Default | Size | When to pick it | +| ------------------------------- | ------- | ------------------ | --------------------------------------------------------------------------------------------------------------------- | +| `@sparticuz/chromium` | yes | ~70 MiB compressed | Lambda. Decompresses into `/tmp` at runtime; the rest of the ecosystem already uses it for headless-Chrome-in-Lambda. | +| Bundled `chrome-headless-shell` | no | ~140 MiB | Fallback. Used if `@sparticuz/chromium` ever drops `HeadlessExperimental.beginFrame` support. | + +Pick the source at build time: + +```bash +bun run --cwd packages/aws-lambda build:zip +bun run --cwd packages/aws-lambda build:zip -- --source=chrome-headless-shell +``` + +The handler reads `HYPERFRAMES_LAMBDA_CHROME_SOURCE` at boot. The build +script sets that env var via Lambda function configuration in +`examples/aws-lambda/template.yaml`. + +## BeginFrame regression guard + +HyperFrames' renderer drives Chrome via the CDP +`HeadlessExperimental.beginFrame` command — same path the K8s deploy uses. +The Lambda adapter assumes that `@sparticuz/chromium`'s +chrome-headless-shell build honours BeginFrame. To prove it (and re-prove +it on every release), the package ships a Docker probe: + +```bash +# Build the Lambda-like container and run the probe. +bun run --cwd packages/aws-lambda probe:beginframe:docker +``` + +The probe boots `@sparticuz/chromium` inside +`public.ecr.aws/lambda/nodejs:22` and asserts CDP `beginFrame` with +`screenshot: true` returns a PNG buffer. Exit code 0 = green; non-zero = +fall back to bundling chrome-headless-shell directly via `--source=chrome-headless-shell`. 
+ +## Building the ZIP + +```bash +bun install # at the monorepo root +bun run --cwd packages/aws-lambda build:zip # → packages/aws-lambda/dist/handler.zip +bun run --cwd packages/aws-lambda verify:zip-size # CI gate +``` + +The build script bundles `src/handler.ts` via esbuild, stages +`@sparticuz/chromium` and `puppeteer-core` under `node_modules/`, copies +ffmpeg-static into `bin/`, and zips the result. The unzipped layout is +designed to extract cleanly into Lambda's `/var/task/`. + +`verify:zip-size` enforces: + +- Unzipped ≤ 248 MiB (in-house budget; Lambda hard ceiling is 250 MiB unzipped — AWS docs label this "250 MB" but use binary mebibytes) +- Zipped ≤ 150 MiB (in-house budget; Lambda has no hard zipped cap for S3-deployed functions) + +CI fails the PR if either is exceeded. + +## Running tests + +```bash +bun run --cwd packages/aws-lambda test # unit tests (no Chrome) +bun run --cwd packages/aws-lambda probe:beginframe # local probe (Linux only) +``` + +## What's NOT in this PR + +- `examples/aws-lambda/template.yaml` (SAM template — separate PR). +- Real-AWS deploy + smoke workflow (separate PR). +- `npx hyperframes lambda deploy` CLI — follow-up. +- CDK construct (`HyperframesRenderStack`) — follow-up. +- Migration guide — follow-up. 
diff --git a/packages/aws-lambda/package.json b/packages/aws-lambda/package.json new file mode 100644 index 000000000..3c6447fbb --- /dev/null +++ b/packages/aws-lambda/package.json @@ -0,0 +1,55 @@ +{ + "name": "@hyperframes/aws-lambda", + "version": "0.0.1", + "description": "AWS Lambda adapter for HyperFrames distributed rendering — Plan/RenderChunk/Assemble handler + ZIP bundling.", + "repository": { + "type": "git", + "url": "https://github.com/heygen-com/hyperframes", + "directory": "packages/aws-lambda" + }, + "files": [ + "src/", + "scripts/", + "README.md" + ], + "type": "module", + "main": "./src/index.ts", + "types": "./src/index.ts", + "exports": { + ".": "./src/index.ts", + "./handler": "./src/handler.ts" + }, + "publishConfig": { + "access": "public", + "registry": "https://registry.npmjs.org/" + }, + "scripts": { + "build": "tsc --noEmit", + "build:zip": "tsx scripts/build-zip.ts", + "probe:beginframe": "tsx scripts/probe-beginframe.ts", + "probe:beginframe:docker": "docker build -f scripts/probe-beginframe.dockerfile -t hyperframes-lambda-probe:local ../.. 
&& docker run --rm hyperframes-lambda-probe:local", + "test": "bun test", + "typecheck": "tsc --noEmit", + "verify:zip-size": "tsx scripts/verify-zip-size.ts" + }, + "dependencies": { + "@aws-sdk/client-s3": "^3.700.0", + "@hyperframes/producer": "workspace:^", + "@sparticuz/chromium": "148.0.0", + "ffmpeg-static": "^5.2.0", + "ffprobe-static": "^3.1.0", + "puppeteer-core": "^24.39.1", + "tar": "^7.4.3" + }, + "devDependencies": { + "@types/aws-lambda": "^8.10.146", + "@types/node": "^25.0.10", + "@types/tar": "^6.1.13", + "esbuild": "^0.25.12", + "tsx": "^4.21.0", + "typescript": "^5.7.2" + }, + "engines": { + "node": ">=22" + } +} diff --git a/packages/aws-lambda/scripts/_formatBytes.ts b/packages/aws-lambda/scripts/_formatBytes.ts new file mode 100644 index 000000000..3d72510d2 --- /dev/null +++ b/packages/aws-lambda/scripts/_formatBytes.ts @@ -0,0 +1,15 @@ +/** + * Shared binary-unit byte formatter for the build/verify scripts. + * + * The Lambda ZIP-size budget is in mebibytes (Lambda's 250 MB / 248 MiB + * gate is binary, not decimal), so logs and CI failure messages use + * KiB / MiB / GiB. This is intentionally a different unit system from + * `packages/cli/src/ui/format.ts`'s `formatBytes` (KB / MB, decimal) — + * don't conflate them. + */ +export function formatBytes(bytes: number): string { + if (bytes < 1024) return `${bytes} B`; + if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KiB`; + if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)} MiB`; + return `${(bytes / (1024 * 1024 * 1024)).toFixed(2)} GiB`; +} diff --git a/packages/aws-lambda/scripts/build-zip.ts b/packages/aws-lambda/scripts/build-zip.ts new file mode 100644 index 000000000..420524442 --- /dev/null +++ b/packages/aws-lambda/scripts/build-zip.ts @@ -0,0 +1,480 @@ +#!/usr/bin/env tsx +/** + * Build the AWS Lambda deployment ZIP. 
+ * + * Pack layout (paths inside the ZIP are relative to Lambda's + * `/var/task/`): + * + * handler.mjs — bundled entry, set as Lambda's Handler + * handler.mjs.map — sourcemap (debugging aid; small) + * bin/ffmpeg — ffmpeg-static binary + * bin/chrome-headless-shell — fallback Chrome (only when CHROME_SOURCE=shell) + * node_modules/@sparticuz/chromium/ + * — primary Chrome (lives under node_modules so + * runtime `import("@sparticuz/chromium")` + * resolves; the package's own tarball stays + * inside). + * + * The handler bundle (esbuild) externalises modules whose binary assets + * must be present at runtime — `@sparticuz/chromium` for its bin tarball, + * `puppeteer-core` because Lambda runtime resolves it via Node module + * resolution from `node_modules/`. Everything else is inlined for cold + * start speed. + * + * Run: + * bun run --cwd packages/aws-lambda build:zip + * bun run --cwd packages/aws-lambda build:zip -- --source=chrome-headless-shell + * + * Outputs the resolved ZIP path + size to stdout and writes a sidecar + * JSON (`dist/handler.zip.manifest.json`) describing the contents. + */ + +import { spawnSync } from "node:child_process"; +import { + chmodSync, + cpSync, + existsSync, + mkdirSync, + readdirSync, + readFileSync, + rmSync, + statSync, + writeFileSync, +} from "node:fs"; +import { dirname, join, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; +import * as esbuild from "esbuild"; +import { formatBytes } from "./_formatBytes.js"; + +const scriptDir = dirname(fileURLToPath(import.meta.url)); +const packageRoot = resolve(scriptDir, ".."); +const monorepoRoot = resolve(packageRoot, "../.."); +const distDir = join(packageRoot, "dist"); + +interface BuildOptions { + source: "sparticuz" | "chrome-headless-shell"; + /** Hard upper bound on the unzipped bundle size in bytes (Lambda limit is 250 MiB). */ + maxUnzippedBytes: number; + /** Hard upper bound on the ZIP file size in bytes. 
 */
+  maxZippedBytes: number;
+}
+
+const DEFAULT_OPTIONS: BuildOptions = {
+  source: "sparticuz",
+  // Lambda's hard ceiling for ZIP-deployed functions is 250 MiB unzipped
+  // (AWS docs label it "250 MB" but the 262144000-byte value is 250
+  // binary mebibytes). We gate at 248 MiB to keep ~2 MiB of headroom —
+  // the sparticuz Chrome (~70 MiB) + ffmpeg (~80 MiB) + ffprobe (~62
+  // MiB) + bundled Node deps put us close to the ceiling. Chrome itself
+  // decompresses into Lambda's `/tmp` at cold start, which has its own
+  // 10 GiB budget, so the unzipped /var/task footprint above is what
+  // actually competes with Lambda's 250 MiB limit.
+  maxUnzippedBytes: 248 * 1024 * 1024,
+  // Lambda's only zipped-size cap is for direct console/CLI uploads (50
+  // MiB); S3-deployed functions are bounded by the unzipped ceiling. We
+  // gate at 150 MiB to flag a sudden bundle-size regression without
+  // false-failing on the natural ~100 MiB sparticuz + ffmpeg payload.
+  maxZippedBytes: 150 * 1024 * 1024,
+};
+
+function parseArgs(argv: string[]): BuildOptions {
+  const opts = { ...DEFAULT_OPTIONS };
+  for (const arg of argv.slice(2)) {
+    if (arg.startsWith("--source=")) {
+      const v = arg.slice("--source=".length);
+      if (v !== "sparticuz" && v !== "chrome-headless-shell") {
+        throw new Error(`--source must be 'sparticuz' or 'chrome-headless-shell' (got ${v})`);
+      }
+      opts.source = v;
+    } else if (arg.startsWith("--max-unzipped=")) {
+      opts.maxUnzippedBytes = Number.parseInt(arg.slice("--max-unzipped=".length), 10);
+    } else if (arg.startsWith("--max-zipped=")) {
+      opts.maxZippedBytes = Number.parseInt(arg.slice("--max-zipped=".length), 10);
+    } else if (arg === "--help") {
+      console.log(
+        "Usage: tsx build-zip.ts [--source=sparticuz|chrome-headless-shell]\n" +
+          "       [--max-unzipped=<bytes>] [--max-zipped=<bytes>]",
+      );
+      process.exit(0);
+    } else {
+      throw new Error(`Unknown flag: ${arg}`);
+    }
+  }
+  return opts;
+}
+
+async function main(): Promise<void> {
+  const opts =
parseArgs(process.argv); + const start = Date.now(); + + rmSync(distDir, { recursive: true, force: true }); + mkdirSync(distDir, { recursive: true }); + + const stagingDir = join(distDir, "staging"); + mkdirSync(stagingDir, { recursive: true }); + + console.log(`[build-zip] source=${opts.source}`); + + // 1. Bundle the handler. + await bundleHandler(stagingDir); + + // 2. Stage runtime modules (puppeteer-core + @sparticuz/chromium or the + // fallback chrome-headless-shell tar). + stageRuntimeModules(stagingDir, opts.source); + + // 3. Stage the ffmpeg binary. + stageFfmpeg(stagingDir); + + // 3b. Stage the hyperframe runtime manifest + IIFE as siblings of + // handler.mjs. The producer's `hyperframeRuntimeLoader` checks + // SIBLING_MANIFEST_PATH first, so dropping the manifest alongside + // the bundled handler at /var/task/hyperframe.manifest.json lets + // renderChunk find it without needing PRODUCER_HYPERFRAME_MANIFEST_PATH. + stageHyperframeRuntime(stagingDir); + + // 4. If we're on the chrome-headless-shell fallback, stage that binary. + if (opts.source === "chrome-headless-shell") { + stageChromeHeadlessShell(stagingDir); + } + + // 5. Compute the unzipped size BEFORE zipping so we fail loud when over budget. + const unzippedBytes = directorySizeBytes(stagingDir); + console.log(`[build-zip] unzipped staging size: ${formatBytes(unzippedBytes)}`); + if (unzippedBytes > opts.maxUnzippedBytes) { + throw new Error( + `[build-zip] unzipped bundle ${formatBytes(unzippedBytes)} exceeds limit ${formatBytes( + opts.maxUnzippedBytes, + )} (Lambda ZIP ceiling: 250 MiB unzipped). ` + + `Switch --source to the lighter option, or move Chrome to a Lambda Layer.`, + ); + } + + // 6. Build the ZIP. 
+  const zipPath = join(distDir, "handler.zip");
+  zipDirectory(stagingDir, zipPath);
+  const zippedBytes = statSync(zipPath).size;
+  console.log(`[build-zip] zip size: ${formatBytes(zippedBytes)} → ${zipPath}`);
+  if (zippedBytes > opts.maxZippedBytes) {
+    throw new Error(
+      `[build-zip] zip ${formatBytes(zippedBytes)} exceeds ZIP size limit ${formatBytes(
+        opts.maxZippedBytes,
+      )}.`,
+    );
+  }
+
+  // 7. Sidecar manifest.
+  const manifest = {
+    builtAt: new Date().toISOString(),
+    durationMs: Date.now() - start,
+    source: opts.source,
+    unzippedBytes,
+    zippedBytes,
+    maxUnzippedBytes: opts.maxUnzippedBytes,
+    maxZippedBytes: opts.maxZippedBytes,
+  };
+  writeFileSync(join(distDir, "handler.zip.manifest.json"), JSON.stringify(manifest, null, 2));
+
+  // 8. Cleanup staging.
+  rmSync(stagingDir, { recursive: true, force: true });
+  console.log(`[build-zip] done in ${Date.now() - start}ms`);
+}
+
+async function bundleHandler(stagingDir: string): Promise<void> {
+  const entry = join(packageRoot, "src/handler.ts");
+  const outfile = join(stagingDir, "handler.mjs");
+
+  const workspaceAliasPlugin: esbuild.Plugin = {
+    name: "workspace-alias",
+    setup(build) {
+      build.onResolve({ filter: /^@hyperframes\/producer\/distributed$/ }, () => ({
+        path: resolve(monorepoRoot, "packages/producer/src/distributed.ts"),
+      }));
+      build.onResolve({ filter: /^@hyperframes\/producer$/ }, () => ({
+        path: resolve(monorepoRoot, "packages/producer/src/index.ts"),
+      }));
+      build.onResolve({ filter: /^@hyperframes\/engine$/ }, () => ({
+        path: resolve(monorepoRoot, "packages/engine/src/index.ts"),
+      }));
+      build.onResolve({ filter: /^@hyperframes\/engine\/alpha-blit$/ }, () => ({
+        path: resolve(monorepoRoot, "packages/engine/src/utils/alphaBlit.ts"),
+      }));
+      build.onResolve({ filter: /^@hyperframes\/engine\/shader-transitions$/ }, () => ({
+        path: resolve(monorepoRoot, "packages/engine/src/utils/shaderTransitions.ts"),
+      }));
+      build.onResolve({ filter: /^@hyperframes\/core$/ }, () => ({
+        path:
resolve(monorepoRoot, "packages/core/src/index.ts"),
+      }));
+      build.onResolve({ filter: /^@hyperframes\/core\/lint$/ }, () => ({
+        path: resolve(monorepoRoot, "packages/core/src/lint/index.ts"),
+      }));
+    },
+  };
+
+  await esbuild.build({
+    bundle: true,
+    platform: "node",
+    target: "node22",
+    format: "esm",
+    // Externalise binary-shipped modules so node module resolution picks
+    // them up at runtime. esbuild would otherwise try to inline their
+    // postinstall-extracted binaries, which it cannot do.
+    external: [
+      "@sparticuz/chromium",
+      "puppeteer-core",
+      "puppeteer",
+      // AWS SDK v3 is pre-installed in the Lambda Node 22 runtime; mark
+      // external so we don't double-bundle 3+ MiB of SDK.
+      "@aws-sdk/client-s3",
+    ],
+    plugins: [workspaceAliasPlugin],
+    minify: false,
+    // sourcemap=false: the ZIP is tight on Lambda's 250 MiB unzipped cap
+    // (Chrome ~70 MiB + ffmpeg ~80 MiB + ffprobe ~62 MiB + Node deps). A
+    // 4-5 MiB sourcemap puts us over. Re-enable for local debugging by passing
+    // --sourcemap; the bundle's stack traces stay readable enough without
+    // it because we don't minify.
+    sourcemap: false,
+    entryPoints: [entry],
+    outfile,
+    // Lambda's Node 22 runtime treats `.mjs` as ESM. Inject a real `require`
+    // via `createRequire` so esbuild's `__require` shim resolves to it
+    // instead of throwing "Dynamic require of <module> is not supported" on
+    // CommonJS modules in the dependency graph (postcss, etc. that ship
+    // top-level `require('path')` calls). The shim does
+    // `typeof require !== "undefined" ? require : <throwing fallback>`, so
+    // making `require` a real value in module scope flips it onto the
+    // happy path.
+ banner: { + js: [ + "// hyperframes-aws-lambda handler bundle", + 'import { createRequire as __hf_createRequire } from "module";', + "const require = __hf_createRequire(import.meta.url);", + ].join("\n"), + }, + }); + console.log(`[build-zip] bundled handler → ${outfile}`); +} + +function stageRuntimeModules(stagingDir: string, source: BuildOptions["source"]): void { + // Bun's isolated-install layout means cpSync(@sparticuz/chromium) only + // copies the package's own files, missing transitive deps like `tar-fs`. + // The clean cross-package-manager solution: write a tiny package.json + // into staging/ that declares the production deps, then `npm install` + // there. npm flattens transitive deps into staging/node_modules/. + const pkg: Record = { + name: "hyperframes-aws-lambda-bundled", + version: "0.0.0", + private: true, + dependencies: { + "puppeteer-core": readDepVersion("puppeteer-core"), + }, + }; + if (source === "sparticuz") { + (pkg.dependencies as Record)["@sparticuz/chromium"] = + readDepVersion("@sparticuz/chromium"); + } + writeFileSync(join(stagingDir, "package.json"), JSON.stringify(pkg, null, 2)); + + // --no-package-lock so we don't pollute staging with a lockfile we don't + // ship; --no-audit/--no-fund just for log noise. + const result = spawnSync( + "npm", + ["install", "--no-package-lock", "--no-audit", "--no-fund", "--omit=dev", "--omit=optional"], + { + cwd: stagingDir, + stdio: "inherit", + }, + ); + if (result.status !== 0) { + throw new Error(`[build-zip] npm install into staging failed (status ${result.status})`); + } + console.log(`[build-zip] staged node_modules via npm install`); +} + +function readDepVersion(moduleName: string): string { + // Resolve the EXACT version bun installed into the workspace, not the + // semver range declared in package.json. 
The staging-dir npm install + // runs with `--no-package-lock`, so a caret range would float to the + // latest registry version at build time — diverging from what the + // workspace tests ran against and breaking ZIP-content determinism + // across consecutive builds. The lockfile pin gives us reproducibility. + const lockText = readFileSync(join(monorepoRoot, "bun.lock"), "utf-8"); + // bun.lock lines look like: + // "puppeteer-core": ["puppeteer-core@24.43.1", "", { ... }, "sha512-..."], + const re = new RegExp( + `"${moduleName.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}":\\s*\\["${moduleName.replace( + /[.*+?^${}()|[\]\\]/g, + "\\$&", + )}@([^"]+)"`, + ); + const match = re.exec(lockText); + if (!match || !match[1]) { + // Fall back to the manifest range — better than failing the build + // entirely if bun.lock's format changes between bun versions. + const manifest = JSON.parse(readFileSync(join(packageRoot, "package.json"), "utf-8")) as { + dependencies?: Record; + }; + return manifest.dependencies?.[moduleName] ?? "latest"; + } + return match[1]; +} + +function resolveModuleDir(moduleName: string): string { + // Walk up from packageRoot to find a matching node_modules entry. + // Used by stageFfmpeg below; the @sparticuz/chromium + puppeteer-core + // paths now go through npm install instead. 
+ let dir = packageRoot; + for (let i = 0; i < 5; i++) { + const candidate = join(dir, "node_modules", moduleName); + if (existsSync(candidate)) return candidate; + dir = dirname(dir); + } + throw new Error( + `[build-zip] could not resolve ${moduleName} from ${packageRoot} — run 'bun install' first.`, + ); +} + +function stageHyperframeRuntime(stagingDir: string): void { + const coreDist = resolve(monorepoRoot, "packages/core/dist"); + const manifestSrc = join(coreDist, "hyperframe.manifest.json"); + const iifeSrc = join(coreDist, "hyperframe.runtime.iife.js"); + if (!existsSync(manifestSrc) || !existsSync(iifeSrc)) { + throw new Error( + `[build-zip] hyperframe runtime artifacts missing under ${coreDist}. ` + + `Run 'bun run --filter @hyperframes/core build:hyperframes-runtime:modular' first.`, + ); + } + cpSync(manifestSrc, join(stagingDir, "hyperframe.manifest.json")); + cpSync(iifeSrc, join(stagingDir, "hyperframe.runtime.iife.js")); + console.log(`[build-zip] staged hyperframe.manifest.json + hyperframe.runtime.iife.js`); +} + +function stageFfmpeg(stagingDir: string): void { + const binDir = join(stagingDir, "bin"); + mkdirSync(binDir, { recursive: true }); + + // ffmpeg from `ffmpeg-static`. The package only ships the encoder + // binary; the audio pad/trim path also needs ffprobe, which comes + // from `ffprobe-static`. + const ffmpegBinary = join(resolveModuleDir("ffmpeg-static"), "ffmpeg"); + if (!existsSync(ffmpegBinary)) { + throw new Error( + `[build-zip] ffmpeg-static binary missing at ${ffmpegBinary}. Did postinstall run?`, + ); + } + const ffmpegDest = join(binDir, "ffmpeg"); + cpSync(ffmpegBinary, ffmpegDest); + chmodSync(ffmpegDest, 0o755); + + // ffprobe lives at `ffprobe-static/bin///ffprobe`. + // The producer's `audioPadTrim` spawns `ffprobe` from PATH so we need + // it alongside ffmpeg under /var/task/bin/. 
+ const ffprobeModule = resolveModuleDir("ffprobe-static"); + const ffprobeCandidates = [ + join(ffprobeModule, "bin", "linux", "x64", "ffprobe"), + join(ffprobeModule, "bin", "linux", "arm64", "ffprobe"), + ]; + const ffprobeBinary = ffprobeCandidates.find((p) => existsSync(p)); + if (!ffprobeBinary) { + throw new Error( + `[build-zip] ffprobe-static binary not found under ${ffprobeModule}/bin/linux/. Did postinstall run?`, + ); + } + const ffprobeDest = join(binDir, "ffprobe"); + cpSync(ffprobeBinary, ffprobeDest); + chmodSync(ffprobeDest, 0o755); + + console.log(`[build-zip] staged ffmpeg + ffprobe → bin/`); +} + +function stageChromeHeadlessShell(stagingDir: string): void { + // The fallback path bundles the same chrome-headless-shell binary the + // K8s deploy uses. The binary is fetched via `@puppeteer/browsers` on + // first build into the host's `~/.cache/puppeteer/`; the build script + // re-uses that cache rather than redownloading. + const home = process.env.HOME ?? "/root"; + const baseDir = join(home, ".cache", "puppeteer", "chrome-headless-shell"); + if (!existsSync(baseDir)) { + throw new Error( + `[build-zip] chrome-headless-shell cache missing at ${baseDir}. Run\n` + + ` npx --yes @puppeteer/browsers install chrome-headless-shell@stable --path ${home}/.cache/puppeteer\n` + + `before --source=chrome-headless-shell.`, + ); + } + // Sort by numeric semver descending. `sort().reverse()` is lexicographic, + // which silently picks "99.0.0" over "131.0.0" once Chrome ships + // three-digit majors that aren't strictly width-aligned. `compareSemver` + // returns negative/zero/positive on (a, b), so descending = `b - a`. 
+ const versions = readdirSync(baseDir).sort((a, b) => compareSemver(b, a)); + for (const v of versions) { + const candidate = join(baseDir, v, "chrome-headless-shell-linux64", "chrome-headless-shell"); + if (existsSync(candidate)) { + const dest = join(stagingDir, "bin", "chrome-headless-shell"); + mkdirSync(dirname(dest), { recursive: true }); + cpSync(candidate, dest); + chmodSync(dest, 0o755); + console.log(`[build-zip] staged chrome-headless-shell (${v}) → bin/chrome-headless-shell`); + return; + } + } + throw new Error(`[build-zip] no linux64 chrome-headless-shell binary found under ${baseDir}.`); +} + +/** + * Compare two semver-shaped strings like "131.0.6778.108". Treats any + * non-numeric directory name as `-Infinity` so it sorts to the bottom + * (Puppeteer's cache layout sometimes includes `latest` or branch tags). + * Used by `stageChromeHeadlessShell` to pick the newest cached Chrome + * without tripping on the lexicographic "99 > 131" trap. + */ +function compareSemver(a: string, b: string): number { + const partsA = a.split(".").map((s) => Number.parseInt(s, 10)); + const partsB = b.split(".").map((s) => Number.parseInt(s, 10)); + const len = Math.max(partsA.length, partsB.length); + for (let i = 0; i < len; i++) { + const ai = partsA[i] ?? 0; + const bi = partsB[i] ?? 
0; + if (Number.isNaN(ai) && Number.isNaN(bi)) continue; + if (Number.isNaN(ai)) return -1; + if (Number.isNaN(bi)) return 1; + if (ai !== bi) return ai - bi; + } + return 0; +} + +function zipDirectory(sourceDir: string, zipPath: string): void { + const result = spawnSync("zip", ["-rq", zipPath, "."], { cwd: sourceDir, stdio: "inherit" }); + if (result.status !== 0) { + throw new Error(`[build-zip] zip exited with status ${result.status}`); + } +} + +function directorySizeBytes(dir: string): number { + // Use spawnSync (no shell) instead of execSync so `dir` is passed as + // an argv element rather than interpolated into a shell command — + // CodeQL's `js/shell-command-injected-from-environment` rule fires + // on the latter even with JSON-quoting. `du -sb` is Linux-only; + // build-zip is CI-side where Linux coreutils is present. + const result = spawnSync("du", ["-sb", dir], { encoding: "utf-8" }); + if (result.status === 0 && result.stdout) { + const bytes = Number.parseInt(result.stdout.split(/\s+/)[0] ?? "0", 10); + if (!Number.isNaN(bytes)) return bytes; + } + return walkSize(dir); +} + +function walkSize(dir: string): number { + let total = 0; + for (const entry of readdirSync(dir, { withFileTypes: true })) { + const full = join(dir, entry.name); + if (entry.isDirectory()) total += walkSize(full); + else if (entry.isFile()) total += statSync(full).size; + } + return total; +} + +void main().catch((err) => { + console.error("[build-zip] failed:", err instanceof Error ? err.message : String(err)); + if (err instanceof Error && err.stack) console.error(err.stack); + process.exit(1); +}); diff --git a/packages/aws-lambda/scripts/probe-beginframe.dockerfile b/packages/aws-lambda/scripts/probe-beginframe.dockerfile new file mode 100644 index 000000000..26e4c7bba --- /dev/null +++ b/packages/aws-lambda/scripts/probe-beginframe.dockerfile @@ -0,0 +1,61 @@ +# BeginFrame regression-guard container. 
+#
+# Uses the official AWS Lambda Node 22 image as the base so the probe
+# exercises @sparticuz/chromium against the SAME glibc, kernel feature
+# set, and `/tmp` filesystem layout that real Lambda invocations see. If
+# this Dockerfile passes, the bundled handler is on solid footing for
+# real AWS.
+#
+# Build context: monorepo root (../../). Build + run:
+#
+#   bun run --cwd packages/aws-lambda probe:beginframe:docker
+#
+# The default CMD runs `scripts/probe-beginframe.ts` via
+# `node --experimental-strip-types` and exits 0 on pass, 1 on BeginFrame
+# failure, 2 on harness failure.
+
+FROM public.ecr.aws/lambda/nodejs:22
+
+# Shared libraries @sparticuz/chromium expects but the Lambda base image
+# does not bring in by default. Versions are pinned to whatever
+# `dnf install` resolves on the Lambda base image at build time; we just
+# need them present.
+RUN dnf install -y \
+    alsa-lib \
+    atk \
+    cups-libs \
+    gtk3 \
+    libdrm \
+    libxkbcommon \
+    libXcomposite \
+    libXdamage \
+    libXrandr \
+    mesa-libgbm \
+    nss \
+    pango \
+    tar \
+    gzip \
+    unzip \
+    && dnf clean all
+
+WORKDIR /var/task
+
+# The probe is self-contained — we install the three deps it needs into a
+# fresh package directory rather than re-using the monorepo's
+# workspace-rooted manifests (which carry `workspace:` protocol deps npm
+# can't resolve).
+COPY packages/aws-lambda/scripts/ scripts/
+
+RUN printf '{"name":"hf-lambda-probe","version":"1.0.0","type":"module"}\n' > package.json \
+    && npm install --no-audit --no-fund --omit=optional \
+    @sparticuz/chromium@148.0.0 \
+    puppeteer-core@^24.39.1 \
+    tsx@^4.21.0
+
+ENV NODE_PATH=/var/task/node_modules
+ENV PATH="/var/task/node_modules/.bin:${PATH}"
+
+# Lambda's `tmpfs` is mounted at /tmp; sparticuz decompresses into /tmp
+# at runtime. The base image already has /tmp writable.
+ +ENTRYPOINT [] +CMD ["node", "--experimental-strip-types", "scripts/probe-beginframe.ts"] diff --git a/packages/aws-lambda/scripts/probe-beginframe.ts b/packages/aws-lambda/scripts/probe-beginframe.ts new file mode 100644 index 000000000..0ac545ed5 --- /dev/null +++ b/packages/aws-lambda/scripts/probe-beginframe.ts @@ -0,0 +1,157 @@ +#!/usr/bin/env tsx +/** + * BeginFrame regression guard for `@sparticuz/chromium`. + * + * The load-bearing assumption of `@hyperframes/aws-lambda` is that the + * Chromium build shipped by `@sparticuz/chromium` honours CDP + * `HeadlessExperimental.beginFrame` with `screenshot: true`. This script + * boots that Chromium build (decompressing into `/tmp` per the library's + * runtime contract), navigates to a tiny static page, issues one + * `beginFrame` with a screenshot request, and asserts the response + * carries a PNG buffer. + * + * The script is the contract test, not a one-shot verification — every + * release should run it inside the Docker container at + * `scripts/probe-beginframe.dockerfile` to catch any future + * `@sparticuz/chromium` rebuild that drops `HeadlessExperimental` support. + * + * Exits 0 on pass, 1 on fail. Run via: + * + * bun run --cwd packages/aws-lambda probe:beginframe # host + * bun run --cwd packages/aws-lambda probe:beginframe:docker # Lambda-like + */ + +import { mkdtempSync, promises as fs } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; + +interface ProbeResult { + passed: boolean; + durationMs: number; + chromiumPath: string; + screenshotBytes: number; + hasDamage: boolean; + detail: string; +} + +const PROBE_HTML = ` +hf-beginframe-probe + +
hf-beginframe-probe
`; + +async function main(): Promise { + const start = Date.now(); + const result = await probe(); + result.durationMs = Date.now() - start; + console.log(JSON.stringify(result, null, 2)); + if (!result.passed) { + process.exit(1); + } +} + +async function probe(): Promise { + let chromiumPath = ""; + try { + const { default: chromium } = await import("@sparticuz/chromium"); + chromiumPath = await chromium.executablePath(); + const args = chromium.args; + + const puppeteer = await import("puppeteer-core"); + + // Write probe HTML to /tmp + serve via file:// — no HTTP server in the + // probe so we don't add a dependency surface that could mask a + // Chrome-side issue. `mkdtempSync` (vs `tmpdir() + Date.now()`) gives + // an unguessable directory name so two concurrent probes on the same + // host don't collide and CodeQL's insecure-tempfile rule clears. + const tmpHtmlDir = mkdtempSync(join(tmpdir(), "hf-beginframe-")); + const htmlPath = join(tmpHtmlDir, "probe.html"); + await fs.writeFile(htmlPath, PROBE_HTML, "utf-8"); + + // BeginFrame requires the full compositor-driving flag set. These match + // the args the engine's `browserManager` passes when `captureMode !== + // "screenshot"`. Without the surface-synchronization + threaded-disable + // flags, Chrome's compositor returns `hasDamage: false` and skips the + // screenshot — the same observation pinned in the hyperframes memory + // ("Chrome's beginFrame with `screenshot` param always reports + // hasDamage=true"). + const beginFrameFlags = [ + "--deterministic-mode", + "--enable-begin-frame-control", + "--disable-new-content-rendering-timeout", + "--run-all-compositor-stages-before-draw", + "--disable-threaded-animation", + "--disable-threaded-scrolling", + "--disable-checker-imaging", + "--disable-image-animation-resync", + "--enable-surface-synchronization", + // Software GL — Lambda has no GPU; matches the in-process renderer's + // software-locked path. 
+ "--use-gl=angle", + "--use-angle=swiftshader", + "--enable-unsafe-swiftshader", + ]; + + const browser = await puppeteer.launch({ + executablePath: chromiumPath, + headless: "shell", + args: [...args, ...beginFrameFlags], + defaultViewport: { width: 800, height: 600 }, + }); + try { + const page = await browser.newPage(); + await page.goto(`file://${htmlPath}`, { waitUntil: "domcontentloaded", timeout: 30_000 }); + const session = await page.createCDPSession(); + await session.send("HeadlessExperimental.enable"); + // Warm-up beginFrame with noDisplayUpdates: true — drives the + // compositor without producing a screenshot, matching how the engine + // primes a capture loop. + await session.send("HeadlessExperimental.beginFrame", { + frameTimeTicks: 0, + interval: 33, + noDisplayUpdates: true, + }); + const response = await session.send("HeadlessExperimental.beginFrame", { + frameTimeTicks: 1000, + interval: 33, + screenshot: { format: "png" }, + }); + await fs.rm(tmpHtmlDir, { recursive: true, force: true }).catch(() => {}); + const screenshot = response.screenshotData ?? ""; + const bytes = screenshot ? Buffer.from(screenshot, "base64") : Buffer.alloc(0); + const isPng = + bytes.length >= 8 && + bytes[0] === 0x89 && + bytes[1] === 0x50 && + bytes[2] === 0x4e && + bytes[3] === 0x47; + return { + passed: isPng && bytes.length > 0, + durationMs: 0, + chromiumPath, + screenshotBytes: bytes.length, + hasDamage: response.hasDamage, + detail: isPng + ? "OK — BeginFrame returned a PNG buffer." + : `FAIL — BeginFrame returned ${bytes.length} bytes, PNG signature ${ + bytes.length >= 4 ? bytes.subarray(0, 4).toString("hex") : "" + }`, + }; + } finally { + await browser.close().catch(() => {}); + } + } catch (err) { + return { + passed: false, + durationMs: 0, + chromiumPath, + screenshotBytes: 0, + hasDamage: false, + detail: `FAIL — ${err instanceof Error ? 
err.message : String(err)}`, + }; + } +} + +void main().catch((err) => { + console.error("[probe-beginframe] unexpected:", err); + process.exit(2); +}); diff --git a/packages/aws-lambda/scripts/verify-zip-size.ts b/packages/aws-lambda/scripts/verify-zip-size.ts new file mode 100644 index 000000000..1592b8b38 --- /dev/null +++ b/packages/aws-lambda/scripts/verify-zip-size.ts @@ -0,0 +1,83 @@ +#!/usr/bin/env tsx +/** + * CI gate on the Lambda ZIP size. + * + * Reads `dist/handler.zip.manifest.json` (written by `build-zip.ts`) and + * exits non-zero if either the unzipped or zipped size exceeds the + * declared limits. Lambda's hard ceiling for ZIP-deployed functions is + * 250 MiB unzipped (262144000 bytes — AWS docs label it "250 MB" but use + * binary mebibytes); the in-house budget is 248 MiB to keep headroom for + * the Chrome tarball decompression that happens at cold start. + */ + +import { existsSync, readFileSync, statSync } from "node:fs"; +import { dirname, join, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; +import { formatBytes } from "./_formatBytes.js"; + +const scriptDir = dirname(fileURLToPath(import.meta.url)); +const packageRoot = resolve(scriptDir, ".."); +const distDir = join(packageRoot, "dist"); +const zipPath = join(distDir, "handler.zip"); +const manifestPath = join(distDir, "handler.zip.manifest.json"); + +interface Manifest { + unzippedBytes: number; + zippedBytes: number; + source: string; +} + +const IN_HOUSE_UNZIPPED_LIMIT = 248 * 1024 * 1024; +const IN_HOUSE_ZIPPED_LIMIT = 150 * 1024 * 1024; + +function main(): void { + if (!existsSync(zipPath)) { + console.error(`[verify-zip-size] ${zipPath} not found. Run 'bun run build:zip' first.`); + process.exit(1); + } + if (!existsSync(manifestPath)) { + console.error( + `[verify-zip-size] ${manifestPath} not found. 
The manifest is written by build-zip.ts; ` + + `re-run the build.`, + ); + process.exit(1); + } + + const manifest = JSON.parse(readFileSync(manifestPath, "utf-8")) as Manifest; + const actualZipped = statSync(zipPath).size; + if (actualZipped !== manifest.zippedBytes) { + console.warn( + `[verify-zip-size] note: zip file size on disk (${actualZipped}) differs from ` + + `manifest (${manifest.zippedBytes}). Using on-disk value.`, + ); + } + + let failed = false; + if (manifest.unzippedBytes > IN_HOUSE_UNZIPPED_LIMIT) { + console.error( + `[verify-zip-size] FAIL unzipped: ${formatBytes(manifest.unzippedBytes)} > ` + + `${formatBytes(IN_HOUSE_UNZIPPED_LIMIT)} (in-house limit; Lambda hard ceiling is 250 MiB).`, + ); + failed = true; + } + if (actualZipped > IN_HOUSE_ZIPPED_LIMIT) { + console.error( + `[verify-zip-size] FAIL zipped: ${formatBytes(actualZipped)} > ` + + `${formatBytes(IN_HOUSE_ZIPPED_LIMIT)} (in-house limit; Lambda direct-upload ceiling is 50 MiB, ` + + `S3-deploy ceiling is 250 MiB).`, + ); + failed = true; + } + + if (failed) { + console.error("[verify-zip-size] FAILED — bundle is too large for Lambda ZIP deploy."); + process.exit(1); + } + console.log( + `[verify-zip-size] OK source=${manifest.source} unzipped=${formatBytes( + manifest.unzippedBytes, + )} zipped=${formatBytes(actualZipped)}`, + ); +} + +main(); diff --git a/packages/aws-lambda/src/chromium.test.ts b/packages/aws-lambda/src/chromium.test.ts new file mode 100644 index 000000000..75953e281 --- /dev/null +++ b/packages/aws-lambda/src/chromium.test.ts @@ -0,0 +1,99 @@ +/** + * Unit tests for the Chrome runtime resolver. + * + * The actual @sparticuz/chromium probe lives in + * `scripts/probe-beginframe.ts` (run in a Lambda-like Docker container). + * These tests pin the env-var → source-selection logic so a misconfigured + * deploy fails loudly rather than silently picking the wrong binary. 
+ */ + +import { afterEach, beforeEach, describe, expect, it } from "bun:test"; +import { mkdtempSync, rmSync, writeFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { + _setSparticuzChromiumForTests, + resolveChromeArgs, + resolveChromeExecutablePath, + resolveChromeSource, +} from "./chromium.js"; + +const savedEnv: Record = {}; + +beforeEach(() => { + savedEnv.HYPERFRAMES_LAMBDA_CHROME_SOURCE = process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE; + savedEnv.HYPERFRAMES_LAMBDA_CHROME_PATH = process.env.HYPERFRAMES_LAMBDA_CHROME_PATH; +}); + +afterEach(() => { + process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE = savedEnv.HYPERFRAMES_LAMBDA_CHROME_SOURCE; + process.env.HYPERFRAMES_LAMBDA_CHROME_PATH = savedEnv.HYPERFRAMES_LAMBDA_CHROME_PATH; + _setSparticuzChromiumForTests(null); +}); + +describe("resolveChromeSource", () => { + it("defaults to sparticuz when no env var is set", () => { + delete process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE; + expect(resolveChromeSource()).toBe("sparticuz"); + }); + + it("returns chrome-headless-shell when env var requests it", () => { + process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE = "chrome-headless-shell"; + expect(resolveChromeSource()).toBe("chrome-headless-shell"); + }); + + it("accepts the short alias 'shell'", () => { + process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE = "shell"; + expect(resolveChromeSource()).toBe("chrome-headless-shell"); + }); + + it("is case insensitive", () => { + process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE = "Chrome-Headless-Shell"; + expect(resolveChromeSource()).toBe("chrome-headless-shell"); + }); + + it("falls back to sparticuz on an unknown value", () => { + process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE = "wat"; + expect(resolveChromeSource()).toBe("sparticuz"); + }); +}); + +describe("resolveChromeExecutablePath", () => { + it("returns the path from a stubbed sparticuz module", async () => { + process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE = "sparticuz"; + 
_setSparticuzChromiumForTests({ + args: ["--fake-arg"], + executablePath: async () => "/tmp/sparticuz-chromium", + }); + expect(await resolveChromeExecutablePath()).toBe("/tmp/sparticuz-chromium"); + expect(await resolveChromeArgs()).toEqual(["--fake-arg"]); + }); + + it("reads chrome-headless-shell path from HYPERFRAMES_LAMBDA_CHROME_PATH", async () => { + process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE = "chrome-headless-shell"; + const dir = mkdtempSync(join(tmpdir(), "hf-chrome-test-")); + const binPath = join(dir, "chrome-headless-shell"); + writeFileSync(binPath, "fake binary contents"); + try { + process.env.HYPERFRAMES_LAMBDA_CHROME_PATH = binPath; + expect(await resolveChromeExecutablePath()).toBe(binPath); + expect(await resolveChromeArgs()).toEqual([]); + } finally { + rmSync(dir, { recursive: true, force: true }); + } + }); + + it("throws if chrome-headless-shell path is missing", async () => { + process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE = "chrome-headless-shell"; + delete process.env.HYPERFRAMES_LAMBDA_CHROME_PATH; + await expect(resolveChromeExecutablePath()).rejects.toThrow( + /HYPERFRAMES_LAMBDA_CHROME_PATH to be set/, + ); + }); + + it("throws if chrome-headless-shell path doesn't exist on disk", async () => { + process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE = "chrome-headless-shell"; + process.env.HYPERFRAMES_LAMBDA_CHROME_PATH = "/nonexistent/path/chrome-headless-shell"; + await expect(resolveChromeExecutablePath()).rejects.toThrow(/does not exist/); + }); +}); diff --git a/packages/aws-lambda/src/chromium.ts b/packages/aws-lambda/src/chromium.ts new file mode 100644 index 000000000..af0009237 --- /dev/null +++ b/packages/aws-lambda/src/chromium.ts @@ -0,0 +1,124 @@ +/** + * Lambda-runtime Chrome resolver. + * + * `renderChunk()` (the only primitive that needs a browser) launches Chrome + * via the engine's `BrowserManager`. 
In Lambda we can't ship the full + * Puppeteer-managed Chrome download — Puppeteer's Chrome binary is ~330 MB + * unzipped, well over Lambda's 250 MB ZIP-deploy ceiling. + * + * Two valid runtime sources: + * + * 1. `@sparticuz/chromium` (primary). Decompresses a Lambda-optimised + * `chrome-headless-shell` build into `/tmp` at runtime. ~70 MB + * compressed; the same binary the rest of the ecosystem uses for + * headless-Chrome-in-Lambda. CDP-level BeginFrame works because the + * command lives in the protocol, not the binary; the + * `scripts/probe-beginframe.ts` regression guard pins this. + * + * 2. A bundled `chrome-headless-shell` binary (fallback). If + * `@sparticuz/chromium`'s build ever drops `HeadlessExperimental` + * support, we fall back to the same `chrome-headless-shell` build + * the K8s deploy uses. The fallback raises the ZIP from ~70 MB + * Chrome to ~140 MB Chrome — still well under 250 MB. + * + * The runtime path is selected by the `HYPERFRAMES_LAMBDA_CHROME_SOURCE` + * env var (set by `build-zip.ts`): + * + * "sparticuz" → use `@sparticuz/chromium.executablePath()` + * "chrome-headless-shell" → use `process.env.HYPERFRAMES_LAMBDA_CHROME_PATH` + * + * Adapters that bundle this package can override + * `HYPERFRAMES_LAMBDA_CHROME_PATH` directly when running outside Lambda + * (e.g. the SAM-local RIE smoke). + */ + +import { existsSync } from "node:fs"; + +/** Discriminator for the two supported Chrome sources. */ +export type ChromeSource = "sparticuz" | "chrome-headless-shell"; + +/** + * Read which Chrome source the bundled ZIP was built against. Defaults to + * `"sparticuz"` so a fresh build with no env override picks the primary + * path. 
+ */ +export function resolveChromeSource(): ChromeSource { + const raw = process.env.HYPERFRAMES_LAMBDA_CHROME_SOURCE?.toLowerCase(); + if (raw === "chrome-headless-shell" || raw === "shell") return "chrome-headless-shell"; + return "sparticuz"; +} + +/** + * Resolve the absolute path to a Chrome binary suitable for BeginFrame. + * + * For `"sparticuz"`: dynamically import `@sparticuz/chromium` and call + * `chromium.executablePath()`. The module is dynamic so a build-zip that + * never reaches the import (because the fallback Chrome is bundled) can + * tree-shake it out. + * + * For `"chrome-headless-shell"`: read the path from + * `HYPERFRAMES_LAMBDA_CHROME_PATH`. Throws if absent or non-existent so a + * misconfigured deploy fails loudly at boot rather than at first frame. + */ +export async function resolveChromeExecutablePath(): Promise { + const source = resolveChromeSource(); + if (source === "sparticuz") { + const mod = await loadSparticuzChromium(); + return mod.executablePath(); + } + const explicit = process.env.HYPERFRAMES_LAMBDA_CHROME_PATH; + if (!explicit) { + throw new Error( + "[chromium] HYPERFRAMES_LAMBDA_CHROME_SOURCE=chrome-headless-shell requires " + + "HYPERFRAMES_LAMBDA_CHROME_PATH to be set to the absolute path of the bundled binary.", + ); + } + if (!existsSync(explicit)) { + throw new Error( + `[chromium] HYPERFRAMES_LAMBDA_CHROME_PATH=${JSON.stringify(explicit)} does not exist`, + ); + } + return explicit; +} + +/** + * Resolve the Chromium launch args for the selected source. For + * `@sparticuz/chromium` we forward `chromium.args` (Lambda-tuned defaults + * — single-process, no-sandbox, /tmp paths). For the shell fallback the + * engine's own arg builder owns it; we return an empty array so the + * engine's defaults apply. 
+ */ +export async function resolveChromeArgs(): Promise { + if (resolveChromeSource() !== "sparticuz") return []; + const mod = await loadSparticuzChromium(); + return mod.args; +} + +/** + * Dynamic import wrapper isolated so unit tests can stub the module without + * jest-style module mocking gymnastics. The narrow type here pins the + * subset of `@sparticuz/chromium`'s surface this package depends on; if + * the upstream module ever changes shape the type error here surfaces + * before runtime. + */ +interface SparticuzChromiumModule { + args: string[]; + executablePath(): Promise; +} + +let cachedSparticuz: SparticuzChromiumModule | null = null; + +async function loadSparticuzChromium(): Promise { + if (cachedSparticuz) return cachedSparticuz; + const mod = (await import("@sparticuz/chromium")) as + | SparticuzChromiumModule + | { default: SparticuzChromiumModule }; + const resolved = "default" in mod ? mod.default : mod; + cachedSparticuz = resolved; + return resolved; +} + +/** Test-only seam: replace the cached `@sparticuz/chromium` module. */ +export function _setSparticuzChromiumForTests(mod: SparticuzChromiumModule | null): void { + cachedSparticuz = mod; +} diff --git a/packages/aws-lambda/src/events.ts b/packages/aws-lambda/src/events.ts new file mode 100644 index 000000000..3f61f4f8b --- /dev/null +++ b/packages/aws-lambda/src/events.ts @@ -0,0 +1,136 @@ +/** + * Lambda event + result types for the HyperFrames distributed render handler. + * + * The Step Functions state machine in `examples/aws-lambda/template.yaml` + * dispatches on the `Action` field. 
Each action maps 1:1 onto one of the + * three OSS distributed primitives: + * + * "plan" → `plan(projectDir, config, planDir)` (Activity A) + * "renderChunk" → `renderChunk(planDir, chunkIndex, output)` (Activity B) + * "assemble" → `assemble(planDir, chunkPaths, audio, out)` (Activity C) + * + * All file I/O is mediated by S3 — the handler downloads inputs into + * `/tmp` (Lambda's only writable filesystem path), invokes the primitive, + * uploads outputs back to S3, and returns a small JSON payload that fits + * inside Step Functions' history budget (under 200 bytes for chunk + * results per §2.4). + */ + +import type { DistributedRenderConfig } from "@hyperframes/producer/distributed"; + +/** Discriminator for the three roles the one Lambda image fulfills. */ +export type LambdaAction = "plan" | "renderChunk" | "assemble"; + +/** + * Top-level shape of any event the handler may receive. + * + * Step Functions can also invoke with a wrapped payload (e.g. when a Map + * state's `ItemSelector` passes through `$$.Map.Item.Value`), so the + * handler unwraps both `event.Payload` and `event.Input` before + * dispatching. + */ +export type LambdaEvent = + | PlanEvent + | RenderChunkEvent + | AssembleEvent + | { Payload: LambdaEvent } + | { Input: LambdaEvent }; + +/** Activity A: produce a planDir, upload to S3. */ +export interface PlanEvent { + Action: "plan"; + /** S3 URI pointing at a `tar -czf`-archived project directory (`s3://bucket/key.tar.gz`). */ + ProjectS3Uri: string; + /** S3 URI prefix where the planDir tar should be uploaded (`s3://bucket/{prefix}/`). */ + PlanOutputS3Prefix: string; + /** `DistributedRenderConfig` minus runtime-only fields (logger, abortSignal). */ + Config: SerializableDistributedRenderConfig; +} + +/** Activity B: fetch planDir, render one chunk, upload result. */ +export interface RenderChunkEvent { + Action: "renderChunk"; + /** S3 URI of the plan tar produced by a PlanEvent invocation. 
*/ + PlanS3Uri: string; + /** + * `PlanResult.planHash` from the Plan invocation. The handler verifies + * this against the untarred planDir's `plan.json` before invoking the + * producer, throwing a typed `PLAN_HASH_MISMATCH` on divergence so the + * state machine routes it as non-retryable. Defense-in-depth — the + * producer also re-checks internally. + */ + PlanHash: string; + /** 0-based chunk index this invocation should render. */ + ChunkIndex: number; + /** S3 URI prefix where the chunk output should be uploaded (`s3://bucket/{prefix}/`). */ + ChunkOutputS3Prefix: string; + /** Output container format from the plan's encoder.json; drives file vs frame-dir handling. */ + Format: "mp4" | "mov" | "png-sequence"; +} + +/** Activity C: fetch planDir + all chunks + audio, assemble, upload final. */ +export interface AssembleEvent { + Action: "assemble"; + /** S3 URI of the plan tar produced by a PlanEvent invocation. */ + PlanS3Uri: string; + /** S3 URIs of every chunk, ordered by chunk index. Length must equal `chunkCount`. */ + ChunkS3Uris: string[]; + /** S3 URI of the planDir's `audio.aac` if the composition has audio; `null` otherwise. */ + AudioS3Uri: string | null; + /** Final output S3 URI (`s3://bucket/key.mp4`). */ + OutputS3Uri: string; + /** Output container format; drives file vs frame-dir handling. */ + Format: "mp4" | "mov" | "png-sequence"; +} + +/** + * `DistributedRenderConfig` minus the runtime-only fields (`logger`, + * `abortSignal`, `producerConfig`). The Step Functions event JSON cannot + * carry function references; the handler reconstitutes the runtime fields + * from Lambda environment + the AbortController it owns. + */ +export type SerializableDistributedRenderConfig = Omit< + DistributedRenderConfig, + "logger" | "abortSignal" | "producerConfig" +>; + +// ── Result types — kept small to fit Step Functions history budgets ───────── + +/** Result of a `plan` invocation. Carries enough to size the Map(N) state. 
*/ +export interface PlanLambdaResult { + Action: "plan"; + PlanS3Uri: string; + PlanHash: string; + ChunkCount: number; + TotalFrames: number; + Fps: 24 | 30 | 60; + Width: number; + Height: number; + Format: "mp4" | "mov" | "png-sequence"; + HasAudio: boolean; + AudioS3Uri: string | null; + FfmpegVersion: string; + ProducerVersion: string; + DurationMs: number; +} + +/** Result of a `renderChunk` invocation. Sized ≤200 bytes per §2.4. */ +export interface RenderChunkLambdaResult { + Action: "renderChunk"; + ChunkS3Uri: string; + ChunkIndex: number; + Sha256: string; + FramesEncoded: number; + DurationMs: number; +} + +/** Result of an `assemble` invocation. */ +export interface AssembleLambdaResult { + Action: "assemble"; + OutputS3Uri: string; + FramesEncoded: number; + FileSize: number; + DurationMs: number; +} + +export type LambdaResult = PlanLambdaResult | RenderChunkLambdaResult | AssembleLambdaResult; diff --git a/packages/aws-lambda/src/handler.test.ts b/packages/aws-lambda/src/handler.test.ts new file mode 100644 index 000000000..0af053c5a --- /dev/null +++ b/packages/aws-lambda/src/handler.test.ts @@ -0,0 +1,438 @@ +/** + * Handler dispatch unit tests. + * + * Asserts that: + * - The handler routes Action="plan" / "renderChunk" / "assemble" to the + * matching OSS primitive. + * - It unwraps Step Functions `{ Payload }` and `{ Input }` envelopes. + * - It rejects unknown actions with a clear message. + * - It plumbs S3 download/upload calls in the correct order. + * + * The real OSS primitives are NOT exercised here — they live in + * `@hyperframes/producer/distributed` and have their own coverage in + * `packages/producer`. The Lambda handler is thin glue; this file pins + * the glue's contract. 
+ */ + +import { afterEach, beforeEach, describe, expect, it, mock } from "bun:test"; +import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import type { AssembleResult, ChunkResult, PlanResult } from "@hyperframes/producer/distributed"; +import type { AssembleEvent, LambdaEvent, PlanEvent, RenderChunkEvent } from "./events.js"; +import { handler, unwrapEvent } from "./handler.js"; + +interface FakeS3Op { + kind: "download" | "upload"; + uri: string; + bytes?: number; +} + +/** + * In-memory S3 stand-in. Records every operation so test assertions can + * pin the exact sequence of downloads and uploads, plus fakes the GetObject + * stream so {@link downloadS3ObjectToFile} writes the expected bytes. + */ +class FakeS3Client { + ops: FakeS3Op[] = []; + // Map S3 URIs → byte buffers the fake serves. + objects = new Map(); + + // Methods called by the real S3 transport — minimal surface so the + // handler's call sites don't need rewriting under test. + async send(command: unknown): Promise { + const op = command as { input: { Bucket: string; Key: string } } & { + constructor: { name: string }; + }; + const cmdName = op.constructor?.name ?? ""; + const uri = `s3://${op.input.Bucket}/${op.input.Key}`; + if (cmdName === "GetObjectCommand") { + const bytes = this.objects.get(uri) ?? Buffer.alloc(0); + this.ops.push({ kind: "download", uri, bytes: bytes.length }); + // Mock the AWS SDK stream contract just enough for pipeline() to + // pump bytes into a write stream. + const { Readable } = await import("node:stream"); + return { Body: Readable.from([bytes]) }; + } + if (cmdName === "PutObjectCommand") { + // Buffer the body so we can record how many bytes were uploaded; the + // handler's hot path streams from disk, but tests pin the count. 
+ const body = (command as { input: { Body: NodeJS.ReadableStream | Buffer } }).input.Body; + let bytes = 0; + if (Buffer.isBuffer(body)) { + bytes = body.length; + } else if (body && typeof (body as NodeJS.ReadableStream).pipe === "function") { + for await (const chunk of body as NodeJS.ReadableStream) { + bytes += (chunk as Buffer).length; + } + } + this.ops.push({ kind: "upload", uri, bytes }); + this.objects.set(uri, Buffer.alloc(bytes)); + return {}; + } + return {}; + } +} + +const tmpDirs: string[] = []; + +beforeEach(() => { + // Each test gets its own tmp root so concurrent test runs don't share state. +}); + +afterEach(() => { + for (const dir of tmpDirs) { + try { + rmSync(dir, { recursive: true, force: true }); + } catch { + // Best-effort cleanup. + } + } + tmpDirs.length = 0; +}); + +function makeTmpRoot(): string { + const dir = mkdtempSync(join(tmpdir(), "hf-lambda-test-")); + tmpDirs.push(dir); + return dir; +} + +describe("unwrapEvent", () => { + it("returns a bare event unchanged", () => { + const event: PlanEvent = { + Action: "plan", + ProjectS3Uri: "s3://bucket/project.tar.gz", + PlanOutputS3Prefix: "s3://bucket/renders/abc/", + Config: { fps: 30, width: 1920, height: 1080, format: "mp4" }, + }; + expect(unwrapEvent(event).Action).toBe("plan"); + }); + + it("unwraps a Step Functions { Payload } envelope", () => { + const inner: RenderChunkEvent = { + Action: "renderChunk", + PlanS3Uri: "s3://bucket/plan.tar.gz", + PlanHash: "deadbeef", + ChunkIndex: 3, + ChunkOutputS3Prefix: "s3://bucket/renders/abc/", + Format: "mp4", + }; + const wrapped: LambdaEvent = { Payload: inner }; + expect(unwrapEvent(wrapped).Action).toBe("renderChunk"); + }); + + it("unwraps multiple levels of envelopes", () => { + const inner: AssembleEvent = { + Action: "assemble", + PlanS3Uri: "s3://bucket/plan.tar.gz", + ChunkS3Uris: ["s3://bucket/chunks/0001.mp4"], + AudioS3Uri: null, + OutputS3Uri: "s3://bucket/output.mp4", + Format: "mp4", + }; + const doubly: LambdaEvent = 
{ Payload: { Input: inner } }; + expect(unwrapEvent(doubly).Action).toBe("assemble"); + }); + + it("throws on unknown action", () => { + expect(() => unwrapEvent({ Action: "doSomething" } as unknown as LambdaEvent)).toThrow( + /no recognised Action/, + ); + }); +}); + +describe("handler dispatch", () => { + it("routes Action='plan' to the plan primitive", async () => { + const tmpRoot = makeTmpRoot(); + const s3 = new FakeS3Client(); + // Seed a fake project tarball so the untar step has something to chew on. + s3.objects.set("s3://bucket/project.tar.gz", await makeMinimalProjectTar()); + + const planMock = mock( + async (_projectDir: string, _config: unknown, planDir: string): Promise => { + // Simulate plan() writing a minimal planDir. + mkdirSync(planDir, { recursive: true }); + writeFileSync(join(planDir, "plan.json"), JSON.stringify({ planHash: "fakehash" })); + mkdirSync(join(planDir, "meta"), { recursive: true }); + writeFileSync(join(planDir, "meta", "chunks.json"), "[]"); + return { + planDir, + planHash: "fakehash", + chunkCount: 4, + totalFrames: 720, + fps: 30 as const, + width: 1920, + height: 1080, + format: "mp4" as const, + ffmpegVersion: "6.0", + producerVersion: "0.0.0-test", + }; + }, + ); + const renderChunkMock = mock(async () => { + throw new Error("should not be called"); + }); + const assembleMock = mock(async () => { + throw new Error("should not be called"); + }); + + const event: PlanEvent = { + Action: "plan", + ProjectS3Uri: "s3://bucket/project.tar.gz", + PlanOutputS3Prefix: "s3://bucket/renders/abc/", + Config: { fps: 30, width: 1920, height: 1080, format: "mp4" }, + }; + + const result = await handler(event, { + s3: s3 as unknown as import("@aws-sdk/client-s3").S3Client, + primitives: { + plan: planMock as unknown as typeof import("@hyperframes/producer/distributed").plan, + renderChunk: + renderChunkMock as unknown as typeof import("@hyperframes/producer/distributed").renderChunk, + assemble: + assembleMock as unknown as typeof 
import("@hyperframes/producer/distributed").assemble, + }, + tmpRoot, + skipChromeResolution: true, + }); + + expect(result.Action).toBe("plan"); + if (result.Action !== "plan") throw new Error("unreachable"); + expect(result.PlanHash).toBe("fakehash"); + expect(result.ChunkCount).toBe(4); + expect(planMock).toHaveBeenCalledTimes(1); + expect(renderChunkMock).not.toHaveBeenCalled(); + expect(assembleMock).not.toHaveBeenCalled(); + // Plan should have downloaded the project zip and uploaded the plan tar. + expect( + s3.ops.some((o) => o.kind === "download" && o.uri === "s3://bucket/project.tar.gz"), + ).toBe(true); + }); + + it("routes Action='renderChunk' to the renderChunk primitive", async () => { + const tmpRoot = makeTmpRoot(); + const s3 = new FakeS3Client(); + // Seed a planDir tarball with a minimal structure renderChunk would + // observe; the test mock doesn't read it, but the handler untar step does. + s3.objects.set("s3://bucket/plan.tar.gz", await makeMinimalPlanTar()); + + const renderChunkMock = mock( + async ( + _planDir: string, + _chunkIndex: number, + outputChunkPath: string, + ): Promise => { + // Write a fake chunk file so the upload step has bytes to send. 
+ writeFileSync(outputChunkPath, Buffer.from("FAKE-MP4-CHUNK")); + return { + outputPath: outputChunkPath, + outputKind: "file", + framesEncoded: 240, + sha256: "0".repeat(64), + durationMs: 12345, + perfPath: outputChunkPath + ".perf.json", + }; + }, + ); + + const planMock = mock(async () => { + throw new Error("should not be called"); + }); + const assembleMock = mock(async () => { + throw new Error("should not be called"); + }); + + const event: RenderChunkEvent = { + Action: "renderChunk", + PlanS3Uri: "s3://bucket/plan.tar.gz", + PlanHash: "fakehash", + ChunkIndex: 2, + ChunkOutputS3Prefix: "s3://bucket/renders/abc/", + Format: "mp4", + }; + + const result = await handler(event, { + s3: s3 as unknown as import("@aws-sdk/client-s3").S3Client, + primitives: { + plan: planMock as unknown as typeof import("@hyperframes/producer/distributed").plan, + renderChunk: + renderChunkMock as unknown as typeof import("@hyperframes/producer/distributed").renderChunk, + assemble: + assembleMock as unknown as typeof import("@hyperframes/producer/distributed").assemble, + }, + tmpRoot, + skipChromeResolution: true, + }); + + expect(result.Action).toBe("renderChunk"); + if (result.Action !== "renderChunk") throw new Error("unreachable"); + expect(result.ChunkIndex).toBe(2); + expect(result.Sha256).toBe("0".repeat(64)); + expect(result.FramesEncoded).toBe(240); + expect(renderChunkMock).toHaveBeenCalledTimes(1); + }); + + it("rejects renderChunk when event.PlanHash diverges from plan.json", async () => { + const tmpRoot = makeTmpRoot(); + const s3 = new FakeS3Client(); + // The fixture's plan.json has planHash="fakehash"; the event below + // claims something else, so the handler must throw PLAN_HASH_MISMATCH + // before invoking the primitive. 
+ s3.objects.set("s3://bucket/plan.tar.gz", await makeMinimalPlanTar()); + + const renderChunkMock = mock(async () => { + throw new Error("primitive should not be called on a hash mismatch"); + }); + const planMock = mock(async () => { + throw new Error("should not be called"); + }); + const assembleMock = mock(async () => { + throw new Error("should not be called"); + }); + + const event: RenderChunkEvent = { + Action: "renderChunk", + PlanS3Uri: "s3://bucket/plan.tar.gz", + PlanHash: "not-the-real-hash", + ChunkIndex: 0, + ChunkOutputS3Prefix: "s3://bucket/renders/abc/", + Format: "mp4", + }; + + let caught: unknown; + try { + await handler(event, { + s3: s3 as unknown as import("@aws-sdk/client-s3").S3Client, + primitives: { + plan: planMock as unknown as typeof import("@hyperframes/producer/distributed").plan, + renderChunk: + renderChunkMock as unknown as typeof import("@hyperframes/producer/distributed").renderChunk, + assemble: + assembleMock as unknown as typeof import("@hyperframes/producer/distributed").assemble, + }, + tmpRoot, + skipChromeResolution: true, + }); + } catch (err) { + caught = err; + } + expect(caught).toBeInstanceOf(Error); + expect((caught as Error).name).toBe("PLAN_HASH_MISMATCH"); + expect((caught as Error).message).toMatch(/not-the-real-hash/); + expect(renderChunkMock).not.toHaveBeenCalled(); + }); + + it("routes Action='assemble' to the assemble primitive", async () => { + const tmpRoot = makeTmpRoot(); + const s3 = new FakeS3Client(); + s3.objects.set("s3://bucket/plan.tar.gz", await makeMinimalPlanTar()); + s3.objects.set("s3://bucket/chunks/0001.mp4", Buffer.from("CHUNK-1")); + s3.objects.set("s3://bucket/chunks/0002.mp4", Buffer.from("CHUNK-2")); + + const assembleMock = mock( + async ( + _planDir: string, + _chunkPaths: readonly string[], + _audioPath: string | null, + outputPath: string, + ): Promise => { + writeFileSync(outputPath, Buffer.from("FAKE-FINAL-MP4")); + return { + outputPath, + durationMs: 7777, + framesEncoded: 
480, + fileSize: 14, + }; + }, + ); + + const event: AssembleEvent = { + Action: "assemble", + PlanS3Uri: "s3://bucket/plan.tar.gz", + ChunkS3Uris: ["s3://bucket/chunks/0001.mp4", "s3://bucket/chunks/0002.mp4"], + AudioS3Uri: null, + OutputS3Uri: "s3://bucket/renders/abc/output.mp4", + Format: "mp4", + }; + + const result = await handler(event, { + s3: s3 as unknown as import("@aws-sdk/client-s3").S3Client, + primitives: { + plan: mock(async () => { + throw new Error("should not be called"); + }) as unknown as typeof import("@hyperframes/producer/distributed").plan, + renderChunk: mock(async () => { + throw new Error("should not be called"); + }) as unknown as typeof import("@hyperframes/producer/distributed").renderChunk, + assemble: + assembleMock as unknown as typeof import("@hyperframes/producer/distributed").assemble, + }, + tmpRoot, + skipChromeResolution: true, + }); + + expect(result.Action).toBe("assemble"); + if (result.Action !== "assemble") throw new Error("unreachable"); + expect(result.OutputS3Uri).toBe("s3://bucket/renders/abc/output.mp4"); + expect(result.FramesEncoded).toBe(480); + expect(assembleMock).toHaveBeenCalledTimes(1); + }); + + it("rejects unknown actions", async () => { + const tmpRoot = makeTmpRoot(); + await expect( + handler({ Action: "doSomething" } as unknown as LambdaEvent, { + s3: new FakeS3Client() as unknown as import("@aws-sdk/client-s3").S3Client, + tmpRoot, + skipChromeResolution: true, + }), + ).rejects.toThrow(/no recognised Action/); + }); +}); + +// ── helpers ───────────────────────────────────────────────────────────────── + +/** + * Build the smallest valid `.tar.gz` the handler's untar step accepts: a + * single file inside an archive. Uses the npm `tar` package (same as + * `s3Transport.ts`) so the fixture builder runs cross-platform — Windows + * doesn't ship GNU tar in `/usr/bin/tar`, and bare Alpine containers + * don't ship `tar` at all. Keeps the test runnable everywhere the rest + * of the suite runs. 
+ */ +async function makeMinimalProjectTar(): Promise { + const tar = await import("tar"); + const { mkdtempSync: mk, readFileSync, rmSync: rm, writeFileSync: wf } = await import("node:fs"); + const dir = mk(join(tmpdir(), "hf-lambda-mktar-")); + try { + wf(join(dir, "index.html"), "test"); + const tarPath = join(dir, "out.tar.gz"); + await tar.create({ gzip: true, file: tarPath, cwd: dir }, ["index.html"]); + return readFileSync(tarPath); + } finally { + rm(dir, { recursive: true, force: true }); + } +} + +/** + * Build a minimal `.tar.gz` for a tiny planDir containing `plan.json` + + * `meta/chunks.json`. Used by renderChunk/assemble tests where the handler + * untars but the mock primitive doesn't inspect contents. + */ +async function makeMinimalPlanTar(): Promise { + const tar = await import("tar"); + const { + mkdtempSync: mk, + mkdirSync: md, + readFileSync: rf, + writeFileSync: wf, + } = await import("node:fs"); + const dir = mk(join(tmpdir(), "hf-lambda-test-plan-")); + tmpDirs.push(dir); + md(join(dir, "meta"), { recursive: true }); + wf(join(dir, "plan.json"), JSON.stringify({ planHash: "fakehash" })); + wf(join(dir, "meta", "chunks.json"), "[]"); + const tarPath = join(dir, "out.tar.gz"); + await tar.create({ gzip: true, file: tarPath, cwd: dir }, ["plan.json", "meta"]); + return rf(tarPath); +} diff --git a/packages/aws-lambda/src/handler.ts b/packages/aws-lambda/src/handler.ts new file mode 100644 index 000000000..83da2d967 --- /dev/null +++ b/packages/aws-lambda/src/handler.ts @@ -0,0 +1,530 @@ +/** + * AWS Lambda handler for HyperFrames distributed rendering. + * + * One Lambda function, three roles. Step Functions dispatches by setting + * `event.Action`; the handler unwraps Map-state envelopes, primes the + * Lambda environment (Chrome path, ffmpeg path, tmpdir), and forwards to + * the matching OSS primitive from `@hyperframes/producer/distributed`. + * + * Everything heavy — capture, encode, audio mix — happens inside the OSS + * primitives. 
The handler is thin glue: parse event → S3 download → call + * primitive → S3 upload → return small JSON result. + */ + +import { existsSync, mkdirSync, mkdtempSync, readFileSync, rmSync, statSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { basename, join } from "node:path"; +import { S3Client } from "@aws-sdk/client-s3"; +import { + assemble, + type AssembleResult, + type ChunkResult, + type DistributedRenderConfig, + plan, + type PlanResult, + renderChunk, +} from "@hyperframes/producer/distributed"; +import { resolveChromeExecutablePath } from "./chromium.js"; +import type { + AssembleEvent, + AssembleLambdaResult, + LambdaAction, + LambdaEvent, + LambdaResult, + PlanEvent, + PlanLambdaResult, + RenderChunkEvent, + RenderChunkLambdaResult, +} from "./events.js"; +import { + downloadS3ObjectToFile, + parseS3Uri, + tarDirectory, + untarDirectory, + uploadFileToS3, +} from "./s3Transport.js"; + +/** + * Lazily-constructed S3 client. Cached at module scope so warm Lambda + * containers reuse the underlying HTTP keep-alive pool across invocations. + */ +let cachedS3Client: S3Client | null = null; +function getS3Client(): S3Client { + if (cachedS3Client) return cachedS3Client; + cachedS3Client = new S3Client({}); + return cachedS3Client; +} + +/** + * Optional injection points used by the handler's unit tests. Production + * callers leave these unset; the real OSS primitives are used. Tests + * inject `s3` and `primitives` directly rather than mutating module + * state — the dependency-injection seam is sufficient and avoids a + * second leak point for cross-test contamination. + */ +export interface HandlerDeps { + s3?: S3Client; + primitives?: { + plan: typeof plan; + renderChunk: typeof renderChunk; + assemble: typeof assemble; + }; + /** Override the per-invocation `/tmp` workdir root (defaults to Lambda's `/tmp`). */ + tmpRoot?: string; + /** Skip Chrome resolution (used by handler dispatch tests that mock renderChunk). 
*/ + skipChromeResolution?: boolean; +} + +/** + * Lambda entry. Step Functions sometimes wraps the event in + * `{ Payload: ... }` or `{ Input: ... }` depending on the state machine + * shape; unwrap until we hit a discriminated event. + */ +export async function handler(event: LambdaEvent, deps?: HandlerDeps): Promise { + const unwrapped = unwrapEvent(event); + primeRuntimeEnv(); + // Single structured boot log line — CloudWatch Logs Insights queries + // key off `event=handler_start` to grep for a specific Action / S3 URI + // when triaging without attaching a debugger. + logEvent({ event: "handler_start", action: unwrapped.Action, input: summarizeEvent(unwrapped) }); + try { + switch (unwrapped.Action) { + case "plan": + return await handlePlan(unwrapped, deps); + case "renderChunk": + return await handleRenderChunk(unwrapped, deps); + case "assemble": + return await handleAssemble(unwrapped, deps); + default: { + // Compile-time exhaustiveness: a new LambdaAction member trips + // the `never` assignment before the runtime error is reachable. + const _exhaustive: never = unwrapped; + throw new Error( + `[handler] unknown Action: ${JSON.stringify( + (_exhaustive as { Action?: string }).Action, + )}. Expected one of "plan", "renderChunk", "assemble".`, + ); + } + } + } catch (err) { + // Log before re-throwing so CloudWatch captures the structured + // error context alongside Lambda's default stack trace. Otherwise + // ops only sees the trace and has to correlate with execution + // history to recover the action + input. + logEvent({ + event: "handler_error", + action: unwrapped.Action, + message: err instanceof Error ? err.message : String(err), + name: err instanceof Error ? err.name : undefined, + }); + throw err; + } +} + +/** + * Walk through Step Functions' Map-state and Task-state envelopes until + * the discriminated event is found. 
+ */ +// Step Functions wraps at most `{Payload: {Input: ...}}` in our state +// machine; 4 levels is 2× headroom for unusual Map / Wait state +// configurations and prevents infinite loops on malformed input. +const MAX_ENVELOPE_DEPTH = 4; + +export function unwrapEvent(event: LambdaEvent): PlanEvent | RenderChunkEvent | AssembleEvent { + let cursor: LambdaEvent = event; + for (let i = 0; i < MAX_ENVELOPE_DEPTH; i++) { + if (cursor && typeof cursor === "object") { + const obj = cursor as Record; + if (typeof obj.Action === "string" && isLambdaAction(obj.Action)) { + return cursor as PlanEvent | RenderChunkEvent | AssembleEvent; + } + if ("Payload" in obj) { + cursor = obj.Payload as LambdaEvent; + continue; + } + if ("Input" in obj) { + cursor = obj.Input as LambdaEvent; + continue; + } + } + break; + } + throw new Error( + `[handler] event has no recognised Action; unwrapped ${MAX_ENVELOPE_DEPTH} levels of Payload/Input without finding one.`, + ); +} + +function isLambdaAction(value: string): value is LambdaAction { + return value === "plan" || value === "renderChunk" || value === "assemble"; +} + +/** + * Emit a single JSON line to stdout. CloudWatch ingests each line as a + * structured event; Logs Insights queries can `filter event="..."` and + * project specific fields. We write to stdout (not stderr) because + * Lambda's default destination for both is the same log group, and + * Logs Insights' INFO/ERROR level parser keys off the JSON `level` + * field, not the stream. + */ +function logEvent(payload: Record): void { + console.log(JSON.stringify(payload)); +} + +/** + * Compact, non-PII summary of a Lambda event for logging. The full + * event payload can include the entire project config; we only emit + * the routable fields (S3 URIs, chunk index, format) needed to triage + * a failure from CloudWatch. 
+ */ +function summarizeEvent( + event: PlanEvent | RenderChunkEvent | AssembleEvent, +): Record { + switch (event.Action) { + case "plan": + return { + projectS3Uri: event.ProjectS3Uri, + planOutputS3Prefix: event.PlanOutputS3Prefix, + format: event.Config.format, + fps: event.Config.fps, + }; + case "renderChunk": + return { + planS3Uri: event.PlanS3Uri, + chunkIndex: event.ChunkIndex, + format: event.Format, + }; + case "assemble": + return { + planS3Uri: event.PlanS3Uri, + chunkCount: event.ChunkS3Uris.length, + hasAudio: event.AudioS3Uri !== null, + outputS3Uri: event.OutputS3Uri, + format: event.Format, + }; + } +} + +/** + * Lambda sets `TMPDIR` to `/tmp` already, but the bundled binaries (Chrome + * + ffmpeg) live alongside the handler at `/var/task/bin/`. Add that to + * PATH the first time the handler runs so spawn("ffmpeg", …) inside the + * OSS primitives resolves to the bundled binary. + */ +let runtimeEnvPrimed = false; +function primeRuntimeEnv(): void { + if (runtimeEnvPrimed) return; + runtimeEnvPrimed = true; + const taskRoot = process.env.LAMBDA_TASK_ROOT ?? "/var/task"; + const bin = join(taskRoot, "bin"); + if (existsSync(bin)) { + process.env.PATH = `${bin}:${process.env.PATH ?? ""}`; + } +} + +// ── Plan ──────────────────────────────────────────────────────────────────── + +async function handlePlan(event: PlanEvent, deps?: HandlerDeps): Promise { + const started = Date.now(); + const s3 = deps?.s3 ?? getS3Client(); + const primitive = deps?.primitives?.plan ?? plan; + + const work = mkdtempSync(join(deps?.tmpRoot ?? tmpdir(), "hf-lambda-plan-")); + // We use `.tar.gz` (not `.zip`) as the project archive's on-the-wire + // format because Lambda's Amazon Linux base image ships GNU `tar` but + // not `unzip` in `/usr/bin`. The smoke script + future CLI both + // produce tar.gz uploads. 
+ const projectArchive = join(work, "project.tar.gz"); + const projectDir = join(work, "project"); + const planDir = join(work, "plan"); + + try { + await downloadS3ObjectToFile(s3, event.ProjectS3Uri, projectArchive); + await untarDirectory(projectArchive, projectDir); + + const config: DistributedRenderConfig = { + ...event.Config, + }; + const result: PlanResult = await primitive(projectDir, config, planDir); + + // Upload the planDir as a single tarball. Step Functions cannot pass + // a directory-shaped artifact between states; we serialize and rely on + // the consumer (renderChunk / assemble) to untar. Audio is co-located + // alongside the plan so RenderChunk doesn't have to pull the whole + // plan tarball when audio isn't relevant to the chunk. + const planTar = join(work, "plan.tar.gz"); + await tarDirectory(planDir, planTar); + const planTarUri = `${trimTrailingSlash(event.PlanOutputS3Prefix)}/plan.tar.gz`; + const audioPath = join(planDir, "audio.aac"); + const hasAudio = existsSync(audioPath) && statSync(audioPath).size > 0; + const audioUri = hasAudio ? `${trimTrailingSlash(event.PlanOutputS3Prefix)}/audio.aac` : null; + // Plan and audio are independent S3 PUTs; run them in parallel so + // the response returns as soon as the slower of the two completes. + await Promise.all([ + uploadFileToS3(s3, planTar, planTarUri, "application/gzip"), + hasAudio && audioUri ? 
uploadFileToS3(s3, audioPath, audioUri, "audio/aac") : null, + ]); + + return { + Action: "plan", + PlanS3Uri: planTarUri, + PlanHash: result.planHash, + ChunkCount: result.chunkCount, + TotalFrames: result.totalFrames, + Fps: result.fps, + Width: result.width, + Height: result.height, + Format: result.format, + HasAudio: audioUri !== null, + AudioS3Uri: audioUri, + FfmpegVersion: result.ffmpegVersion, + ProducerVersion: result.producerVersion, + DurationMs: Date.now() - started, + }; + } finally { + cleanupDir(work); + } +} + +// ── RenderChunk ───────────────────────────────────────────────────────────── + +async function handleRenderChunk( + event: RenderChunkEvent, + deps?: HandlerDeps, +): Promise { + const started = Date.now(); + const s3 = deps?.s3 ?? getS3Client(); + const primitive = deps?.primitives?.renderChunk ?? renderChunk; + + // Sparticuz decompresses Chromium into /tmp on first call; warm starts + // skip the work (path already cached). Guard the env-var mutation too so + // a caller-supplied PRODUCER_HEADLESS_SHELL_PATH (e.g. the SAM-local + // RIE smoke) wins over the auto-resolution. + if (!deps?.skipChromeResolution && !process.env.PRODUCER_HEADLESS_SHELL_PATH) { + const chromePath = await resolveChromeExecutablePath(); + // The OSS engine resolves Chrome via `PRODUCER_HEADLESS_SHELL_PATH` + // first (see `browserManager.resolveHeadlessShellPath`); set it before + // invoking the primitive so launch picks up the bundled binary. + process.env.PRODUCER_HEADLESS_SHELL_PATH = chromePath; + } + + const work = mkdtempSync(join(deps?.tmpRoot ?? tmpdir(), "hf-lambda-chunk-")); + const planTar = join(work, "plan.tar.gz"); + const planDir = join(work, "plan"); + + try { + await downloadS3ObjectToFile(s3, event.PlanS3Uri, planTar); + await untarDirectory(planTar, planDir); + + // Verify the plan's hash matches what Step Functions told us to render. 
+ // The producer's renderChunk re-checks internally (defense-in-depth), + // but doing it here at the handler boundary lets us fail before paying + // the Chrome-launch + render cost on a misrouted chunk. Throws a + // typed PLAN_HASH_MISMATCH that Step Functions can route as + // non-retryable. + verifyPlanHash(planDir, event.PlanHash); + + const chunkOutputBase = join( + work, + event.Format === "png-sequence" + ? `chunk-${pad(event.ChunkIndex)}` + : `chunk-${pad(event.ChunkIndex)}${formatExtension(event.Format)}`, + ); + + const result: ChunkResult = await primitive(planDir, event.ChunkIndex, chunkOutputBase); + + const chunkUri = await uploadChunkOutput( + s3, + result, + event.ChunkOutputS3Prefix, + event.ChunkIndex, + ); + + return { + Action: "renderChunk", + ChunkS3Uri: chunkUri, + ChunkIndex: event.ChunkIndex, + Sha256: result.sha256, + FramesEncoded: result.framesEncoded, + DurationMs: Date.now() - started, + }; + } finally { + cleanupDir(work); + } +} + +async function uploadChunkOutput( + s3: S3Client, + result: ChunkResult, + prefix: string, + chunkIndex: number, +): Promise { + const trimmed = trimTrailingSlash(prefix); + if (result.outputKind === "file") { + const ext = result.outputPath.slice(result.outputPath.lastIndexOf(".")); + const uri = `${trimmed}/chunks/${pad(chunkIndex)}${ext}`; + await uploadFileToS3(s3, result.outputPath, uri); + return uri; + } + // frame-dir: upload as a tarball so a single S3 object represents the chunk. + // Assemble's png-sequence path expects a directory per chunk; it untars on + // its end. 
+ const tarball = `${result.outputPath}.tar.gz`; + await tarDirectory(result.outputPath, tarball); + const uri = `${trimmed}/chunks/${pad(chunkIndex)}.tar.gz`; + await uploadFileToS3(s3, tarball, uri, "application/gzip"); + return uri; +} + +// ── Assemble ──────────────────────────────────────────────────────────────── + +async function handleAssemble( + event: AssembleEvent, + deps?: HandlerDeps, +): Promise { + const started = Date.now(); + const s3 = deps?.s3 ?? getS3Client(); + const primitive = deps?.primitives?.assemble ?? assemble; + + const work = mkdtempSync(join(deps?.tmpRoot ?? tmpdir(), "hf-lambda-assemble-")); + const planTar = join(work, "plan.tar.gz"); + const planDir = join(work, "plan"); + + try { + await downloadS3ObjectToFile(s3, event.PlanS3Uri, planTar); + await untarDirectory(planTar, planDir); + + const chunkPaths = await downloadChunkObjects(s3, event.ChunkS3Uris, work, event.Format); + + let audioPath: string | null = null; + if (event.AudioS3Uri) { + audioPath = join(planDir, "audio.aac"); + await downloadS3ObjectToFile(s3, event.AudioS3Uri, audioPath); + } + + const finalOutput = + event.Format === "png-sequence" + ? 
join(work, "output-frames") + : join(work, `output${formatExtension(event.Format)}`); + + const result: AssembleResult = await primitive(planDir, chunkPaths, audioPath, finalOutput); + + if (event.Format === "png-sequence") { + const tarball = `${finalOutput}.tar.gz`; + await tarDirectory(finalOutput, tarball); + await uploadFileToS3(s3, tarball, event.OutputS3Uri, "application/gzip"); + } else { + await uploadFileToS3(s3, finalOutput, event.OutputS3Uri); + } + + return { + Action: "assemble", + OutputS3Uri: event.OutputS3Uri, + FramesEncoded: result.framesEncoded, + FileSize: result.fileSize, + DurationMs: Date.now() - started, + }; + } finally { + cleanupDir(work); + } +} + +async function downloadChunkObjects( + s3: S3Client, + uris: string[], + workDir: string, + format: "mp4" | "mov" | "png-sequence", +): Promise { + const chunksDir = join(workDir, "chunks"); + mkdirSync(chunksDir, { recursive: true }); + // Each chunk is an independent S3 GET (+ untar for png-sequence). Run + // them in parallel — assemble's wall-clock is otherwise dominated by + // `Σ chunk-download-ms` instead of `max(chunk-download-ms)`. Preserve + // the input order by writing into a pre-sized array rather than + // pushing as each task settles. 
+ const local: string[] = new Array(uris.length); + await Promise.all( + uris.map(async (uri, i) => { + if (!uri) { + throw new Error(`[handler] chunk URI at index ${i} is empty`); + } + const { key } = parseS3Uri(uri); + const localPath = join(chunksDir, basename(key)); + await downloadS3ObjectToFile(s3, uri, localPath); + if (format === "png-sequence") { + const dirPath = join(chunksDir, `frames-${pad(i)}`); + await untarDirectory(localPath, dirPath); + local[i] = dirPath; + } else { + local[i] = localPath; + } + }), + ); + return local; +} + +// ── Helpers ───────────────────────────────────────────────────────────────── + +function pad(n: number): string { + return n.toString().padStart(4, "0"); +} + +function trimTrailingSlash(prefix: string): string { + return prefix.endsWith("/") ? prefix.slice(0, -1) : prefix; +} + +function formatExtension(format: "mp4" | "mov" | "png-sequence"): string { + switch (format) { + case "mp4": + return ".mp4"; + case "mov": + return ".mov"; + case "png-sequence": + return ""; + default: { + // Compile-time exhaustiveness — a future Format member trips here. + const _exhaustive: never = format; + throw new Error(`[handler] unsupported format: ${_exhaustive as string}`); + } + } +} + +function cleanupDir(dir: string): void { + try { + // Lambda warm starts can reuse `/tmp` across invocations; clean up + // aggressively so we don't leak a chunk-sized footprint between renders. + rmSync(dir, { recursive: true, force: true }); + } catch { + // Best-effort — leak is preferable to crashing on success path. + } +} + +/** + * Read the untarred planDir's `plan.json` and assert its `planHash` + * matches what the Step Functions event claims. Throws on mismatch with + * a typed `PLAN_HASH_MISMATCH` error name so the state machine's typed + * non-retryable list routes it correctly. 
+ * + * This is defense-in-depth — the producer's `renderChunk` does the same + * check internally — but performing it here lets us fail before paying + * the Chrome-launch + per-frame capture cost on a misrouted chunk. + */ +function verifyPlanHash(planDir: string, expected: string): void { + const planJsonPath = join(planDir, "plan.json"); + let parsed: { planHash?: unknown }; + try { + parsed = JSON.parse(readFileSync(planJsonPath, "utf-8")) as { planHash?: unknown }; + } catch (err) { + const msg = err instanceof Error ? err.message : String(err); + const error = new Error(`PLAN_HASH_MISMATCH: failed to read ${planJsonPath}: ${msg}`); + error.name = "PLAN_HASH_MISMATCH"; + throw error; + } + const actual = parsed.planHash; + if (typeof actual !== "string" || actual !== expected) { + const error = new Error( + `PLAN_HASH_MISMATCH: event PlanHash=${expected} did not match plan.json planHash=${String(actual)}`, + ); + error.name = "PLAN_HASH_MISMATCH"; + throw error; + } +} diff --git a/packages/aws-lambda/src/index.ts b/packages/aws-lambda/src/index.ts new file mode 100644 index 000000000..6cf54adaa --- /dev/null +++ b/packages/aws-lambda/src/index.ts @@ -0,0 +1,45 @@ +/** + * `@hyperframes/aws-lambda` — Lambda adapter for the HyperFrames + * distributed render pipeline. + * + * The package exports the Lambda handler entry point plus the event / + * result types Step Functions consumers and CDK constructs need to + * type-check their state machine definitions. + * + * The handler is bundled with `scripts/build-zip.ts` into `dist/handler.zip` + * — that artifact is what `examples/aws-lambda/template.yaml` and any + * future CDK construct point at via `CodeUri`. The package is NOT a + * dependency of `@hyperframes/producer`; consumers install it separately. 
+ */ + +export { handler, type HandlerDeps, unwrapEvent } from "./handler.js"; +export { + type AssembleEvent, + type AssembleLambdaResult, + type LambdaAction, + type LambdaEvent, + type LambdaResult, + type PlanEvent, + type PlanLambdaResult, + type RenderChunkEvent, + type RenderChunkLambdaResult, + type SerializableDistributedRenderConfig, +} from "./events.js"; +// `_setSparticuzChromiumForTests` is intentionally NOT re-exported from +// the package barrel — it's a test-only DI seam. Test files import it +// directly from `./chromium.js`. +export { + type ChromeSource, + resolveChromeArgs, + resolveChromeExecutablePath, + resolveChromeSource, +} from "./chromium.js"; +export { + downloadS3ObjectToFile, + formatS3Uri, + parseS3Uri, + type S3Location, + tarDirectory, + untarDirectory, + uploadFileToS3, +} from "./s3Transport.js"; diff --git a/packages/aws-lambda/src/s3Transport.test.ts b/packages/aws-lambda/src/s3Transport.test.ts new file mode 100644 index 000000000..dcaa9c549 --- /dev/null +++ b/packages/aws-lambda/src/s3Transport.test.ts @@ -0,0 +1,93 @@ +/** + * Unit tests for the S3 URI parser + tar helpers. Real S3 network calls + * are covered by the dispatch tests in `handler.test.ts` via a fake + * S3Client; here we pin the lower-level helpers. 
+ */ + +import { afterAll, beforeAll, describe, expect, it } from "bun:test"; +import { mkdtempSync, readFileSync, rmSync, writeFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { formatS3Uri, parseS3Uri, tarDirectory, untarDirectory } from "./s3Transport.js"; + +let scratchRoot: string; + +beforeAll(() => { + scratchRoot = mkdtempSync(join(tmpdir(), "hf-s3transport-test-")); +}); + +afterAll(() => { + rmSync(scratchRoot, { recursive: true, force: true }); +}); + +describe("parseS3Uri", () => { + it("parses a simple bucket+key URI", () => { + expect(parseS3Uri("s3://my-bucket/path/to/object.zip")).toEqual({ + bucket: "my-bucket", + key: "path/to/object.zip", + }); + }); + + it("preserves nested keys", () => { + expect(parseS3Uri("s3://b/a/b/c/d.mp4").key).toBe("a/b/c/d.mp4"); + }); + + it("throws on non-s3 schemes", () => { + expect(() => parseS3Uri("https://example.com/x")).toThrow(/expected s3:\/\//); + }); + + it("throws on missing key", () => { + expect(() => parseS3Uri("s3://bucket-only")).toThrow(/missing key/); + }); + + it("throws on empty bucket", () => { + expect(() => parseS3Uri("s3:///somekey")).toThrow(/empty bucket or key/); + }); +}); + +describe("formatS3Uri", () => { + it("round-trips with parseS3Uri", () => { + const uri = "s3://my-bucket/path/to/object.zip"; + expect(formatS3Uri(parseS3Uri(uri))).toBe(uri); + }); +}); + +describe("tar round-trip", () => { + it("tars a directory and untars to identical contents", async () => { + const sourceDir = join(scratchRoot, "src"); + const destDir = join(scratchRoot, "dest"); + const tarPath = join(scratchRoot, "out.tar.gz"); + + const { mkdirSync } = await import("node:fs"); + mkdirSync(join(sourceDir, "nested"), { recursive: true }); + writeFileSync(join(sourceDir, "top.txt"), "hello-top"); + writeFileSync(join(sourceDir, "nested", "inner.txt"), "hello-inner"); + + await tarDirectory(sourceDir, tarPath); + await untarDirectory(tarPath, destDir); + + 
expect(readFileSync(join(destDir, "top.txt"), "utf-8")).toBe("hello-top"); + expect(readFileSync(join(destDir, "nested", "inner.txt"), "utf-8")).toBe("hello-inner"); + }); + + it("wipes the destination before extracting", async () => { + const sourceDir = join(scratchRoot, "src2"); + const destDir = join(scratchRoot, "dest2"); + const tarPath = join(scratchRoot, "out2.tar.gz"); + + const { mkdirSync } = await import("node:fs"); + mkdirSync(sourceDir, { recursive: true }); + writeFileSync(join(sourceDir, "fresh.txt"), "new"); + + mkdirSync(destDir, { recursive: true }); + writeFileSync(join(destDir, "stale.txt"), "leftover"); + + await tarDirectory(sourceDir, tarPath); + await untarDirectory(tarPath, destDir); + + // Stale file should be gone; fresh file should be present. + expect(readFileSync(join(destDir, "fresh.txt"), "utf-8")).toBe("new"); + const { existsSync } = await import("node:fs"); + expect(existsSync(join(destDir, "stale.txt"))).toBe(false); + }); +}); diff --git a/packages/aws-lambda/src/s3Transport.ts b/packages/aws-lambda/src/s3Transport.ts new file mode 100644 index 000000000..988ae2a62 --- /dev/null +++ b/packages/aws-lambda/src/s3Transport.ts @@ -0,0 +1,156 @@ +/** + * Thin S3 transport for the Lambda handler. + * + * The OSS distributed primitives are pure functions over local file paths; + * the Lambda handler bridges S3 ↔ Lambda's `/tmp` filesystem on each + * invocation. Functions here are intentionally narrow: parse a URI, download + * an object to a local path, upload a path/directory, tar-extract a planDir, + * tar-pack a planDir back out. + * + * Tar (not zip) for planDir transit: + * - planDirs contain symlinks (extract stage materializes them but the + * compiled/ subtree may include linked assets); tar preserves them, zip + * does not. 
+ * - We use the `tar` npm package (pure JS over `node:zlib`) — AWS + * Lambda's `nodejs:22` base image ships neither `tar` nor `unzip` in + * `/usr/bin`, so a system-binary tar would ENOENT in the actual + * deployment. + */ + +import { + createReadStream, + createWriteStream, + existsSync, + mkdirSync, + readdirSync, + rmSync, + statSync, +} from "node:fs"; +import { dirname, join } from "node:path"; +import { pipeline } from "node:stream/promises"; +import { GetObjectCommand, PutObjectCommand, type S3Client } from "@aws-sdk/client-s3"; +import * as tar from "tar"; + +/** Parsed `s3://bucket/key` URI. */ +export interface S3Location { + bucket: string; + key: string; +} + +/** Parse `s3://bucket/key/path` → `{ bucket, key }`. Throws on malformed input. */ +export function parseS3Uri(uri: string): S3Location { + if (!uri.startsWith("s3://")) { + throw new Error(`[s3Transport] expected s3:// URI, got: ${JSON.stringify(uri)}`); + } + const rest = uri.slice("s3://".length); + const slash = rest.indexOf("/"); + if (slash === -1) { + throw new Error(`[s3Transport] missing key in s3 URI: ${JSON.stringify(uri)}`); + } + const bucket = rest.slice(0, slash); + const key = rest.slice(slash + 1); + if (!bucket || !key) { + throw new Error(`[s3Transport] empty bucket or key in s3 URI: ${JSON.stringify(uri)}`); + } + return { bucket, key }; +} + +/** Build `s3://bucket/key` from a location. */ +export function formatS3Uri(loc: S3Location): string { + return `s3://${loc.bucket}/${loc.key}`; +} + +/** Stream an S3 object to a local file path. Throws if the body is missing. 
*/ +export async function downloadS3ObjectToFile( + client: S3Client, + uri: string, + destPath: string, +): Promise<void> { + const { bucket, key } = parseS3Uri(uri); + const response = await client.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + const body = response.Body as NodeJS.ReadableStream | undefined; + if (!body) { + throw new Error(`[s3Transport] s3 GetObject returned empty body for ${uri}`); + } + mkdirSync(dirname(destPath), { recursive: true }); + await pipeline(body, createWriteStream(destPath)); +} + +/** + * Upload a local file's contents to an S3 URI using a streaming + * `PutObjectCommand`. PutObject's 5 GB cap comfortably exceeds the + * distributed pipeline's 2 GB planDir limit and the typical + * chunk size (≤ 200 MB), so a single PUT works for every artifact this + * adapter handles. + */ +export async function uploadFileToS3( + client: S3Client, + localPath: string, + uri: string, + contentType?: string, +): Promise<void> { + if (!existsSync(localPath)) { + throw new Error(`[s3Transport] upload source missing: ${localPath}`); + } + const { bucket, key } = parseS3Uri(uri); + const size = statSync(localPath).size; + await client.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: createReadStream(localPath), + ContentType: contentType, + ContentLength: size, + }), + ); +} + +/** + * Pack a directory into a `.tar.gz` at `destTarball`. Uses the `tar` npm + * package (pure JS over `node:zlib`) rather than spawning a system tar + * binary — the AWS Lambda Node 22 base image ships a minimal set of + * userland tools and does NOT include `tar` in `/usr/bin`. 
*/ +export async function tarDirectory(sourceDir: string, destTarball: string): Promise<void> { + if (!existsSync(sourceDir) || !statSync(sourceDir).isDirectory()) { + throw new Error(`[s3Transport] tar source must be an existing directory: ${sourceDir}`); + } + mkdirSync(dirname(destTarball), { recursive: true }); + await tar.create({ gzip: true, file: destTarball, cwd: sourceDir }, ["."]); +} + +/** + * Extract a `.tar.gz` produced by {@link tarDirectory} into `destDir`. + * The directory is created (or cleared) before extraction so a retried + * invocation doesn't observe stale files from a prior run on the same + * warm Lambda container. + */ +export async function untarDirectory(tarballPath: string, destDir: string): Promise<void> { + if (!existsSync(tarballPath)) { + throw new Error(`[s3Transport] tarball missing: ${tarballPath}`); + } + // Wipe target so the warm container's prior planDir doesn't bleed into + // the new invocation. Lambda re-uses /tmp across invocations on the same + // container. + if (existsSync(destDir)) { + rmSync(destDir, { recursive: true, force: true }); + } + mkdirSync(destDir, { recursive: true }); + await tar.extract({ file: tarballPath, cwd: destDir }); +} + +/** List all regular files under a directory, sorted, returned as absolute paths. */ +export function listFilesInDirectory(dir: string): string[] { + const out: string[] = []; + function walk(d: string): void { + for (const entry of readdirSync(d, { withFileTypes: true }).sort((a, b) => + a.name < b.name ? -1 : a.name > b.name ? 
1 : 0, + )) { + const full = join(d, entry.name); + if (entry.isDirectory()) walk(full); + else if (entry.isFile()) out.push(full); + } + } + walk(dir); + return out; +} diff --git a/packages/aws-lambda/tsconfig.json b/packages/aws-lambda/tsconfig.json new file mode 100644 index 000000000..faa4cf6d7 --- /dev/null +++ b/packages/aws-lambda/tsconfig.json @@ -0,0 +1,19 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ESNext", + "moduleResolution": "bundler", + "esModuleInterop": true, + "strict": true, + "noUncheckedIndexedAccess": true, + "skipLibCheck": true, + "noEmit": true, + "baseUrl": ".", + "paths": { + "@hyperframes/producer": ["../producer/src/index.ts"], + "@hyperframes/producer/distributed": ["../producer/src/distributed.ts"] + } + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist", "src/**/*.test.ts", "scripts"] +} diff --git a/packages/producer/src/services/distributed/plan.test.ts b/packages/producer/src/services/distributed/plan.test.ts index 36043d58d..bd5eadd5a 100644 --- a/packages/producer/src/services/distributed/plan.test.ts +++ b/packages/producer/src/services/distributed/plan.test.ts @@ -18,6 +18,7 @@ import { afterAll, beforeAll, describe, expect, it } from "bun:test"; import { existsSync, mkdirSync, mkdtempSync, readFileSync, rmSync, writeFileSync } from "node:fs"; import { tmpdir } from "node:os"; import { join } from "node:path"; +import { recomputePlanHashFromPlanDir } from "../render/stages/freezePlan.js"; import { buildChunkSlices, DEFAULT_CHUNK_SIZE, @@ -208,6 +209,111 @@ describe("plan() — golden planDir + planHash determinism", () => { }, TIMEOUT_MS, ); + + it( + "plan.json.planHash matches recomputePlanHashFromPlanDir(planDir) on the same disk", + async () => { + // Regression guard for a real-world bug observed on audio-bearing + // fixtures: plan() left a temporary `.plan-work/` subtree inside + // planDir while freezePlan walked it, so the hash baked into + // plan.json included artifacts the chunk 
worker would never see. + // The chunk worker's `recomputePlanHashFromPlanDir` walk then + // returned a different hash, tripping PLAN_HASH_MISMATCH at the + // first chunk invocation. + // + // This test verifies that the hash plan() writes matches the hash + // recomputed from the on-disk planDir contents — i.e. the chunk + // worker's view. Holds for any plan, audio or not. + const planDir = join(runRoot, "plan-hash-recompute"); + mkdirSync(planDir, { recursive: true }); + const result = await plan( + projectDir, + { fps: 30, width: 320, height: 240, format: "mp4" }, + planDir, + ); + const recomputed = recomputePlanHashFromPlanDir(planDir); + expect(recomputed).toBe(result.planHash); + const planJson = JSON.parse(readFileSync(join(planDir, "plan.json"), "utf-8")) as { + planHash: string; + }; + expect(planJson.planHash).toBe(result.planHash); + }, + TIMEOUT_MS, + ); + + // Audio-bearing variant of the planHash recompute test. The pre-fix bug + // surfaced because `runAudioStage` downloads/mixes source audio into + // `<planDir>/.plan-work/`, and `freezePlan` walked that subtree before + // plan.ts cleaned it up. A composition without `