2 changes: 2 additions & 0 deletions .env.example
@@ -17,3 +17,5 @@ OPENAI_API_KEY=sk-your-openai-key-here
 # OLLAMA_BASE_URL=http://127.0.0.1:11434/v1
 # OPENAI_BASE_URL=http://127.0.0.1:11434/v1
 # OPENAI_API_KEY is optional for local; defaults to a placeholder if OPENAI_BASE_URL/OLLAMA_BASE_URL is set (ollama provider only).
+# Optional — only if you use Ollama Cloud model tags (e.g. *:cloud); local models need LLM_PROVIDER + LLM_MODEL only.
+# OLLAMA_API_KEY=
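For reference, a minimal .env sketch under these rules (the Cloud model tag and key value are hypothetical placeholders, not values from this repo):

    # Local model (no API key required):
    LLM_PROVIDER=ollama
    LLM_MODEL=llama3.2:3b
    # OLLAMA_BASE_URL=http://127.0.0.1:11434/v1

    # Ollama Cloud model: a *:cloud tag plus a key from https://ollama.com/settings/keys
    # LLM_MODEL=gpt-oss:120b-cloud
    # OLLAMA_API_KEY=your-key-here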
79 changes: 68 additions & 11 deletions .github/workflows/ci.yml
@@ -1,11 +1,17 @@
-# CI: TypeScript typecheck + Playwright agent tests against OpenAI (API key from repo secrets).
+# CI: typecheck + smoke test via local Ollama.
 #
-# Required secret (Settings → Secrets and variables → Actions):
-#   OPENAI_API_KEY — your OpenAI API key
+# Configure without editing this file (repo → Settings → Secrets and variables → Actions):
+#   • Variables: LLM_PROVIDER, LLM_MODEL, optional OLLAMA_BASE_URL
+#   • Secret OLLAMA_API_KEY — only if you set LLM_MODEL to an Ollama *Cloud* tag (*:cloud).
 #
-# Optional: set repository variable LLM_MODEL (e.g. gpt-4o-mini) or edit the env block below.
+# Defaults if unset (all local, no API key): ollama + llama3.2:3b + http://127.0.0.1:11434/v1
+# llama3.2:3b is a good CI default: small/fast on CPU, much more reliable tool-calling than 1b.
 #
-# Fork PRs: the test job is skipped (secrets are not available to workflows from forks).
+# Fork PRs: the test job is skipped (secrets/vars from the base repo are not available to workflows from forks).
 #
+# Manual run (Actions → CI → Run workflow):
+#   • GitHub shows "Use workflow from" — pick the branch there (that version of the workflow runs).
+#   • Optional: set "Checkout ref" below only if you need a different ref than the branch picker.

 name: CI

@@ -15,6 +21,12 @@ on:
   pull_request:
     branches: [main, master]
   workflow_dispatch:
+    inputs:
+      checkout_ref:
+        description: 'Optional — branch name or refs/heads/... to checkout. Leave empty to use the branch selected in "Use workflow from" above.'
+        required: false
+        default: ''
+        type: string

 concurrency:
   group: ci-${{ github.workflow }}-${{ github.ref }}
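A manual dispatch that exercises the new checkout_ref input could look like this with the gh CLI (branch names are hypothetical; --ref selects which version of the workflow runs, while -f checkout_ref=... overrides the ref that gets checked out):

    # Run the workflow as defined on my-feature (checks out my-feature too):
    gh workflow run ci.yml --ref my-feature

    # Run the workflow from main but check out a different branch:
    gh workflow run ci.yml --ref main -f checkout_ref=refs/heads/experiment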
@@ -26,6 +38,8 @@ jobs:
     timeout-minutes: 10
     steps:
       - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.checkout_ref != '' && github.event.inputs.checkout_ref || github.ref }}

       - uses: actions/setup-node@v4
         with:
@@ -46,13 +60,17 @@ jobs:
         (github.event_name == 'pull_request' &&
          github.event.pull_request.head.repo.full_name == github.repository)
     runs-on: ubuntu-latest
-    timeout-minutes: 60
+    timeout-minutes: 45
     env:
-      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-      LLM_PROVIDER: openai
-      LLM_MODEL: gpt-4o-mini
+      # secrets.* overrides vars.* (both optional); fallbacks keep CI working if nothing is configured.
+      LLM_PROVIDER: ${{ secrets.LLM_PROVIDER || vars.LLM_PROVIDER || 'ollama' }}
+      LLM_MODEL: ${{ secrets.LLM_MODEL || vars.LLM_MODEL || 'llama3.2:3b' }}
+      OLLAMA_BASE_URL: ${{ secrets.OLLAMA_BASE_URL || vars.OLLAMA_BASE_URL || 'http://127.0.0.1:11434/v1' }}
+      OLLAMA_API_KEY: ${{ secrets.OLLAMA_API_KEY }}
     steps:
       - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.checkout_ref != '' && github.event.inputs.checkout_ref || github.ref }}

       - uses: actions/setup-node@v4
         with:
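Assuming the gh CLI, the variables and the optional secret this env block reads can be set without editing the workflow file (values illustrative):

    gh variable set LLM_PROVIDER --body "ollama"
    gh variable set LLM_MODEL --body "llama3.2:3b"
    gh secret set OLLAMA_API_KEY   # only needed for *:cloud model tags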
@@ -65,8 +83,47 @@ jobs:
       - name: Install Playwright browsers
         run: npx playwright install --with-deps

-      - name: Run Playwright tests
-        run: npm test
+      - name: Ollama Cloud models need OLLAMA_API_KEY
+        run: |
+          case "$LLM_MODEL" in
+            *:cloud*)
+              if [ -z "${OLLAMA_API_KEY}" ]; then
+                echo "::error title=Missing OLLAMA_API_KEY::Models tagged *:cloud use Ollama Cloud. Add repository secret OLLAMA_API_KEY (https://ollama.com/settings/keys). Or use a local tag (e.g. llama3.2:3b) in Variables."
+                exit 1
+              fi
+              echo "OLLAMA_API_KEY is set for Cloud model."
+              ;;
+            *)
+              echo "LLM_MODEL=$LLM_MODEL (local tag — OLLAMA_API_KEY optional)"
+              ;;
+          esac
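The guard is plain POSIX sh, so it can be sanity-checked locally before pushing; a rough sketch with a hypothetical Cloud tag:

    # Expect the failure branch (cloud tag, empty key):
    LLM_MODEL="qwen3:235b-cloud" OLLAMA_API_KEY="" sh -c '
      case "$LLM_MODEL" in
        *:cloud*) [ -n "$OLLAMA_API_KEY" ] || echo "would fail CI: OLLAMA_API_KEY missing";;
        *) echo "local tag, key optional";;
      esac'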

+      - name: Install Ollama
+        run: curl -fsSL https://ollama.com/install.sh | sh
+
+      - name: Start Ollama and wait for API
+        run: |
+          set -e
+          sudo systemctl stop ollama 2>/dev/null || true
+          nohup ollama serve > /tmp/ollama-serve.log 2>&1 &
+          echo "Waiting for http://127.0.0.1:11434 ..."
+          for i in $(seq 1 90); do
+            if curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; then
+              echo "Ollama is ready (after ${i}s)"
+              exit 0
+            fi
+            sleep 1
+          done
+          echo "--- ollama serve log ---"
+          cat /tmp/ollama-serve.log || true
+          exit 1
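The readiness loop polls Ollama's /api/tags endpoint, which lists locally pulled models; the same probe is handy when debugging a runner by hand:

    curl -fsS http://127.0.0.1:11434/api/tags   # fails until the server is up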

+      - name: Pull Ollama model
+        timeout-minutes: 30
+        run: ollama pull "$LLM_MODEL"
+
+      - name: Smoke test (single LLM case)
+        run: npm run test:smoke
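test:smoke is defined in the repo's package.json and isn't shown in this diff. As an assumption only, a single-case Playwright smoke filter usually looks something like:

    # Hypothetical script shape, not the repo's actual definition:
    # "test:smoke": "playwright test --grep @smoke"
    npx playwright test --grep @smoke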

       - name: Upload Playwright report (on failure)
         if: failure()