diff --git a/.gcloudignore b/.gcloudignore new file mode 100644 index 00000000..b924691d --- /dev/null +++ b/.gcloudignore @@ -0,0 +1,14 @@ +** +!Dockerfile.ghl +!cloudbuild.ghl.yaml +!Makefile.cbm +!REPOS.yaml +!REPOS.local.yaml +!src +!src/** +!internal +!internal/** +!vendored +!vendored/** +!ghl +!ghl/** diff --git a/Dockerfile.ghl b/Dockerfile.ghl new file mode 100644 index 00000000..6d2226d1 --- /dev/null +++ b/Dockerfile.ghl @@ -0,0 +1,88 @@ +# Dockerfile.ghl — GHL fleet server +# +# Multi-stage build: +# stage 1 (cbm): build the codebase-memory-mcp binary from source (linux/amd64) +# stage 2 (build): compile the Go fleet server +# stage 3 (run): minimal runtime image + +# ── Stage 1: codebase-memory-mcp binary ────────────────────────── +FROM debian:12-slim AS cbm + +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + git \ + pkg-config \ + zlib1g-dev \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /src + +COPY Makefile.cbm ./ +COPY src/ ./src/ +COPY internal/ ./internal/ +COPY vendored/ ./vendored/ + +RUN make -f Makefile.cbm cbm && \ + install -m 0755 build/c/codebase-memory-mcp /usr/local/bin/codebase-memory-mcp + +# ── Stage 2: Go fleet server ────────────────────────────────────── +FROM golang:1.25-alpine AS build + +WORKDIR /src + +# Cache dependencies first +COPY ghl/go.mod ghl/go.sum ./ +RUN go mod download + +# Copy source +COPY ghl/ ./ + +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \ + go build -trimpath -ldflags="-s -w" \ + -o /app/ghl-fleet ./cmd/server + +# ── Stage 3: Runtime ────────────────────────────────────────────── +# Use debian-slim (not distroless) so git is available for repo cloning +FROM debian:12-slim + +RUN apt-get update && apt-get install -y --no-install-recommends \ + git \ + ca-certificates \ + openssh-client \ + && rm -rf /var/lib/apt/lists/* + +# Copy binaries +COPY --from=cbm /usr/local/bin/codebase-memory-mcp /app/codebase-memory-mcp +COPY --from=build /app/ghl-fleet 
/app/ghl-fleet + +# Copy default manifest +COPY REPOS.yaml /app/REPOS.yaml +COPY REPOS.local.yaml /app/REPOS.local.yaml + +# Git: trust all dirs (needed when running as non-root in containers) +RUN git config --global --add safe.directory '*' + +WORKDIR /app + +# ── Defaults (all overridable via env) ─────────────────────────── +ENV PORT=8080 \ + CBM_BINARY=/app/codebase-memory-mcp \ + CBM_CACHE_DIR=/tmp/codebase-memory-mcp \ + CBM_ARTIFACT_DIR=/data/fleet-cache/indexes \ + FLEET_CACHE_DIR=/data/fleet-cache/repos \ + REPOS_MANIFEST=/app/REPOS.local.yaml \ + BRIDGE_CLIENTS=4 \ + BRIDGE_ACQUIRE_TIMEOUT_MS=1500 \ + FLEET_CONCURRENCY=8 \ + INDEXER_CLIENTS=8 \ + STARTUP_INDEX_ENABLED=false \ + SCHEDULED_INDEXING_ENABLED=false \ + CRON_INCREMENTAL="0 */6 * * *" \ + CRON_FULL="0 2 * * 0" + +EXPOSE 8080 + +VOLUME ["/data/fleet-cache"] + +ENTRYPOINT ["/app/ghl-fleet"] diff --git a/REPOS.local.yaml b/REPOS.local.yaml new file mode 100644 index 00000000..bbfd9eee --- /dev/null +++ b/REPOS.local.yaml @@ -0,0 +1,236 @@ +# REPOS.local.yaml — generated local fleet manifest +# workspace_root: /Users/himanshuranjan/Documents/highlevel +# source_manifest: ../REPOS.yaml +# Regenerate from ./ghl with: go run ./cmd/genlocalmanifest +repos: + - name: clientportal-core + github_url: https://github.com/GoHighLevel/clientportal-core.git + team: platform + type: library + tags: + - vue + - vue3 + - platform + - name: ghl-agentic-workspace + github_url: https://github.com/GoHighLevel/ghl-agentic-workspace.git + team: platform + type: service + tags: + - typescript + - nestjs + - platform + - name: ghl-awesome-studio + github_url: https://github.com/GoHighLevel/ghl-awesome-studio.git + team: platform + type: frontend + tags: + - vue + - vue3 + - platform + - name: ghls-pr + github_url: https://github.com/GoHighLevel/ghls-pr.git + team: platform + type: service + tags: + - typescript + - nestjs + - platform + - name: i18n-analysis + github_url: https://github.com/GoHighLevel/i18n-analysis.git 
+ team: platform + type: service + tags: + - javascript + - nestjs + - platform + - name: image-processing-service + github_url: https://github.com/GoHighLevel/image-processing-service.git + team: platform + type: service + tags: + - go + - platform + - name: infrastructure-as-a-code + github_url: https://github.com/GoHighLevel/infrastructure-as-a-code.git + team: platform + type: infra + tags: + - hcl + - platform + - name: MoltClaw-by-HighLevel + github_url: https://github.com/GoHighLevel/MoltClaw-by-HighLevel.git + team: platform + type: service + tags: + - typescript + - nestjs + - platform + - name: platform-backend + github_url: https://github.com/GoHighLevel/platform-backend.git + team: platform + type: service + tags: + - typescript + - nestjs + - platform + - name: platform-core + github_url: https://github.com/GoHighLevel/platform-core.git + team: platform + type: library + tags: + - typescript + - platform + - name: platform-devtools-backend + github_url: https://github.com/GoHighLevel/platform-devtools-backend.git + team: platform + type: service + tags: + - typescript + - nestjs + - platform + - name: platform-devtools-frontend + github_url: https://github.com/GoHighLevel/platform-devtools-frontend.git + team: platform + type: frontend + tags: + - typescript + - platform + - name: platform-docs + github_url: https://github.com/GoHighLevel/platform-docs.git + team: platform + type: docs + tags: + - html + - platform + - name: platform-jenkins-shared-library + github_url: https://github.com/GoHighLevel/platform-jenkins-shared-library.git + team: platform + type: library + tags: + - groovy + - platform + - name: project-orion + github_url: https://github.com/GoHighLevel/project-orion.git + team: platform + type: other + tags: + - html + - platform + - name: quality-gates + github_url: https://github.com/GoHighLevel/quality-gates.git + team: platform + type: service + tags: + - typescript + - nestjs + - platform + - name: automation-am-client-portal + 
github_url: https://github.com/GoHighLevel/automation-am-client-portal.git + team: revex + type: frontend + tags: + - vue + - vue3 + - revex + - name: ghl-membership-frontend + github_url: https://github.com/GoHighLevel/ghl-membership-frontend.git + team: revex + type: frontend + tags: + - typescript + - revex + - name: ghl-revex-backend + github_url: https://github.com/GoHighLevel/ghl-revex-backend.git + team: revex + type: service + tags: + - typescript + - nestjs + - revex + - name: ghl-revex-frontend + github_url: https://github.com/GoHighLevel/ghl-revex-frontend.git + team: revex + type: frontend + tags: + - vue + - vue3 + - revex + - name: membership-backend + github_url: https://github.com/GoHighLevel/membership-backend.git + team: revex + type: service + tags: + - typescript + - nestjs + - revex + - name: membership-hmi-app + github_url: https://github.com/GoHighLevel/membership-hmi-app.git + team: revex + type: frontend + tags: + - vue + - vue3 + - revex + - name: membership-hmi-preview + github_url: https://github.com/GoHighLevel/membership-hmi-preview.git + team: revex + type: frontend + tags: + - vue + - vue3 + - revex + - name: ghl-crm-frontend + github_url: https://github.com/GoHighLevel/ghl-crm-frontend.git + team: crm + type: frontend + tags: + - vue + - vue3 + - crm + - name: ghl-email-builder + github_url: https://github.com/GoHighLevel/ghl-email-builder.git + team: conversations + type: frontend + tags: + - vue + - vue3 + - conversations + - name: spm-ts + github_url: https://github.com/GoHighLevel/spm-ts.git + team: funnels + type: frontend + tags: + - vue + - vue3 + - funnels + - name: automation-workflows-frontend + github_url: https://github.com/GoHighLevel/automation-workflows-frontend.git + team: marketing + type: frontend + tags: + - typescript + - marketing + - name: marketplace-backend + github_url: https://github.com/GoHighLevel/marketplace-backend.git + team: saas + type: service + tags: + - typescript + - nestjs + - saas + - name: 
ai-backend + github_url: https://github.com/GoHighLevel/ai-backend.git + team: ai + type: service + tags: + - typescript + - nestjs + - ai + - name: ai-frontend + github_url: https://github.com/GoHighLevel/ai-frontend.git + team: ai + type: frontend + tags: + - vue + - vue3 + - ai diff --git a/REPOS.yaml b/REPOS.yaml new file mode 100644 index 00000000..640fd1be --- /dev/null +++ b/REPOS.yaml @@ -0,0 +1,2897 @@ +# GHL Fleet Manifest — auto-generated from GoHighLevel GitHub org +# DO NOT EDIT MANUALLY — regenerate with: scripts/generate-repos-manifest.sh +# Total active repos: 480 (archived repos excluded) + +repos: + # ──────────────────── PLATFORM ────────────────────── + - name: a11y-injector + github_url: https://github.com/GoHighLevel/a11y-injector.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: api-documentation + github_url: https://github.com/GoHighLevel/api-documentation.git + team: platform + type: docs + tags: [typescript, platform] + + - name: api-framework + github_url: https://github.com/GoHighLevel/api-framework.git + team: platform + type: library + tags: [typescript, platform] + + - name: api-gateway + github_url: https://github.com/GoHighLevel/api-gateway.git + team: platform + type: service + tags: [csharp, platform] + + - name: ARTS + github_url: https://github.com/GoHighLevel/ARTS.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: backstage + github_url: https://github.com/GoHighLevel/backstage.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: branch-test-repo + github_url: https://github.com/GoHighLevel/branch-test-repo.git + team: platform + type: tests + tags: [testing, platform] + + - name: bugzy-lab + github_url: https://github.com/GoHighLevel/bugzy-lab.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: Build-settings + github_url: https://github.com/GoHighLevel/Build-settings.git + team: 
platform + type: other + tags: [lua, platform] + + - name: canary-flow + github_url: https://github.com/GoHighLevel/canary-flow.git + team: platform + type: other + tags: [platform] + + - name: cbr + github_url: https://github.com/GoHighLevel/cbr.git + team: platform + type: other + tags: [platform] + + - name: clientportal-core + github_url: https://github.com/GoHighLevel/clientportal-core.git + team: platform + type: library + tags: [vue, vue3, platform] + + - name: cloud-functions + github_url: https://github.com/GoHighLevel/cloud-functions.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: code-coverage + github_url: https://github.com/GoHighLevel/code-coverage.git + team: platform + type: other + tags: [platform] + + - name: colorcounter + github_url: https://github.com/GoHighLevel/colorcounter.git + team: platform + type: other + tags: [dart, platform] + + - name: context-layer + github_url: https://github.com/GoHighLevel/context-layer.git + team: platform + type: service + tags: [python, platform] + + - name: Continuum + github_url: https://github.com/GoHighLevel/Continuum.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: critical-endpoints-servers + github_url: https://github.com/GoHighLevel/critical-endpoints-servers.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: crud-test + github_url: https://github.com/GoHighLevel/crud-test.git + team: platform + type: tests + tags: [typescript, testing, platform] + + - name: csv-xls-exporter + github_url: https://github.com/GoHighLevel/csv-xls-exporter.git + team: platform + type: other + tags: [platform] + + - name: custom-widgets-price-banner + github_url: https://github.com/GoHighLevel/custom-widgets-price-banner.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: Customer_Success_Transcription_App_V2 + github_url: 
https://github.com/GoHighLevel/Customer_Success_Transcription_App_V2.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: Customer_Support_Transcription_App_V2 + github_url: https://github.com/GoHighLevel/Customer_Support_Transcription_App_V2.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: debounce-service + github_url: https://github.com/GoHighLevel/debounce-service.git + team: platform + type: service + tags: [python, platform] + + - name: deployment-bot + github_url: https://github.com/GoHighLevel/deployment-bot.git + team: platform + type: infra + tags: [shell, platform] + + - name: dev-charon + github_url: https://github.com/GoHighLevel/dev-charon.git + team: platform + type: service + tags: [go, platform] + + - name: dev-charon-assets-viewer + github_url: https://github.com/GoHighLevel/dev-charon-assets-viewer.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: dev-commerce-applications + github_url: https://github.com/GoHighLevel/dev-commerce-applications.git + team: platform + type: frontend + tags: [go, platform] + + - name: dev-commerce-documentx + github_url: https://github.com/GoHighLevel/dev-commerce-documentx.git + team: platform + type: service + tags: [go, platform] + + - name: dev-commerce-engine + github_url: https://github.com/GoHighLevel/dev-commerce-engine.git + team: platform + type: service + tags: [go, platform] + + - name: dev-commerce-frontend + github_url: https://github.com/GoHighLevel/dev-commerce-frontend.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: dev-commerce-img-optimiser + github_url: https://github.com/GoHighLevel/dev-commerce-img-optimiser.git + team: platform + type: other + tags: [c, platform] + + - name: dev-commerce-ledgerx + github_url: https://github.com/GoHighLevel/dev-commerce-ledgerx.git + team: platform + type: service + tags: [go, platform] + + - name: dev-commerce-merchantx 
+ github_url: https://github.com/GoHighLevel/dev-commerce-merchantx.git + team: platform + type: service + tags: [go, platform] + + - name: dev-commerce-ppc + github_url: https://github.com/GoHighLevel/dev-commerce-ppc.git + team: platform + type: service + tags: [go, platform] + + - name: dev-commerce-proto + github_url: https://github.com/GoHighLevel/dev-commerce-proto.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: dev-commerce-transaction-forensics + github_url: https://github.com/GoHighLevel/dev-commerce-transaction-forensics.git + team: platform + type: service + tags: [go, platform] + + - name: dev-conventions + github_url: https://github.com/GoHighLevel/dev-conventions.git + team: platform + type: other + tags: [platform] + + - name: dev-cursor-agents-manager + github_url: https://github.com/GoHighLevel/dev-cursor-agents-manager.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: dev-docs + github_url: https://github.com/GoHighLevel/dev-docs.git + team: platform + type: docs + tags: [platform] + + - name: dev-mobcom-fsb-dashboard + github_url: https://github.com/GoHighLevel/dev-mobcom-fsb-dashboard.git + team: platform + type: frontend + tags: [go, platform] + + - name: DevCapture + github_url: https://github.com/GoHighLevel/DevCapture.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: devlab-internal + github_url: https://github.com/GoHighLevel/devlab-internal.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: disassemble-batch + github_url: https://github.com/GoHighLevel/disassemble-batch.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: docker-nginx-auto-ssl + github_url: https://github.com/GoHighLevel/docker-nginx-auto-ssl.git + team: platform + type: infra + tags: [shell, platform] + + - name: document-chrome-extension + github_url: 
https://github.com/GoHighLevel/document-chrome-extension.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: documents-contracts-rich-text-mvp + github_url: https://github.com/GoHighLevel/documents-contracts-rich-text-mvp.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: electron-push-receiver + github_url: https://github.com/GoHighLevel/electron-push-receiver.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: email-builder-service + github_url: https://github.com/GoHighLevel/email-builder-service.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: email-builder-tools + github_url: https://github.com/GoHighLevel/email-builder-tools.git + team: platform + type: tooling + tags: [javascript, platform] + + - name: engram + github_url: https://github.com/GoHighLevel/engram.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ent-reports + github_url: https://github.com/GoHighLevel/ent-reports.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: events-backend + github_url: https://github.com/GoHighLevel/events-backend.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: events-frontend + github_url: https://github.com/GoHighLevel/events-frontend.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: fd-test + github_url: https://github.com/GoHighLevel/fd-test.git + team: platform + type: tests + tags: [vue, vue3, testing, platform] + + - name: figma-importer-plugin + github_url: https://github.com/GoHighLevel/figma-importer-plugin.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: FigmaJSONtoComponent + github_url: https://github.com/GoHighLevel/FigmaJSONtoComponent.git + team: platform + type: other + tags: [platform] + + - name: firestore-rules + github_url: 
https://github.com/GoHighLevel/firestore-rules.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: flutter-ffmpeg-kit + github_url: https://github.com/GoHighLevel/flutter-ffmpeg-kit.git + team: platform + type: other + tags: [c, platform] + + - name: flutter-layrkit + github_url: https://github.com/GoHighLevel/flutter-layrkit.git + team: platform + type: other + tags: [dart, platform] + + - name: flutter-official-packages + github_url: https://github.com/GoHighLevel/flutter-official-packages.git + team: platform + type: library + tags: [platform] + + - name: flutter_html + github_url: https://github.com/GoHighLevel/flutter_html.git + team: platform + type: other + tags: [dart, platform] + + - name: flutter_icon54 + github_url: https://github.com/GoHighLevel/flutter_icon54.git + team: platform + type: other + tags: [dart, platform] + + - name: flutter_launcher_icons + github_url: https://github.com/GoHighLevel/flutter_launcher_icons.git + team: platform + type: other + tags: [dart, platform] + + - name: flutter_native_splash + github_url: https://github.com/GoHighLevel/flutter_native_splash.git + team: platform + type: other + tags: [platform] + + - name: flutter_untitled_ui_icons + github_url: https://github.com/GoHighLevel/flutter_untitled_ui_icons.git + team: platform + type: other + tags: [dart, platform] + + - name: freshdesk-indexer-ts + github_url: https://github.com/GoHighLevel/freshdesk-indexer-ts.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: freshdesk-indexer-ts-v2 + github_url: https://github.com/GoHighLevel/freshdesk-indexer-ts-v2.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: frontend-codemods + github_url: https://github.com/GoHighLevel/frontend-codemods.git + team: platform + type: other + tags: [platform] + + - name: frontend-debugger + github_url: https://github.com/GoHighLevel/frontend-debugger.git + team: platform + type: service 
+ tags: [javascript, nestjs, platform] + + - name: frontend-memory-leaks + github_url: https://github.com/GoHighLevel/frontend-memory-leaks.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: frontend-performance-utils + github_url: https://github.com/GoHighLevel/frontend-performance-utils.git + team: platform + type: library + tags: [typescript, platform] + + - name: frontend-utils + github_url: https://github.com/GoHighLevel/frontend-utils.git + team: platform + type: library + tags: [platform] + + - name: ghl-agentic-workspace + github_url: https://github.com/GoHighLevel/ghl-agentic-workspace.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-api-collection + github_url: https://github.com/GoHighLevel/ghl-api-collection.git + team: platform + type: service + tags: [platform] + + - name: ghl-auth3 + github_url: https://github.com/GoHighLevel/ghl-auth3.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-awesome-onboarding + github_url: https://github.com/GoHighLevel/ghl-awesome-onboarding.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-awesome-studio + github_url: https://github.com/GoHighLevel/ghl-awesome-studio.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-backend-repo-template + github_url: https://github.com/GoHighLevel/ghl-backend-repo-template.git + team: platform + type: service + tags: [dockerfile, platform] + + - name: ghl-brand-boards + github_url: https://github.com/GoHighLevel/ghl-brand-boards.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-browser-mcp + github_url: https://github.com/GoHighLevel/ghl-browser-mcp.git + team: platform + type: service + tags: [javascript, nestjs, mcp, platform] + + - name: ghl-bulk-request + github_url: https://github.com/GoHighLevel/ghl-bulk-request.git + team: platform + type: service + tags: 
[typescript, nestjs, platform] + + - name: ghl-codebase-mcp + github_url: https://github.com/GoHighLevel/ghl-codebase-mcp.git + team: platform + type: library + tags: [go, mcp, platform] + + - name: ghl-context-builder + github_url: https://github.com/GoHighLevel/ghl-context-builder.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: ghl-ctk-date-time-picker + github_url: https://github.com/GoHighLevel/ghl-ctk-date-time-picker.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-cursor-rules + github_url: https://github.com/GoHighLevel/ghl-cursor-rules.git + team: platform + type: other + tags: [platform] + + - name: ghl-cursor-skills + github_url: https://github.com/GoHighLevel/ghl-cursor-skills.git + team: platform + type: other + tags: [platform] + + - name: ghl-cursor-skills-mcp + github_url: https://github.com/GoHighLevel/ghl-cursor-skills-mcp.git + team: platform + type: service + tags: [typescript, nestjs, mcp, platform] + + - name: GHL-Design-Memory + github_url: https://github.com/GoHighLevel/GHL-Design-Memory.git + team: platform + type: service + tags: [python, platform] + + - name: ghl-desktop-app + github_url: https://github.com/GoHighLevel/ghl-desktop-app.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: ghl-docs-hub + github_url: https://github.com/GoHighLevel/ghl-docs-hub.git + team: platform + type: docs + tags: [typescript, platform] + + - name: ghl-electron-desktop-apps-test + github_url: https://github.com/GoHighLevel/ghl-electron-desktop-apps-test.git + team: platform + type: frontend + tags: [testing, platform] + + - name: ghl-external-tracking + github_url: https://github.com/GoHighLevel/ghl-external-tracking.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-federation-dashboard + github_url: https://github.com/GoHighLevel/ghl-federation-dashboard.git + team: platform + type: frontend + tags: [vue, vue3, 
platform] + + - name: ghl-github-pr-dashboard + github_url: https://github.com/GoHighLevel/ghl-github-pr-dashboard.git + team: platform + type: frontend + tags: [javascript, platform] + + - name: ghl-helm-charts + github_url: https://github.com/GoHighLevel/ghl-helm-charts.git + team: platform + type: infra + tags: [smarty, platform] + + - name: ghl-i18n-feedback + github_url: https://github.com/GoHighLevel/ghl-i18n-feedback.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-icons + github_url: https://github.com/GoHighLevel/ghl-icons.git + team: platform + type: other + tags: [shell, platform] + + - name: ghl-image-py + github_url: https://github.com/GoHighLevel/ghl-image-py.git + team: platform + type: service + tags: [python, platform] + + - name: ghl-isv-app + github_url: https://github.com/GoHighLevel/ghl-isv-app.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-kollab-ci-certificates + github_url: https://github.com/GoHighLevel/ghl-kollab-ci-certificates.git + team: platform + type: other + tags: [platform] + + - name: ghl-leadgen-countdowntimer + github_url: https://github.com/GoHighLevel/ghl-leadgen-countdowntimer.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-leadgen-frontend + github_url: https://github.com/GoHighLevel/ghl-leadgen-frontend.git + team: platform + type: frontend + tags: [platform] + + - name: ghl-liquibase + github_url: https://github.com/GoHighLevel/ghl-liquibase.git + team: platform + type: other + tags: [shell, platform] + + - name: ghl-localisation-v2 + github_url: https://github.com/GoHighLevel/ghl-localisation-v2.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-localization + github_url: https://github.com/GoHighLevel/ghl-localization.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-magic-studio + github_url: 
https://github.com/GoHighLevel/ghl-magic-studio.git + team: platform + type: other + tags: [dockerfile, platform] + + - name: ghl-manifest-viewer + github_url: https://github.com/GoHighLevel/ghl-manifest-viewer.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: ghl-mcp-server + github_url: https://github.com/GoHighLevel/ghl-mcp-server.git + team: platform + type: service + tags: [typescript, nestjs, mcp, platform] + + - name: ghl-media-center + github_url: https://github.com/GoHighLevel/ghl-media-center.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-mobile-app-customiser + github_url: https://github.com/GoHighLevel/ghl-mobile-app-customiser.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-mobile-ci-certificates + github_url: https://github.com/GoHighLevel/ghl-mobile-ci-certificates.git + team: platform + type: other + tags: [platform] + + - name: ghl-module-federation-plugin + github_url: https://github.com/GoHighLevel/ghl-module-federation-plugin.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-monorepo-boilerplate + github_url: https://github.com/GoHighLevel/ghl-monorepo-boilerplate.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-moz-header + github_url: https://github.com/GoHighLevel/ghl-moz-header.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: ghl-nestjs-boilerplate + github_url: https://github.com/GoHighLevel/ghl-nestjs-boilerplate.git + team: platform + type: other + tags: [platform] + + - name: ghl-ofa + github_url: https://github.com/GoHighLevel/ghl-ofa.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-operations + github_url: https://github.com/GoHighLevel/ghl-operations.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: ghl-pam-logging + 
github_url: https://github.com/GoHighLevel/ghl-pam-logging.git + team: platform + type: other + tags: [platform] + + - name: ghl-pdf-compliance + github_url: https://github.com/GoHighLevel/ghl-pdf-compliance.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-plugins + github_url: https://github.com/GoHighLevel/ghl-plugins.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: ghl-poc + github_url: https://github.com/GoHighLevel/ghl-poc.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-pr-ops + github_url: https://github.com/GoHighLevel/ghl-pr-ops.git + team: platform + type: other + tags: [platform] + + - name: ghl-pr-tracker + github_url: https://github.com/GoHighLevel/ghl-pr-tracker.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-proposals + github_url: https://github.com/GoHighLevel/ghl-proposals.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-public-apis + github_url: https://github.com/GoHighLevel/ghl-public-apis.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-public-library-ssr + github_url: https://github.com/GoHighLevel/ghl-public-library-ssr.git + team: platform + type: library + tags: [vue, vue3, platform] + + - name: ghl-qr-code + github_url: https://github.com/GoHighLevel/ghl-qr-code.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: ghl-qr-server + github_url: https://github.com/GoHighLevel/ghl-qr-server.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-rbac-test-suite + github_url: https://github.com/GoHighLevel/ghl-rbac-test-suite.git + team: platform + type: tests + tags: [typescript, testing, platform] + + - name: ghl-repoatlas + github_url: https://github.com/GoHighLevel/ghl-repoatlas.git + team: platform + type: service + tags: [python, platform] + 
+ - name: ghl-route-registry + github_url: https://github.com/GoHighLevel/ghl-route-registry.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-sdk-examples + github_url: https://github.com/GoHighLevel/ghl-sdk-examples.git + team: platform + type: library + tags: [html, platform] + + - name: ghl-sdk-generator + github_url: https://github.com/GoHighLevel/ghl-sdk-generator.git + team: platform + type: library + tags: [handlebars, platform] + + - name: ghl-seo-app + github_url: https://github.com/GoHighLevel/ghl-seo-app.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: ghl-ssr-boilerplate + github_url: https://github.com/GoHighLevel/ghl-ssr-boilerplate.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-template-library + github_url: https://github.com/GoHighLevel/ghl-template-library.git + team: platform + type: library + tags: [typescript, platform] + + - name: ghl-test-management + github_url: https://github.com/GoHighLevel/ghl-test-management.git + team: platform + type: tests + tags: [testing, platform] + + - name: ghl-test-platform + github_url: https://github.com/GoHighLevel/ghl-test-platform.git + team: platform + type: tests + tags: [vue, vue3, testing, platform] + + - name: ghl-text-editor + github_url: https://github.com/GoHighLevel/ghl-text-editor.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ghl-tourguide + github_url: https://github.com/GoHighLevel/ghl-tourguide.git + team: platform + type: docs + tags: [typescript, platform] + + - name: ghl-ui + github_url: https://github.com/GoHighLevel/ghl-ui.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: ghl-v2-api-docs + github_url: https://github.com/GoHighLevel/ghl-v2-api-docs.git + team: platform + type: service + tags: [platform] + + - name: ghl-widgets + github_url: https://github.com/GoHighLevel/ghl-widgets.git + team: 
platform + type: frontend + tags: [typescript, platform] + + - name: ghl_evalcore + github_url: https://github.com/GoHighLevel/ghl_evalcore.git + team: platform + type: service + tags: [typescript, nestjs, testing, platform] + + - name: ghl_vision_flutter + github_url: https://github.com/GoHighLevel/ghl_vision_flutter.git + team: platform + type: other + tags: [dart, platform] + + - name: ghls-pr + github_url: https://github.com/GoHighLevel/ghls-pr.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: giscus-ghl + github_url: https://github.com/GoHighLevel/giscus-ghl.git + team: platform + type: other + tags: [platform] + + - name: git-jenkins-mcp + github_url: https://github.com/GoHighLevel/git-jenkins-mcp.git + team: platform + type: infra + tags: [typescript, mcp, platform] + + - name: github-actions + github_url: https://github.com/GoHighLevel/github-actions.git + team: platform + type: other + tags: [dockerfile, platform] + + - name: github-digest + github_url: https://github.com/GoHighLevel/github-digest.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: go-platform + github_url: https://github.com/GoHighLevel/go-platform.git + team: platform + type: service + tags: [go, platform] + + - name: go-platform-core + github_url: https://github.com/GoHighLevel/go-platform-core.git + team: platform + type: library + tags: [go, platform] + + - name: GoHighLevel + github_url: https://github.com/GoHighLevel/GoHighLevel.git + team: platform + type: other + tags: [platform] + + - name: grafana-report-generator + github_url: https://github.com/GoHighLevel/grafana-report-generator.git + team: platform + type: tooling + tags: [platform] + + - name: gsd-ghl + github_url: https://github.com/GoHighLevel/gsd-ghl.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: high-rise-flutter-colors + github_url: https://github.com/GoHighLevel/high-rise-flutter-colors.git + team: 
platform + type: other + tags: [dart, platform] + + - name: high_canopy + github_url: https://github.com/GoHighLevel/high_canopy.git + team: platform + type: other + tags: [dart, platform] + + - name: highlevel-api-docs + github_url: https://github.com/GoHighLevel/highlevel-api-docs.git + team: platform + type: service + tags: [platform] + + - name: highlevel-api-php + github_url: https://github.com/GoHighLevel/highlevel-api-php.git + team: platform + type: service + tags: [php, platform] + + - name: highlevel-api-python + github_url: https://github.com/GoHighLevel/highlevel-api-python.git + team: platform + type: service + tags: [python, platform] + + - name: highlevel-api-sdk + github_url: https://github.com/GoHighLevel/highlevel-api-sdk.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: highlevel-api-sdk-private + github_url: https://github.com/GoHighLevel/highlevel-api-sdk-private.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: highlevel-flutter + github_url: https://github.com/GoHighLevel/highlevel-flutter.git + team: platform + type: other + tags: [dart, platform] + + - name: highlevel-functions + github_url: https://github.com/GoHighLevel/highlevel-functions.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: highlevel-functions-temp + github_url: https://github.com/GoHighLevel/highlevel-functions-temp.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: highlevel-functions-utils + github_url: https://github.com/GoHighLevel/highlevel-functions-utils.git + team: platform + type: library + tags: [platform] + + - name: highlevel-functions-v2 + github_url: https://github.com/GoHighLevel/highlevel-functions-v2.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: highlevel-functions-v3 + github_url: https://github.com/GoHighLevel/highlevel-functions-v3.git + team: platform + type: 
service + tags: [typescript, nestjs, platform] + + - name: highlevel-html + github_url: https://github.com/GoHighLevel/highlevel-html.git + team: platform + type: other + tags: [html, platform] + + - name: highlevel-infrastructure + github_url: https://github.com/GoHighLevel/highlevel-infrastructure.git + team: platform + type: infra + tags: [lua, platform] + + - name: highlevel-jenkins-shared-libs + github_url: https://github.com/GoHighLevel/highlevel-jenkins-shared-libs.git + team: platform + type: library + tags: [platform] + + - name: highlevel-scraper + github_url: https://github.com/GoHighLevel/highlevel-scraper.git + team: platform + type: service + tags: [python, platform] + + - name: highlevel.handbook.github.io + github_url: https://github.com/GoHighLevel/highlevel.handbook.github.io.git + team: platform + type: other + tags: [html, platform] + + - name: highrise-figmagic + github_url: https://github.com/GoHighLevel/highrise-figmagic.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: highrise-flutter + github_url: https://github.com/GoHighLevel/highrise-flutter.git + team: platform + type: other + tags: [dart, platform] + + - name: highrise-next + github_url: https://github.com/GoHighLevel/highrise-next.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: highrise-nuxt-v3-v4 + github_url: https://github.com/GoHighLevel/highrise-nuxt-v3-v4.git + team: platform + type: frontend + tags: [vue, vue3, nuxt3, platform] + + - name: HighRise-Tokens + github_url: https://github.com/GoHighLevel/HighRise-Tokens.git + team: platform + type: service + tags: [python, platform] + + - name: HighSupply + github_url: https://github.com/GoHighLevel/HighSupply.git + team: platform + type: other + tags: [dart, platform] + + - name: hist + github_url: https://github.com/GoHighLevel/hist.git + team: platform + type: other + tags: [dockerfile, platform] + + - name: hl-base-utils + github_url: 
https://github.com/GoHighLevel/hl-base-utils.git + team: platform + type: library + tags: [typescript, platform] + + - name: hl-test-manager + github_url: https://github.com/GoHighLevel/hl-test-manager.git + team: platform + type: tests + tags: [vue, vue3, testing, platform] + + - name: hl-utils + github_url: https://github.com/GoHighLevel/hl-utils.git + team: platform + type: library + tags: [typescript, platform] + + - name: hubspot-importer + github_url: https://github.com/GoHighLevel/hubspot-importer.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: hubspot-importer-poc + github_url: https://github.com/GoHighLevel/hubspot-importer-poc.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: hugo-book + github_url: https://github.com/GoHighLevel/hugo-book.git + team: platform + type: other + tags: [html, platform] + + - name: I18_Translations_Detection_Plugin + github_url: https://github.com/GoHighLevel/I18_Translations_Detection_Plugin.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: i18n-analysis + github_url: https://github.com/GoHighLevel/i18n-analysis.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: i18n-as-a-service + github_url: https://github.com/GoHighLevel/i18n-as-a-service.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: i18n-test + github_url: https://github.com/GoHighLevel/i18n-test.git + team: platform + type: tests + tags: [vue, vue3, testing, platform] + + - name: i18n-validator + github_url: https://github.com/GoHighLevel/i18n-validator.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: ideas-board-vis-frontend + github_url: https://github.com/GoHighLevel/ideas-board-vis-frontend.git + team: platform + type: frontend + tags: [html, platform] + + - name: image-processing-service + github_url: 
https://github.com/GoHighLevel/image-processing-service.git + team: platform + type: service + tags: [go, platform] + + - name: infra-q2 + github_url: https://github.com/GoHighLevel/infra-q2.git + team: platform + type: other + tags: [platform] + + - name: infrastructure-as-a-code + github_url: https://github.com/GoHighLevel/infrastructure-as-a-code.git + team: platform + type: infra + tags: [hcl, platform] + + - name: instagram-webhook-native-posts + github_url: https://github.com/GoHighLevel/instagram-webhook-native-posts.git + team: platform + type: frontend + tags: [javascript, platform] + + - name: internal-api-documentation + github_url: https://github.com/GoHighLevel/internal-api-documentation.git + team: platform + type: service + tags: [platform] + + - name: internaltools-migrations + github_url: https://github.com/GoHighLevel/internaltools-migrations.git + team: platform + type: tooling + tags: [typescript, platform] + + - name: isv-monitoring-service + github_url: https://github.com/GoHighLevel/isv-monitoring-service.git + team: platform + type: service + tags: [platform] + + - name: Jobber-App-React + github_url: https://github.com/GoHighLevel/Jobber-App-React.git + team: platform + type: frontend + tags: [platform] + + - name: kubernetes-mixin + github_url: https://github.com/GoHighLevel/kubernetes-mixin.git + team: platform + type: other + tags: [platform] + + - name: langflow + github_url: https://github.com/GoHighLevel/langflow.git + team: platform + type: service + tags: [python, platform] + + - name: langfuse + github_url: https://github.com/GoHighLevel/langfuse.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: langfuse-region-migration + github_url: https://github.com/GoHighLevel/langfuse-region-migration.git + team: platform + type: tooling + tags: [python, platform] + + - name: lead-tracker + github_url: https://github.com/GoHighLevel/lead-tracker.git + team: platform + type: service + tags: [typescript, 
nestjs, platform] + + - name: leadgen-ad-publishing-frontend + github_url: https://github.com/GoHighLevel/leadgen-ad-publishing-frontend.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: leadgen-admin + github_url: https://github.com/GoHighLevel/leadgen-admin.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: leadgen-backend + github_url: https://github.com/GoHighLevel/leadgen-backend.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: leadgen-backend-python + github_url: https://github.com/GoHighLevel/leadgen-backend-python.git + team: platform + type: service + tags: [python, platform] + + - name: leadgen-cache-server + github_url: https://github.com/GoHighLevel/leadgen-cache-server.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: leadgen-customer-access-center + github_url: https://github.com/GoHighLevel/leadgen-customer-access-center.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: leadgen-fastpaydirect-static + github_url: https://github.com/GoHighLevel/leadgen-fastpaydirect-static.git + team: platform + type: other + tags: [html, platform] + + - name: leadgen-ipinfo + github_url: https://github.com/GoHighLevel/leadgen-ipinfo.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: leadgen-kaizen-backend + github_url: https://github.com/GoHighLevel/leadgen-kaizen-backend.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: leadgen-loyalty-frontend + github_url: https://github.com/GoHighLevel/leadgen-loyalty-frontend.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: leadgen-store-frontend + github_url: https://github.com/GoHighLevel/leadgen-store-frontend.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: leadgen-tests + github_url: 
https://github.com/GoHighLevel/leadgen-tests.git + team: platform + type: tests + tags: [typescript, testing, platform] + + - name: lighthouse-worker + github_url: https://github.com/GoHighLevel/lighthouse-worker.git + team: platform + type: service + tags: [typescript, nestjs, worker, platform] + + - name: localization-lib + github_url: https://github.com/GoHighLevel/localization-lib.git + team: platform + type: library + tags: [javascript, platform] + + - name: location-prospect + github_url: https://github.com/GoHighLevel/location-prospect.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: logger-rust + github_url: https://github.com/GoHighLevel/logger-rust.git + team: platform + type: service + tags: [rust, platform] + + - name: mail_beam + github_url: https://github.com/GoHighLevel/mail_beam.git + team: platform + type: other + tags: [php, platform] + + - name: manifest + github_url: https://github.com/GoHighLevel/manifest.git + team: platform + type: other + tags: [platform] + + - name: mcpserver-rules + github_url: https://github.com/GoHighLevel/mcpserver-rules.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: mimt-proxy + github_url: https://github.com/GoHighLevel/mimt-proxy.git + team: platform + type: service + tags: [python, platform] + + - name: mobile-backend + github_url: https://github.com/GoHighLevel/mobile-backend.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: mobile-patch-release-dispatch + github_url: https://github.com/GoHighLevel/mobile-patch-release-dispatch.git + team: platform + type: other + tags: [platform] + + - name: mobile-pipeline-auditor + github_url: https://github.com/GoHighLevel/mobile-pipeline-auditor.git + team: platform + type: infra + tags: [go, platform] + + - name: mobile-prds + github_url: https://github.com/GoHighLevel/mobile-prds.git + team: platform + type: other + tags: [css, platform] + + - name: 
mobile-whitelabelcustomizer-dasboard + github_url: https://github.com/GoHighLevel/mobile-whitelabelcustomizer-dasboard.git + team: platform + type: other + tags: [dart, platform] + + - name: mobile_native_app_theme + github_url: https://github.com/GoHighLevel/mobile_native_app_theme.git + team: platform + type: other + tags: [dart, platform] + + - name: Module-Federated-Code-generator + github_url: https://github.com/GoHighLevel/Module-Federated-Code-generator.git + team: platform + type: tooling + tags: [javascript, platform] + + - name: MoltClaw-by-HighLevel + github_url: https://github.com/GoHighLevel/MoltClaw-by-HighLevel.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: naive-ui + github_url: https://github.com/GoHighLevel/naive-ui.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: nginx-vod-module + github_url: https://github.com/GoHighLevel/nginx-vod-module.git + team: platform + type: service + tags: [go, platform] + + - name: nik-shivam + github_url: https://github.com/GoHighLevel/nik-shivam.git + team: platform + type: other + tags: [platform] + + - name: nodejs-logging + github_url: https://github.com/GoHighLevel/nodejs-logging.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: nodejs-logging-bunyan + github_url: https://github.com/GoHighLevel/nodejs-logging-bunyan.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: nuxt-highrise-module + github_url: https://github.com/GoHighLevel/nuxt-highrise-module.git + team: platform + type: service + tags: [typescript, nestjs, nuxt3, platform] + + - name: nuxt-highrise-ssr + github_url: https://github.com/GoHighLevel/nuxt-highrise-ssr.git + team: platform + type: service + tags: [typescript, nestjs, nuxt3, platform] + + - name: objective-builder-ui + github_url: https://github.com/GoHighLevel/objective-builder-ui.git + team: platform + type: frontend + tags: [vue, vue3, 
platform] + + - name: outscrapper-ghl + github_url: https://github.com/GoHighLevel/outscrapper-ghl.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: pdf-core-engine + github_url: https://github.com/GoHighLevel/pdf-core-engine.git + team: platform + type: library + tags: [typescript, platform] + + - name: platform-backend + github_url: https://github.com/GoHighLevel/platform-backend.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: platform-backend-demo + github_url: https://github.com/GoHighLevel/platform-backend-demo.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: platform-common-argo-apps + github_url: https://github.com/GoHighLevel/platform-common-argo-apps.git + team: platform + type: frontend + tags: [platform] + + - name: platform-common-helm-charts + github_url: https://github.com/GoHighLevel/platform-common-helm-charts.git + team: platform + type: library + tags: [go-template, platform] + + - name: platform-core + github_url: https://github.com/GoHighLevel/platform-core.git + team: platform + type: library + tags: [typescript, platform] + + - name: platform-devtools-backend + github_url: https://github.com/GoHighLevel/platform-devtools-backend.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: platform-devtools-frontend + github_url: https://github.com/GoHighLevel/platform-devtools-frontend.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: platform-docs + github_url: https://github.com/GoHighLevel/platform-docs.git + team: platform + type: docs + tags: [html, platform] + + - name: platform-experiments + github_url: https://github.com/GoHighLevel/platform-experiments.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: platform-frontend-backend + github_url: https://github.com/GoHighLevel/platform-frontend-backend.git + team: platform + type: 
service + tags: [platform] + + - name: platform-frontend-docs + github_url: https://github.com/GoHighLevel/platform-frontend-docs.git + team: platform + type: frontend + tags: [platform] + + - name: platform-frontend-playground + github_url: https://github.com/GoHighLevel/platform-frontend-playground.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: platform-infra-argo-apps + github_url: https://github.com/GoHighLevel/platform-infra-argo-apps.git + team: platform + type: frontend + tags: [platform] + + - name: platform-infra-helm-charts + github_url: https://github.com/GoHighLevel/platform-infra-helm-charts.git + team: platform + type: infra + tags: [mustache, platform] + + - name: platform-jenkins-shared-library + github_url: https://github.com/GoHighLevel/platform-jenkins-shared-library.git + team: platform + type: library + tags: [groovy, platform] + + - name: platform-planning-internal + github_url: https://github.com/GoHighLevel/platform-planning-internal.git + team: platform + type: other + tags: [shell, platform] + + - name: platform-pocs + github_url: https://github.com/GoHighLevel/platform-pocs.git + team: platform + type: service + tags: [python, platform] + + - name: platform-sample-java-app + github_url: https://github.com/GoHighLevel/platform-sample-java-app.git + team: platform + type: frontend + tags: [java, platform] + + - name: platform-sample-nodejs-app + github_url: https://github.com/GoHighLevel/platform-sample-nodejs-app.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: platform-shared-changes + github_url: https://github.com/GoHighLevel/platform-shared-changes.git + team: platform + type: library + tags: [go-template, platform] + + - name: platform-templates + github_url: https://github.com/GoHighLevel/platform-templates.git + team: platform + type: other + tags: [platform] + + - name: platform-terraform-gcp-infra + github_url: 
https://github.com/GoHighLevel/platform-terraform-gcp-infra.git + team: platform + type: infra + tags: [hcl, platform] + + - name: platform-terraform-gcp-modules + github_url: https://github.com/GoHighLevel/platform-terraform-gcp-modules.git + team: platform + type: infra + tags: [hcl, platform] + + - name: platform-ui + github_url: https://github.com/GoHighLevel/platform-ui.git + team: platform + type: frontend + tags: [typescript, platform] + + - name: pocketpub + github_url: https://github.com/GoHighLevel/pocketpub.git + team: platform + type: other + tags: [dart, platform] + + - name: pr-buddy + github_url: https://github.com/GoHighLevel/pr-buddy.git + team: platform + type: other + tags: [dockerfile, platform] + + - name: preference-management-frontend + github_url: https://github.com/GoHighLevel/preference-management-frontend.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: product-central + github_url: https://github.com/GoHighLevel/product-central.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: project-orion + github_url: https://github.com/GoHighLevel/project-orion.git + team: platform + type: other + tags: [html, platform] + + - name: pulse + github_url: https://github.com/GoHighLevel/pulse.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: push-docker-gcr + github_url: https://github.com/GoHighLevel/push-docker-gcr.git + team: platform + type: infra + tags: [shell, platform] + + - name: quality-gates + github_url: https://github.com/GoHighLevel/quality-gates.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: quickchart + github_url: https://github.com/GoHighLevel/quickchart.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: rca-analysis + github_url: https://github.com/GoHighLevel/rca-analysis.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - 
name: rdialr + github_url: https://github.com/GoHighLevel/rdialr.git + team: platform + type: service + tags: [go, platform] + + - name: redis-backup-cloud-function-gcp + github_url: https://github.com/GoHighLevel/redis-backup-cloud-function-gcp.git + team: platform + type: service + tags: [python, platform] + + - name: revops-mozart-transforms + github_url: https://github.com/GoHighLevel/revops-mozart-transforms.git + team: platform + type: other + tags: [platform] + + - name: revops-transcription-app + github_url: https://github.com/GoHighLevel/revops-transcription-app.git + team: platform + type: frontend + tags: [javascript, platform] + + - name: revops-transcription-app-ooh + github_url: https://github.com/GoHighLevel/revops-transcription-app-ooh.git + team: platform + type: frontend + tags: [javascript, platform] + + - name: Sandbox + github_url: https://github.com/GoHighLevel/Sandbox.git + team: platform + type: tooling + tags: [javascript, platform] + + - name: screenshot-service + github_url: https://github.com/GoHighLevel/screenshot-service.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: sdet-performance-test + github_url: https://github.com/GoHighLevel/sdet-performance-test.git + team: platform + type: tests + tags: [typescript, testing, platform] + + - name: sdet-platform + github_url: https://github.com/GoHighLevel/sdet-platform.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: sdet-platform-backend + github_url: https://github.com/GoHighLevel/sdet-platform-backend.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: sdet-platform-frontend + github_url: https://github.com/GoHighLevel/sdet-platform-frontend.git + team: platform + type: frontend + tags: [vue, vue3, platform] + + - name: sdet-platform-performance-test + github_url: https://github.com/GoHighLevel/sdet-platform-performance-test.git + team: platform + type: tests + tags: 
[typescript, testing, platform] + + - name: seed-module + github_url: https://github.com/GoHighLevel/seed-module.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: sentry + github_url: https://github.com/GoHighLevel/sentry.git + team: platform + type: other + tags: [shell, platform] + + - name: single-endpoint-get-by-id-servers + github_url: https://github.com/GoHighLevel/single-endpoint-get-by-id-servers.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: single-endpoint-servers + github_url: https://github.com/GoHighLevel/single-endpoint-servers.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: sonarcloud-test-repo-public + github_url: https://github.com/GoHighLevel/sonarcloud-test-repo-public.git + team: platform + type: tests + tags: [testing, platform] + + - name: sonarqube-jenkins-test + github_url: https://github.com/GoHighLevel/sonarqube-jenkins-test.git + team: platform + type: tests + tags: [typescript, testing, platform] + + - name: sonarqube-jenkins-test-2 + github_url: https://github.com/GoHighLevel/sonarqube-jenkins-test-2.git + team: platform + type: tests + tags: [typescript, testing, platform] + + - name: Squire + github_url: https://github.com/GoHighLevel/Squire.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: sravanth-docs + github_url: https://github.com/GoHighLevel/sravanth-docs.git + team: platform + type: docs + tags: [html, platform] + + - name: ssl-clerk + github_url: https://github.com/GoHighLevel/ssl-clerk.git + team: platform + type: service + tags: [python, platform] + + - name: supportAILabs + github_url: https://github.com/GoHighLevel/supportAILabs.git + team: platform + type: other + tags: [platform] + + - name: test-repo + github_url: https://github.com/GoHighLevel/test-repo.git + team: platform + type: tests + tags: [testing, platform] + + - name: TPRA + github_url: 
https://github.com/GoHighLevel/TPRA.git + team: platform + type: other + tags: [platform] + + - name: traffic-cop + github_url: https://github.com/GoHighLevel/traffic-cop.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: ui-ux-gap-analysis + github_url: https://github.com/GoHighLevel/ui-ux-gap-analysis.git + team: platform + type: other + tags: [platform] + + - name: update-recent-message-service + github_url: https://github.com/GoHighLevel/update-recent-message-service.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: utils + github_url: https://github.com/GoHighLevel/utils.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: vibe-builder + github_url: https://github.com/GoHighLevel/vibe-builder.git + team: platform + type: service + tags: [python, platform] + + - name: vibe-creator + github_url: https://github.com/GoHighLevel/vibe-creator.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: vibe-platform + github_url: https://github.com/GoHighLevel/vibe-platform.git + team: platform + type: service + tags: [go, platform] + + - name: video-transcoding-service + github_url: https://github.com/GoHighLevel/video-transcoding-service.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: vue-ssr-demo + github_url: https://github.com/GoHighLevel/vue-ssr-demo.git + team: platform + type: tooling + tags: [typescript, platform] + + - name: webstore-extensions + github_url: https://github.com/GoHighLevel/webstore-extensions.git + team: platform + type: other + tags: [platform] + + - name: whitelabel-customizer-frontend + github_url: https://github.com/GoHighLevel/whitelabel-customizer-frontend.git + team: platform + type: frontend + tags: [dart, platform] + + - name: wordpress-core + github_url: https://github.com/GoHighLevel/wordpress-core.git + team: platform + type: library + tags: [platform] + 
+ - name: wordpress-uptime-monitor + github_url: https://github.com/GoHighLevel/wordpress-uptime-monitor.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: Wordpress-V2-Support + github_url: https://github.com/GoHighLevel/Wordpress-V2-Support.git + team: platform + type: service + tags: [javascript, nestjs, platform] + + - name: wordpress-widget + github_url: https://github.com/GoHighLevel/wordpress-widget.git + team: platform + type: frontend + tags: [javascript, platform] + + - name: wordpress_plugins + github_url: https://github.com/GoHighLevel/wordpress_plugins.git + team: platform + type: other + tags: [php, platform] + + - name: yarn-poc + github_url: https://github.com/GoHighLevel/yarn-poc.git + team: platform + type: other + tags: [platform] + + - name: yarn-v4-nest-poc + github_url: https://github.com/GoHighLevel/yarn-v4-nest-poc.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + - name: zoom-scribe + github_url: https://github.com/GoHighLevel/zoom-scribe.git + team: platform + type: service + tags: [typescript, nestjs, platform] + + # ──────────────────── REVEX ───────────────────────── + - name: assets-drm-client + github_url: https://github.com/GoHighLevel/assets-drm-client.git + team: revex + type: library + tags: [vue, vue3, revex] + + - name: automation-am-client-portal + github_url: https://github.com/GoHighLevel/automation-am-client-portal.git + team: revex + type: frontend + tags: [vue, vue3, revex] + + - name: communities-flutter-poc + github_url: https://github.com/GoHighLevel/communities-flutter-poc.git + team: revex + type: other + tags: [dart, revex] + + - name: ghl-membership-frontend + github_url: https://github.com/GoHighLevel/ghl-membership-frontend.git + team: revex + type: frontend + tags: [typescript, revex] + + - name: ghl-revex-backend + github_url: https://github.com/GoHighLevel/ghl-revex-backend.git + team: revex + type: service + tags: [typescript, nestjs, revex] + 
+ - name: ghl-revex-clientportal-apps + github_url: https://github.com/GoHighLevel/ghl-revex-clientportal-apps.git + team: revex + type: frontend + tags: [revex] + + - name: ghl-revex-frontend + github_url: https://github.com/GoHighLevel/ghl-revex-frontend.git + team: revex + type: frontend + tags: [vue, vue3, revex] + + - name: ghl-revex-interviews + github_url: https://github.com/GoHighLevel/ghl-revex-interviews.git + team: revex + type: service + tags: [typescript, nestjs, revex] + + - name: ghl-revex-membership-frontend + github_url: https://github.com/GoHighLevel/ghl-revex-membership-frontend.git + team: revex + type: frontend + tags: [javascript, revex] + + - name: membership-backend + github_url: https://github.com/GoHighLevel/membership-backend.git + team: revex + type: service + tags: [typescript, nestjs, revex] + + - name: membership-flutter-app + github_url: https://github.com/GoHighLevel/membership-flutter-app.git + team: revex + type: frontend + tags: [dart, revex] + + - name: membership-highline + github_url: https://github.com/GoHighLevel/membership-highline.git + team: revex + type: other + tags: [dart, revex] + + - name: membership-hmi-app + github_url: https://github.com/GoHighLevel/membership-hmi-app.git + team: revex + type: frontend + tags: [vue, vue3, revex] + + - name: membership-hmi-preview + github_url: https://github.com/GoHighLevel/membership-hmi-preview.git + team: revex + type: frontend + tags: [vue, vue3, revex] + + - name: membership-ui-core + github_url: https://github.com/GoHighLevel/membership-ui-core.git + team: revex + type: frontend + tags: [typescript, revex] + + - name: revex-pyrw-dev-helper-chrome-ext + github_url: https://github.com/GoHighLevel/revex-pyrw-dev-helper-chrome-ext.git + team: revex + type: service + tags: [javascript, nestjs, revex] + + - name: revex-tests + github_url: https://github.com/GoHighLevel/revex-tests.git + team: revex + type: tests + tags: [typescript, testing, revex] + + - name: 
revex-tools-pyrw-audit-and-automation + github_url: https://github.com/GoHighLevel/revex-tools-pyrw-audit-and-automation.git + team: revex + type: tooling + tags: [javascript, revex] + + - name: revex-wordpress-internal-tools + github_url: https://github.com/GoHighLevel/revex-wordpress-internal-tools.git + team: revex + type: tooling + tags: [javascript, revex] + + - name: revex-wordpress-lc-easy-migrator + github_url: https://github.com/GoHighLevel/revex-wordpress-lc-easy-migrator.git + team: revex + type: service + tags: [javascript, nestjs, revex] + + - name: revex-wordpress-lc-easy-migrator-front-end + github_url: https://github.com/GoHighLevel/revex-wordpress-lc-easy-migrator-front-end.git + team: revex + type: frontend + tags: [vue, vue3, revex] + + - name: revex-wordpress-leadconnector-plugin + github_url: https://github.com/GoHighLevel/revex-wordpress-leadconnector-plugin.git + team: revex + type: service + tags: [javascript, nestjs, revex] + + - name: revex-wordpress-leadconnector-plugin-frontend + github_url: https://github.com/GoHighLevel/revex-wordpress-leadconnector-plugin-frontend.git + team: revex + type: frontend + tags: [vue, vue3, revex] + + - name: revex-wordpress-threatlens + github_url: https://github.com/GoHighLevel/revex-wordpress-threatlens.git + team: revex + type: service + tags: [python, revex] + + - name: RevexMobileTestAutomation + github_url: https://github.com/GoHighLevel/RevexMobileTestAutomation.git + team: revex + type: tests + tags: [javascript, testing, revex] + + # ──────────────────── CRM ─────────────────────────── + - name: appengine-local-taskqueue + github_url: https://github.com/GoHighLevel/appengine-local-taskqueue.git + team: crm + type: service + tags: [javascript, nestjs, worker, crm] + + - name: chrome-ext-crm + github_url: https://github.com/GoHighLevel/chrome-ext-crm.git + team: crm + type: service + tags: [javascript, nestjs, crm] + + - name: core-crm-tests + github_url: 
https://github.com/GoHighLevel/core-crm-tests.git + team: crm + type: tests + tags: [typescript, testing, crm] + + - name: crm-common-libs + github_url: https://github.com/GoHighLevel/crm-common-libs.git + team: crm + type: library + tags: [typescript, crm] + + - name: crm-extension-privacy-policy + github_url: https://github.com/GoHighLevel/crm-extension-privacy-policy.git + team: crm + type: other + tags: [crm] + + - name: flutter_contacts + github_url: https://github.com/GoHighLevel/flutter_contacts.git + team: crm + type: other + tags: [dart, crm] + + - name: ghl-crm-frontend + github_url: https://github.com/GoHighLevel/ghl-crm-frontend.git + team: crm + type: frontend + tags: [vue, vue3, crm] + + - name: vibe-tagger + github_url: https://github.com/GoHighLevel/vibe-tagger.git + team: crm + type: service + tags: [typescript, nestjs, crm] + + # ──────────────────── CONVERSATIONS ───────────────── + - name: ghl-chat-widget + github_url: https://github.com/GoHighLevel/ghl-chat-widget.git + team: conversations + type: frontend + tags: [vue, vue3, conversations] + + - name: ghl-email-builder + github_url: https://github.com/GoHighLevel/ghl-email-builder.git + team: conversations + type: frontend + tags: [vue, vue3, conversations] + + - name: ghl-smtp-service + github_url: https://github.com/GoHighLevel/ghl-smtp-service.git + team: conversations + type: service + tags: [javascript, nestjs, conversations] + + - name: py-chatbot + github_url: https://github.com/GoHighLevel/py-chatbot.git + team: conversations + type: service + tags: [python, conversations] + + - name: revops-chatgpt-mcp-snowflake-server + github_url: https://github.com/GoHighLevel/revops-chatgpt-mcp-snowflake-server.git + team: conversations + type: service + tags: [javascript, nestjs, mcp, conversations] + + - name: whatsapp-analytics-backup-scipts + github_url: https://github.com/GoHighLevel/whatsapp-analytics-backup-scipts.git + team: conversations + type: service + tags: [python, conversations] + + 
# ──────────────────── CALENDARS ───────────────────── + - name: abhi_collective_calendar + github_url: https://github.com/GoHighLevel/abhi_collective_calendar.git + team: calendars + type: other + tags: [calendars] + + - name: assignment_calendar + github_url: https://github.com/GoHighLevel/assignment_calendar.git + team: calendars + type: service + tags: [typescript, nestjs, calendars] + + - name: automation-calendars-deep-links + github_url: https://github.com/GoHighLevel/automation-calendars-deep-links.git + team: calendars + type: service + tags: [java, calendars] + + - name: automation-calendars-frontend + github_url: https://github.com/GoHighLevel/automation-calendars-frontend.git + team: calendars + type: frontend + tags: [vue, vue3, calendars] + + - name: automation-calendars-frontend-monorepo + github_url: https://github.com/GoHighLevel/automation-calendars-frontend-monorepo.git + team: calendars + type: frontend + tags: [vue, vue3, calendars] + + - name: automation-calendars-preview + github_url: https://github.com/GoHighLevel/automation-calendars-preview.git + team: calendars + type: frontend + tags: [typescript, calendars] + + - name: automation-calendars-reserve-backend + github_url: https://github.com/GoHighLevel/automation-calendars-reserve-backend.git + team: calendars + type: service + tags: [typescript, nestjs, calendars] + + - name: calendars-learning-go + github_url: https://github.com/GoHighLevel/calendars-learning-go.git + team: calendars + type: other + tags: [calendars] + + - name: ghl-calendars-ai-skills + github_url: https://github.com/GoHighLevel/ghl-calendars-ai-skills.git + team: calendars + type: service + tags: [go, calendars] + + - name: ghl-calendars-platform + github_url: https://github.com/GoHighLevel/ghl-calendars-platform.git + team: calendars + type: service + tags: [go, calendars] + + - name: schedulers_dart + github_url: https://github.com/GoHighLevel/schedulers_dart.git + team: calendars + type: other + tags: [dart, 
calendars] + + - name: vue-tuicalendar + github_url: https://github.com/GoHighLevel/vue-tuicalendar.git + team: calendars + type: service + tags: [javascript, nestjs, calendars] + + # ──────────────────── FUNNELS ─────────────────────── + - name: builder-preview + github_url: https://github.com/GoHighLevel/builder-preview.git + team: funnels + type: frontend + tags: [vue, vue3, funnels] + + - name: funnel-preview-cache + github_url: https://github.com/GoHighLevel/funnel-preview-cache.git + team: funnels + type: frontend + tags: [typescript, funnels] + + - name: ghl-blogging + github_url: https://github.com/GoHighLevel/ghl-blogging.git + team: funnels + type: frontend + tags: [vue, vue3, funnels] + + - name: ghl-form-ai-studio + github_url: https://github.com/GoHighLevel/ghl-form-ai-studio.git + team: funnels + type: service + tags: [typescript, nestjs, funnels] + + - name: ghl-form-element + github_url: https://github.com/GoHighLevel/ghl-form-element.git + team: funnels + type: frontend + tags: [vue, vue3, funnels] + + - name: ghl-form-embed + github_url: https://github.com/GoHighLevel/ghl-form-embed.git + team: funnels + type: service + tags: [typescript, nestjs, funnels] + + - name: ghl-form-survey + github_url: https://github.com/GoHighLevel/ghl-form-survey.git + team: funnels + type: frontend + tags: [vue, vue3, funnels] + + - name: ghl-funnel-website + github_url: https://github.com/GoHighLevel/ghl-funnel-website.git + team: funnels + type: frontend + tags: [vue, vue3, funnels] + + - name: leadgen-funnels-backend + github_url: https://github.com/GoHighLevel/leadgen-funnels-backend.git + team: funnels + type: service + tags: [funnels] + + - name: page-builder + github_url: https://github.com/GoHighLevel/page-builder.git + team: funnels + type: frontend + tags: [vue, vue3, funnels] + + - name: spm-appengine + github_url: https://github.com/GoHighLevel/spm-appengine.git + team: funnels + type: frontend + tags: [typescript, funnels] + + - name: spm-proxy-server + 
github_url: https://github.com/GoHighLevel/spm-proxy-server.git + team: funnels + type: service + tags: [javascript, nestjs, funnels] + + - name: spm-ts + github_url: https://github.com/GoHighLevel/spm-ts.git + team: funnels + type: frontend + tags: [vue, vue3, funnels] + + # ──────────────────── PAYMENTS ────────────────────── + - name: affiliate-signup-page + github_url: https://github.com/GoHighLevel/affiliate-signup-page.git + team: payments + type: frontend + tags: [vue, vue3, payments] + + - name: authorize-net-playground + github_url: https://github.com/GoHighLevel/authorize-net-playground.git + team: payments + type: tooling + tags: [typescript, payments] + + - name: dev-commerce-subscriptionsx + github_url: https://github.com/GoHighLevel/dev-commerce-subscriptionsx.git + team: payments + type: other + tags: [payments] + + - name: ghl-invoice-preview + github_url: https://github.com/GoHighLevel/ghl-invoice-preview.git + team: payments + type: frontend + tags: [vue, vue3, payments] + + - name: ghl-leadgen-payments + github_url: https://github.com/GoHighLevel/ghl-leadgen-payments.git + team: payments + type: frontend + tags: [vue, vue3, payments] + + - name: ghl-payment-element + github_url: https://github.com/GoHighLevel/ghl-payment-element.git + team: payments + type: frontend + tags: [vue, vue3, payments] + + - name: ghl-payments-flutter + github_url: https://github.com/GoHighLevel/ghl-payments-flutter.git + team: payments + type: other + tags: [swift, payments] + + - name: leadgen-payment-products-backend + github_url: https://github.com/GoHighLevel/leadgen-payment-products-backend.git + team: payments + type: service + tags: [payments] + + - name: mobile-square-in-app-payments + github_url: https://github.com/GoHighLevel/mobile-square-in-app-payments.git + team: payments + type: frontend + tags: [payments] + + - name: module-stripe + github_url: https://github.com/GoHighLevel/module-stripe.git + team: payments + type: service + tags: [typescript, nestjs, 
payments] + + - name: payment-products-preview + github_url: https://github.com/GoHighLevel/payment-products-preview.git + team: payments + type: frontend + tags: [vue, vue3, payments] + + - name: payment-service + github_url: https://github.com/GoHighLevel/payment-service.git + team: payments + type: service + tags: [typescript, nestjs, payments] + + # ──────────────────── MARKETING ───────────────────── + - name: automation-am-external-script + github_url: https://github.com/GoHighLevel/automation-am-external-script.git + team: marketing + type: tooling + tags: [typescript, marketing] + + - name: automation-am-frontend + github_url: https://github.com/GoHighLevel/automation-am-frontend.git + team: marketing + type: frontend + tags: [vue, vue3, marketing] + + - name: automation-am-reward-fronted + github_url: https://github.com/GoHighLevel/automation-am-reward-fronted.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: automation-apps-backend + github_url: https://github.com/GoHighLevel/automation-apps-backend.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: automation-data-bi-platform + github_url: https://github.com/GoHighLevel/automation-data-bi-platform.git + team: marketing + type: service + tags: [python, marketing] + + - name: automation-eliza-backend + github_url: https://github.com/GoHighLevel/automation-eliza-backend.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: automation-eliza-frontend + github_url: https://github.com/GoHighLevel/automation-eliza-frontend.git + team: marketing + type: frontend + tags: [vue, vue3, marketing] + + - name: automation-migration + github_url: https://github.com/GoHighLevel/automation-migration.git + team: marketing + type: tooling + tags: [typescript, marketing] + + - name: automation-next-apps-backend + github_url: https://github.com/GoHighLevel/automation-next-apps-backend.git + team: marketing + type: 
service + tags: [go, marketing] + + - name: automation-sync-engine + github_url: https://github.com/GoHighLevel/automation-sync-engine.git + team: marketing + type: other + tags: [marketing] + + - name: automation-workflows-ai + github_url: https://github.com/GoHighLevel/automation-workflows-ai.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: automation-workflows-ai-pilot + github_url: https://github.com/GoHighLevel/automation-workflows-ai-pilot.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: automation-workflows-backend + github_url: https://github.com/GoHighLevel/automation-workflows-backend.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: automation-workflows-frontend + github_url: https://github.com/GoHighLevel/automation-workflows-frontend.git + team: marketing + type: frontend + tags: [typescript, marketing] + + - name: automation-workflows-iatf-ai-agent + github_url: https://github.com/GoHighLevel/automation-workflows-iatf-ai-agent.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: automation-workflows-iatf-frontend + github_url: https://github.com/GoHighLevel/automation-workflows-iatf-frontend.git + team: marketing + type: frontend + tags: [vue, vue3, marketing] + + - name: automation-workflows-ui-mcp + github_url: https://github.com/GoHighLevel/automation-workflows-ui-mcp.git + team: marketing + type: frontend + tags: [typescript, mcp, marketing] + + - name: automation-workflows-validators + github_url: https://github.com/GoHighLevel/automation-workflows-validators.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: Calender_Automation_Assignment_Daksh + github_url: https://github.com/GoHighLevel/Calender_Automation_Assignment_Daksh.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: clickup-automation + github_url: 
https://github.com/GoHighLevel/clickup-automation.git + team: marketing + type: other + tags: [marketing] + + - name: doc-preview + github_url: https://github.com/GoHighLevel/doc-preview.git + team: marketing + type: frontend + tags: [vue, vue3, marketing] + + - name: domain-reputation + github_url: https://github.com/GoHighLevel/domain-reputation.git + team: marketing + type: service + tags: [python, marketing] + + - name: email-preview + github_url: https://github.com/GoHighLevel/email-preview.git + team: marketing + type: frontend + tags: [vue, vue3, marketing] + + - name: ghl-mobileAutomation + github_url: https://github.com/GoHighLevel/ghl-mobileAutomation.git + team: marketing + type: service + tags: [java, marketing] + + - name: ghl-social-media-external + github_url: https://github.com/GoHighLevel/ghl-social-media-external.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: ghl-social-media-posting + github_url: https://github.com/GoHighLevel/ghl-social-media-posting.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: Gokollab-Native-Automation + github_url: https://github.com/GoHighLevel/Gokollab-Native-Automation.git + team: marketing + type: service + tags: [javascript, nestjs, marketing] + + - name: hiring-live-ai-workflows + github_url: https://github.com/GoHighLevel/hiring-live-ai-workflows.git + team: marketing + type: other + tags: [marketing] + + - name: hl-automation-project-template + github_url: https://github.com/GoHighLevel/hl-automation-project-template.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: leadgen-store-preview + github_url: https://github.com/GoHighLevel/leadgen-store-preview.git + team: marketing + type: frontend + tags: [marketing] + + - name: marketplace-app-review-agents + github_url: https://github.com/GoHighLevel/marketplace-app-review-agents.git + team: marketing + type: frontend + tags: [javascript, 
marketing] + + - name: private-github-workflows + github_url: https://github.com/GoHighLevel/private-github-workflows.git + team: marketing + type: service + tags: [javascript, nestjs, marketing] + + - name: revops-automation + github_url: https://github.com/GoHighLevel/revops-automation.git + team: marketing + type: service + tags: [python, marketing] + + - name: WhiteLabel_Automation + github_url: https://github.com/GoHighLevel/WhiteLabel_Automation.git + team: marketing + type: other + tags: [shell, marketing] + + - name: workflow-importers-IR-model + github_url: https://github.com/GoHighLevel/workflow-importers-IR-model.git + team: marketing + type: service + tags: [typescript, nestjs, marketing] + + - name: workflow-mcp-server + github_url: https://github.com/GoHighLevel/workflow-mcp-server.git + team: marketing + type: service + tags: [javascript, nestjs, mcp, marketing] + + # ──────────────────── PHONE ───────────────────────── + - name: flutter_libphonenumber + github_url: https://github.com/GoHighLevel/flutter_libphonenumber.git + team: phone + type: other + tags: [dart, phone] + + - name: twilio_voice_federated + github_url: https://github.com/GoHighLevel/twilio_voice_federated.git + team: phone + type: service + tags: [kotlin, phone] + + - name: voice-ai-mindcast + github_url: https://github.com/GoHighLevel/voice-ai-mindcast.git + team: phone + type: service + tags: [go, phone] + + # ──────────────────── REPORTING ───────────────────── + - name: data-dbt-analytics + github_url: https://github.com/GoHighLevel/data-dbt-analytics.git + team: reporting + type: other + tags: [reporting] + + - name: data-dbt-data-foundation + github_url: https://github.com/GoHighLevel/data-dbt-data-foundation.git + team: reporting + type: other + tags: [jupyter-notebook, reporting] + + - name: data-dbt-starburst + github_url: https://github.com/GoHighLevel/data-dbt-starburst.git + team: reporting + type: other + tags: [reporting] + + - name: data-platform-core + github_url: 
https://github.com/GoHighLevel/data-platform-core.git + team: reporting + type: library + tags: [java, reporting] + + - name: ghl-attribution-external-script + github_url: https://github.com/GoHighLevel/ghl-attribution-external-script.git + team: reporting + type: tooling + tags: [typescript, reporting] + + - name: leadgen-reporting-ads-backend + github_url: https://github.com/GoHighLevel/leadgen-reporting-ads-backend.git + team: reporting + type: service + tags: [python, reporting] + + - name: leadgen-reporting-ai + github_url: https://github.com/GoHighLevel/leadgen-reporting-ai.git + team: reporting + type: other + tags: [reporting] + + - name: leadgen-reporting-attribution-backend + github_url: https://github.com/GoHighLevel/leadgen-reporting-attribution-backend.git + team: reporting + type: service + tags: [typescript, nestjs, reporting] + + - name: leadgen-reporting-frontend + github_url: https://github.com/GoHighLevel/leadgen-reporting-frontend.git + team: reporting + type: frontend + tags: [vue, vue3, reporting] + + - name: leadgen-reporting-messages-backend + github_url: https://github.com/GoHighLevel/leadgen-reporting-messages-backend.git + team: reporting + type: service + tags: [typescript, nestjs, reporting] + + - name: marketplace-reporting-scripts + github_url: https://github.com/GoHighLevel/marketplace-reporting-scripts.git + team: reporting + type: tooling + tags: [javascript, reporting] + + # ──────────────────── SAAS ────────────────────────── + - name: AgencyUX + github_url: https://github.com/GoHighLevel/AgencyUX.git + team: saas + type: frontend + tags: [vue, vue3, saas] + + - name: ai-marketplace-tests + github_url: https://github.com/GoHighLevel/ai-marketplace-tests.git + team: saas + type: tests + tags: [typescript, testing, saas] + + - name: ghl-marketplace-app-template + github_url: https://github.com/GoHighLevel/ghl-marketplace-app-template.git + team: saas + type: frontend + tags: [typescript, saas] + + - name: 
leadgen-marketplace-backend + github_url: https://github.com/GoHighLevel/leadgen-marketplace-backend.git + team: saas + type: service + tags: [typescript, nestjs, saas] + + - name: marketplace-backend + github_url: https://github.com/GoHighLevel/marketplace-backend.git + team: saas + type: service + tags: [typescript, nestjs, saas] + + - name: marketplace-backend-demo + github_url: https://github.com/GoHighLevel/marketplace-backend-demo.git + team: saas + type: service + tags: [typescript, nestjs, saas] + + - name: marketplace-frontend + github_url: https://github.com/GoHighLevel/marketplace-frontend.git + team: saas + type: frontend + tags: [vue, vue3, saas] + + - name: saas-service + github_url: https://github.com/GoHighLevel/saas-service.git + team: saas + type: service + tags: [typescript, nestjs, saas] + + # ──────────────────── INTEGRATIONS ────────────────── + - name: highlevel-zapier + github_url: https://github.com/GoHighLevel/highlevel-zapier.git + team: integrations + type: service + tags: [javascript, nestjs, integrations] + + - name: hr-integration + github_url: https://github.com/GoHighLevel/hr-integration.git + team: integrations + type: frontend + tags: [vue, vue3, integrations] + + - name: integration-core + github_url: https://github.com/GoHighLevel/integration-core.git + team: integrations + type: library + tags: [dockerfile, integrations] + + - name: leadconnector + github_url: https://github.com/GoHighLevel/leadconnector.git + team: integrations + type: service + tags: [typescript, nestjs, integrations] + + - name: leadconnector-plugin-wordpress + github_url: https://github.com/GoHighLevel/leadconnector-plugin-wordpress.git + team: integrations + type: other + tags: [php, integrations] + + - name: oauth-demo + github_url: https://github.com/GoHighLevel/oauth-demo.git + team: integrations + type: tooling + tags: [javascript, integrations] + + # ──────────────────── AI ──────────────────────────── + - name: ai-backend + github_url: 
https://github.com/GoHighLevel/ai-backend.git + team: ai + type: service + tags: [typescript, nestjs, ai] + + - name: ai-employees-evals + github_url: https://github.com/GoHighLevel/ai-employees-evals.git + team: ai + type: tests + tags: [javascript, testing, ai] + + - name: ai-frontend + github_url: https://github.com/GoHighLevel/ai-frontend.git + team: ai + type: frontend + tags: [vue, vue3, ai] + + - name: ai-partners-frontend + github_url: https://github.com/GoHighLevel/ai-partners-frontend.git + team: ai + type: frontend + tags: [ai] + + - name: ai-supervisor-prototype + github_url: https://github.com/GoHighLevel/ai-supervisor-prototype.git + team: ai + type: tooling + tags: [vue, vue3, ai] + + - name: evaluations-ai-frontend + github_url: https://github.com/GoHighLevel/evaluations-ai-frontend.git + team: ai + type: frontend + tags: [vue, vue3, testing, ai] + + - name: ghl-ai-skills + github_url: https://github.com/GoHighLevel/ghl-ai-skills.git + team: ai + type: other + tags: [shell, ai] + + - name: ghl-ai-test-generator + github_url: https://github.com/GoHighLevel/ghl-ai-test-generator.git + team: ai + type: tests + tags: [javascript, testing, ai] + + - name: ghl-aip + github_url: https://github.com/GoHighLevel/ghl-aip.git + team: ai + type: other + tags: [ai] + + - name: ghl-content-ai + github_url: https://github.com/GoHighLevel/ghl-content-ai.git + team: ai + type: frontend + tags: [vue, vue3, ai] + + - name: ghl-rag-framework + github_url: https://github.com/GoHighLevel/ghl-rag-framework.git + team: ai + type: library + tags: [javascript, ai] + + - name: highlevel-employee-portal + github_url: https://github.com/GoHighLevel/highlevel-employee-portal.git + team: ai + type: frontend + tags: [vue, vue3, ai] + + - name: onboarding-fuzzy-inference + github_url: https://github.com/GoHighLevel/onboarding-fuzzy-inference.git + team: ai + type: service + tags: [typescript, nestjs, ai] + + - name: onboarding-fuzzy-inference-system + github_url: 
https://github.com/GoHighLevel/onboarding-fuzzy-inference-system.git + team: ai + type: other + tags: [ai] + + - name: platform-ai + github_url: https://github.com/GoHighLevel/platform-ai.git + team: ai + type: service + tags: [python, ai] + + - name: vertical-ai + github_url: https://github.com/GoHighLevel/vertical-ai.git + team: ai + type: service + tags: [typescript, nestjs, ai] + + - name: visibility-ai + github_url: https://github.com/GoHighLevel/visibility-ai.git + team: ai + type: other + tags: [ai] + + - name: zai-demo + github_url: https://github.com/GoHighLevel/zai-demo.git + team: ai + type: tooling + tags: [ai] diff --git a/cloudbuild.ghl.yaml b/cloudbuild.ghl.yaml new file mode 100644 index 00000000..c0666a00 --- /dev/null +++ b/cloudbuild.ghl.yaml @@ -0,0 +1,17 @@ +steps: + - name: 'gcr.io/cloud-builders/docker' + args: + - build + - -f + - Dockerfile.ghl + - -t + - gcr.io/$PROJECT_ID/codebase-memory-mcp-ghl:latest + - . + timeout: 1200s + +images: + - gcr.io/$PROJECT_ID/codebase-memory-mcp-ghl:latest + +options: + machineType: E2_HIGHCPU_32 + logging: CLOUD_LOGGING_ONLY diff --git a/deployments/ghl/helm/Chart.yaml b/deployments/ghl/helm/Chart.yaml new file mode 100644 index 00000000..7f7d1f63 --- /dev/null +++ b/deployments/ghl/helm/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v2 +name: codebase-memory-mcp +description: GHL fleet server for codebase-memory-mcp — indexes all 200 GHL repos and exposes them via an HTTP MCP endpoint +type: application +version: 0.1.0 +appVersion: "1.0.0" +keywords: + - mcp + - code-intelligence + - ai + - ghl +home: https://github.com/GoHighLevel/codebase-memory-mcp +sources: + - https://github.com/GoHighLevel/codebase-memory-mcp +maintainers: + - name: platform-infra + email: platform@gohighlevel.com diff --git a/deployments/ghl/helm/templates/_helpers.tpl b/deployments/ghl/helm/templates/_helpers.tpl new file mode 100644 index 00000000..84da1556 --- /dev/null +++ b/deployments/ghl/helm/templates/_helpers.tpl @@ -0,0 +1,67 
@@ +{{/* +Expand the name of the chart. +*/}} +{{- define "codebase-memory-mcp.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +*/}} +{{- define "codebase-memory-mcp.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart label. +*/}} +{{- define "codebase-memory-mcp.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels. +*/}} +{{- define "codebase-memory-mcp.labels" -}} +helm.sh/chart: {{ include "codebase-memory-mcp.chart" . }} +{{ include "codebase-memory-mcp.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels. +*/}} +{{- define "codebase-memory-mcp.selectorLabels" -}} +app.kubernetes.io/name: {{ include "codebase-memory-mcp.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +ServiceAccount name. +*/}} +{{- define "codebase-memory-mcp.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "codebase-memory-mcp.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Image tag (defaults to appVersion). 
+*/}} +{{- define "codebase-memory-mcp.imageTag" -}} +{{- .Values.image.tag | default .Chart.AppVersion }} +{{- end }} diff --git a/deployments/ghl/helm/templates/configmap.yaml b/deployments/ghl/helm/templates/configmap.yaml new file mode 100644 index 00000000..7319744a --- /dev/null +++ b/deployments/ghl/helm/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.reposManifest.configMap.enabled -}} +# Optional: override REPOS.yaml from a ConfigMap instead of baking it into the image. +# Set reposManifest.configMap.enabled=true and supply the full REPOS.yaml content +# in a values override or via --set-file. +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.reposManifest.configMap.name | default (printf "%s-repos" (include "codebase-memory-mcp.fullname" .)) }} + labels: + {{- include "codebase-memory-mcp.labels" . | nindent 4 }} +data: + REPOS.yaml: | + # Populated at deploy time via --set-file or Helm values +{{- end }} diff --git a/deployments/ghl/helm/templates/deployment.yaml b/deployments/ghl/helm/templates/deployment.yaml new file mode 100644 index 00000000..1aaec306 --- /dev/null +++ b/deployments/ghl/helm/templates/deployment.yaml @@ -0,0 +1,120 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "codebase-memory-mcp.fullname" . }} + labels: + {{- include "codebase-memory-mcp.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + # StatefulSet-like: only 1 replica writing to the PVC; Recreate avoids two pods fighting over the volume + strategy: + type: Recreate + selector: + matchLabels: + {{- include "codebase-memory-mcp.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + # Restart pods when the ConfigMap changes + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + labels: + {{- include "codebase-memory-mcp.selectorLabels" . 
| nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "codebase-memory-mcp.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: fleet + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ include "codebase-memory-mcp.imageTag" . }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + # Secrets from GCP Secret Manager + - name: BEARER_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.bearerToken.secretName }} + key: {{ .Values.secrets.bearerToken.key }} + optional: true + - name: GITHUB_WEBHOOK_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.webhookSecret.secretName }} + key: {{ .Values.secrets.webhookSecret.key }} + optional: true + - name: GITHUB_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.secrets.githubToken.secretName }} + key: {{ .Values.secrets.githubToken.key }} + optional: true + {{- if .Values.reposManifest.configMap.enabled }} + - name: REPOS_MANIFEST + value: /config/REPOS.yaml + {{- end }} + volumeMounts: + - name: fleet-cache + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.reposManifest.configMap.enabled }} + - name: repos-manifest + mountPath: /config + readOnly: true + {{- end }} + {{- if .Values.githubDeployKey.enabled }} + - name: github-deploy-key + mountPath: /root/.ssh + readOnly: true + {{- end }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + - name: fleet-cache + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: 
+ claimName: {{ include "codebase-memory-mcp.fullname" . }}-cache + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.reposManifest.configMap.enabled }} + - name: repos-manifest + configMap: + name: {{ .Values.reposManifest.configMap.name | default (printf "%s-repos" (include "codebase-memory-mcp.fullname" .)) }} + {{- end }} + {{- if .Values.githubDeployKey.enabled }} + - name: github-deploy-key + secret: + secretName: {{ .Values.githubDeployKey.secretName }} + defaultMode: 0400 + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/deployments/ghl/helm/templates/pvc.yaml b/deployments/ghl/helm/templates/pvc.yaml new file mode 100644 index 00000000..03bee522 --- /dev/null +++ b/deployments/ghl/helm/templates/pvc.yaml @@ -0,0 +1,20 @@ +{{- if .Values.persistence.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "codebase-memory-mcp.fullname" . }}-cache + labels: + {{- include "codebase-memory-mcp.labels" . | nindent 4 }} + annotations: + # Retain the PVC even if the Helm release is deleted — the index is expensive to rebuild + helm.sh/resource-policy: keep +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- end }} diff --git a/deployments/ghl/helm/templates/service.yaml b/deployments/ghl/helm/templates/service.yaml new file mode 100644 index 00000000..54e7af33 --- /dev/null +++ b/deployments/ghl/helm/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "codebase-memory-mcp.fullname" . 
}} + labels: + {{- include "codebase-memory-mcp.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "codebase-memory-mcp.selectorLabels" . | nindent 4 }} diff --git a/deployments/ghl/helm/templates/serviceaccount.yaml b/deployments/ghl/helm/templates/serviceaccount.yaml new file mode 100644 index 00000000..868983a2 --- /dev/null +++ b/deployments/ghl/helm/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "codebase-memory-mcp.serviceAccountName" . }} + labels: + {{- include "codebase-memory-mcp.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/deployments/ghl/helm/templates/virtualservice.yaml b/deployments/ghl/helm/templates/virtualservice.yaml new file mode 100644 index 00000000..3ebc6015 --- /dev/null +++ b/deployments/ghl/helm/templates/virtualservice.yaml @@ -0,0 +1,29 @@ +{{- if .Values.virtualService.enabled -}} +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: {{ include "codebase-memory-mcp.fullname" . }} + labels: + {{- include "codebase-memory-mcp.labels" . | nindent 4 }} +spec: + hosts: + - {{ .Values.virtualService.host }} + {{- if .Values.virtualService.gateway }} + gateways: + - {{ .Values.virtualService.gateway }} + {{- end }} + http: + - match: + - uri: + prefix: / + route: + - destination: + host: {{ include "codebase-memory-mcp.fullname" . 
}} + port: + number: {{ .Values.service.port }} + timeout: 300s # fleet indexing can take a while + retries: + attempts: 3 + perTryTimeout: 10s + retryOn: connect-failure,refused-stream,unavailable,retriable-4xx +{{- end }} diff --git a/deployments/ghl/helm/values-staging.yaml b/deployments/ghl/helm/values-staging.yaml new file mode 100644 index 00000000..3e7aec4f --- /dev/null +++ b/deployments/ghl/helm/values-staging.yaml @@ -0,0 +1,12 @@ +# values-staging.yaml — staging overrides +image: + tag: "latest" + +env: + FLEET_CONCURRENCY: "8" + INDEXER_CLIENTS: "8" + GITHUB_AUTH_ENABLED: "true" + GITHUB_ALLOWED_ORGS: "GoHighLevel" + +persistence: + size: "20Gi" diff --git a/deployments/ghl/helm/values.yaml b/deployments/ghl/helm/values.yaml new file mode 100644 index 00000000..893f6077 --- /dev/null +++ b/deployments/ghl/helm/values.yaml @@ -0,0 +1,121 @@ +# values.yaml — codebase-memory-mcp GHL fleet +# Override these in values-staging.yaml / values-production.yaml + +replicaCount: 1 + +image: + repository: gcr.io/highlevel-common-layer/codebase-memory-mcp-ghl + pullPolicy: IfNotPresent + tag: "" # defaults to .Chart.AppVersion + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + create: true + annotations: {} + name: "" + +podAnnotations: {} + +podSecurityContext: + fsGroup: 65532 # nonroot + +securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false # SQLite writes to /data + runAsNonRoot: true + runAsUser: 65532 + capabilities: + drop: + - ALL + +service: + type: ClusterIP + port: 8080 + +# Expose via Istio VirtualService (GHL standard) +virtualService: + enabled: true + host: "codebase-memory-mcp.internal.svc.cluster.local" + gateway: "" # uses mesh by default + +ingress: + enabled: false + +resources: + limits: + cpu: "2" + memory: "4Gi" + requests: + cpu: "500m" + memory: "1Gi" + +autoscaling: + enabled: false # fleet server is stateful (PVC); don't autoscale by default + +# Persistent volume for SQLite 
fleet cache (~200 repos) +persistence: + enabled: true + storageClass: "standard-rwo" + size: "50Gi" + accessMode: ReadWriteOnce + mountPath: /data/fleet-cache + +# Environment — secrets injected from GCP Secret Manager via GHL secret-manager pattern +env: + PORT: "8080" + FLEET_CONCURRENCY: "8" + INDEXER_CLIENTS: "8" + CRON_INCREMENTAL: "0 */6 * * *" + CRON_FULL: "0 2 * * 0" + CBM_CACHE_DIR: "/tmp/codebase-memory-mcp" + FLEET_CACHE_DIR: "/data/fleet-cache" + REPOS_MANIFEST: "/app/REPOS.local.yaml" + +# Secrets — reference GCP Secret Manager secrets +# These are injected as env vars at runtime +secrets: + bearerToken: + secretName: "codebase-memory-mcp-bearer-token" + key: "token" + webhookSecret: + secretName: "codebase-memory-mcp-webhook-secret" + key: "secret" + githubToken: + secretName: "codebase-memory-mcp-github-token" + key: "token" + +# Optional: override REPOS.yaml via ConfigMap instead of baked image +reposManifest: + configMap: + enabled: false + name: "" + +livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 3 + +readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + +nodeSelector: {} +tolerations: [] +affinity: {} + +# GitHub deploy key for private repo cloning +githubDeployKey: + enabled: false + secretName: "github-deploy-key" # SSH private key diff --git a/docs/CBM_VS_PROJECT_ORION_COMPARISON.md b/docs/CBM_VS_PROJECT_ORION_COMPARISON.md new file mode 100644 index 00000000..26c871f1 --- /dev/null +++ b/docs/CBM_VS_PROJECT_ORION_COMPARISON.md @@ -0,0 +1,325 @@ +# Codebase Memory MCP vs Project Orion + +_Prepared on April 15, 2026_ + +## Executive Summary + +This is an end-to-end implementation comparison between: + +- **Codebase Memory MCP (CBM)**: the indexing and graph-analysis engine in this repository +- **Project Orion**: the Python-based multi-repo retrieval, MCP, and 
LLM analysis service in `~/Documents/highlevel/project-orion` + +These systems solve related problems, but they are **not equivalent architectures**. + +- **CBM is stronger as a code intelligence engine.** + It has the better indexing core, richer graph model, native impact-analysis surface, stronger storage discipline, and much broader test coverage. +- **Project Orion is stronger as a developer-facing MCP application.** + It has the cleaner native HTTP MCP serving layer, easier local-workspace onboarding, and a more explicit retrieval-plus-LLM answer flow. +- **Neither deployment is truly multi-pod ready today.** + Both are currently implemented and configured as effectively single-writer systems. + +The correct non-biased conclusion is: + +- If the goal is **deep structural code intelligence at scale**, CBM is the stronger foundation. +- If the goal is **fast local developer enablement and a simple MCP-hosted UX**, Orion is ahead on the serving/control-plane side. + +--- + +## What Each System Really Is + +| System | What it fundamentally is | Primary implementation style | Core value | +|---|---|---|---| +| **CBM** | A graph-native code indexing engine with an MCP tool surface | C engine + Go fleet wrapper + HTTP bridge | Deep code structure, tracing, impact analysis, semantic relationships | +| **Project Orion** | A multi-repo code retrieval and LLM-analysis service with MCP + REST | Python FastAPI + FastMCP + ChromaDB/BM25 | Developer-friendly repo discovery, search, summarization, and answer generation | + +### CBM key implementation anchors + +- Fleet/server wrapper: `ghl/cmd/server/main.go` +- MCP subprocess client: `ghl/internal/mcp/client.go` +- Fleet indexing orchestration: `ghl/internal/indexer/indexer.go` +- HTTP bridge: `ghl/internal/bridge/bridge.go` +- Core indexing pipeline: `src/pipeline/pipeline.c` +- Parallel extraction pipeline: `src/pipeline/pass_parallel.c` +- MCP tool definitions and store resolution: `src/mcp/mcp.c` +- SQLite tuning and 
dump safety: `src/store/store.c` + +### Project Orion key implementation anchors + +- FastMCP server: `orion/mcp_server.py` +- FastAPI app: `orion/api/main.py` +- Workspace services: `orion/app_services.py` +- Retrieval pipeline: `orion/search/retriever.py` +- Context expansion: `orion/search/context_expander.py` +- LLM analysis engine: `orion/engine/query_engine.py` +- Index storage pipeline: `orion/indexer/store.py` +- Parser/scanner/embedder: `orion/indexer/parser.py`, `orion/indexer/scanner.py`, `orion/indexer/embedder.py` + +--- + +## End-to-End Architecture Comparison + +| Dimension | Codebase Memory MCP | Project Orion | What is better right now | +|---|---|---|---| +| **Core architecture** | Multi-pass graph indexing engine with project DBs | Retrieval-oriented local repo indexing service | **CBM** | +| **Primary data model** | Nodes, edges, graph schema, semantic edges, structural relationships | Chunk embeddings + BM25 + lightweight import/call graph | **CBM** | +| **Serving model** | HTTP bridge over a single stdio MCP subprocess | Native FastMCP over Streamable HTTP | **Orion** | +| **Repo onboarding** | Manifest-driven fleet indexing, webhooks, manual re-index endpoints | Local path indexing and Git repo discovery | **Orion** for local dev | +| **Index persistence** | Per-project SQLite DB files with query-only reopen and integrity checks | ChromaDB local persistence + pickle BM25 + JSON graph/meta | **CBM** | +| **Natural-language answer flow** | Tool-driven; analysis comes from graph tools and downstream client behavior | Explicit hybrid search -> rerank -> expand -> LLM answer pipeline | **Orion** | +| **Impact analysis surface** | Native via graph tools like `trace_path`, `detect_changes`, `query_graph` | Indirect via retrieved chunks + LLM synthesis | **CBM** | +| **Durability discipline** | WAL, integrity checks, atomic dump flow, explicit query-only open | Local files, limited safety model, simpler but weaker persistence story | **CBM** | +| 
**Operational simplicity** | More moving parts | Simpler runtime shape | **Orion** | +| **Scaling readiness** | Strong engine, weaker orchestration layer | Simpler service, weaker indexing/storage model | **Split** | + +--- + +## Indexing Pipeline: One-to-One Comparison + +### High-level flow + +| Step | Codebase Memory MCP | Project Orion | Better implementation | +|---|---|---|---| +| 1. Repo input | Clone/update repo from manifest into cache dir | Discover local Git repos or accept explicit repo path | Depends on use case | +| 2. File discovery | Structured discover pass in C pipeline | `scan_repo()` walks repo and filters files | **CBM** | +| 3. Parse/extract | Parallel extract/resolve workers | Sequential parser loop per file batch | **CBM** | +| 4. Intermediate model | In-memory graph buffer + registry | Batch chunk list + BM25 record list + graph record list | **CBM** | +| 5. Semantic layer | Native semantic edge generation and graph enrichment | Vector search index built from chunks; no graph-native semantic edge layer | **CBM** | +| 6. Storage output | Single project SQLite DB with graph + indexes | Chroma collection + BM25 pickle + graph JSON + meta JSON | **CBM** | +| 7. 
Re-index behavior | Supports incremental mode in engine | Deletes collection and rebuilds from scratch | **CBM** | + +### Why CBM's indexer is technically stronger + +| Capability | CBM | Orion | Gap | +|---|---|---|---| +| Parallel parse/extract | Yes | No | Major CBM advantage | +| Incremental indexing | Yes | No | Major CBM advantage | +| Rich structural graph | Yes | Partial | Major CBM advantage | +| Single-source storage artifact | Mostly yes, per project DB | No, split across multiple file types | CBM advantage | +| Built-in semantic graph layer | Yes | No, relies on retrieval embeddings instead | CBM advantage | +| Query-time graph-native impact tracing | Yes | No | CBM advantage | + +### Why Orion still feels good for some workflows + +| Capability | CBM | Orion | Gap | +|---|---|---|---| +| Index arbitrary local repo path quickly | Not the primary UX | Yes | Orion advantage | +| Discover repos in a workspace automatically | Not the primary UX | Yes | Orion advantage | +| Explain code with explicit retrieval pipeline | Indirect | Yes | Orion advantage | +| Surface NL-friendly telemetry from search/rerank/LLM | Limited at bridge level | Yes | Orion advantage | + +--- + +## Retrieval and Querying: One-to-One Comparison + +| Dimension | Codebase Memory MCP | Project Orion | Better implementation | +|---|---|---|---| +| **Primary query primitive** | Graph and tool calls | Hybrid retrieval + LLM synthesis | Depends on task | +| **Best for "find exact structural impact"** | Excellent | Weaker | **CBM** | +| **Best for "answer my question in natural language"** | Requires tool orchestration | Native design | **Orion** | +| **Best for "where should I make the change?"** | Strong because of graph tracing and change impact | Good when retrieval finds the right chunks | **CBM** | +| **Best for "give me context quickly"** | Good if indexed repo is healthy and query tools are used correctly | Very good due to rerank/expand flow | Slight **Orion** advantage | + +### 
Query strategy comparison + +| Query layer | Codebase Memory MCP | Project Orion | +|---|---|---| +| Full-text search | Native `search_graph` / `search_code` with structural ranking | BM25 over chunk tokens | +| Symbol search | Graph-native identifiers and qualified names | Symbol extraction + metadata heuristics | +| Semantic search | Engine-level semantic embeddings and semantic edges | Embedding similarity plus HyDE | +| Multi-hop analysis | Native graph traversal | BFS expansion over stored import/call graph | +| LLM answer generation | External/client-side orchestration pattern | First-class in the engine | + +### What CBM does better on analysis quality + +- It operates on a stronger representation of the codebase. +- It can answer structural questions without forcing everything through an LLM. +- It has native tools for graph schema, architecture, path tracing, and change detection. + +### What Orion does better on analysis UX + +- It makes the retrieval pipeline explicit and inspectable. +- It combines vector search, BM25, HyDE, symbol search, reranking, and context expansion in a clean path. +- It is easier to understand why an answer was produced. + +--- + +## MCP and API Serving Comparison + +| Dimension | Codebase Memory MCP | Project Orion | Better implementation | +|---|---|---|---| +| **MCP server type** | HTTP bridge to stdio subprocess | Native FastMCP HTTP server | **Orion** | +| **Transport shape** | Bridge layer converts HTTP JSON-RPC into subprocess calls | Streamable HTTP MCP directly | **Orion** | +| **Concurrency model** | Bridge serializes through a single subprocess client | Native server process, simpler runtime path | **Orion** | +| **Auth model** | Bearer token at bridge layer | Bearer token middleware + transport security | Slight **Orion** advantage | +| **Operational complexity** | Higher | Lower | **Orion** | + +### Important implementation truth + +CBM's main serving weakness is **not** the engine. 
It is the wrapper design: + +- `ghl/internal/mcp/client.go` serializes all requests behind one mutex. +- `ghl/internal/bridge/bridge.go` is still a bridge pattern, not a fully direct engine-native HTTP service. + +By contrast, Orion's MCP surface is conceptually cleaner: + +- `FastMCP` +- `streamable_http_path="/"` +- explicit transport security settings + +So on MCP hosting quality alone, Orion is ahead. + +--- + +## Storage, Durability, and Reliability Comparison + +| Dimension | Codebase Memory MCP | Project Orion | Better implementation | +|---|---|---|---| +| **Storage unit** | One DB per indexed project | Multiple local artifacts per repo | **CBM** | +| **Integrity checks** | Yes | Minimal | **CBM** | +| **Crash safety** | Stronger | Weaker | **CBM** | +| **Read-only query open** | Yes | No equivalent discipline | **CBM** | +| **Re-index safety** | Better in engine design | Rebuild-oriented | **CBM** | + +### Reliability observations + +| Concern | Codebase Memory MCP | Project Orion | +|---|---|---| +| Corrupt store detection | Explicitly checks integrity before use | No equivalent strong guard observed | +| Project existence validation | Explicitly validates project exists in DB | Uses metadata + collection lookup | +| Atomic persistence story | Stronger | Weaker | +| Live deployment reliability | Currently reduced by wrapper/deployment issues | Simpler single-node app, but not platform-grade durable | + +### Important non-biased caveat + +CBM's **implementation** is stronger than its **current deployment behavior**. + +In practice today: + +- the CBM engine is strong +- the current fleet wrapper and deployment choices are the main reliability bottleneck + +That distinction matters. The weakness is mostly in orchestration, cache-pathing, and wrapper behavior, not in the engine design itself. 
+ +--- + +## Scaling and Multi-Pod Readiness + +| Dimension | Codebase Memory MCP | Project Orion | Better implementation | +|---|---|---|---| +| **Current replica strategy** | Single replica, `Recreate`, `ReadWriteOnce` PVC | Single replica, `Recreate`, `emptyDir` | Neither | +| **Multi-writer safety today** | No | No | Neither | +| **Reader/writer split potential** | High | Moderate | **CBM** | +| **Current shared-state design** | Better engine foundation, but wrapper is not horizontally safe | Explicitly local-only | **CBM**, but still not ready | + +### Direct comparison + +| Scaling question | Codebase Memory MCP | Project Orion | +|---|---|---| +| Can it safely run multi-pod as deployed now? | No | No | +| Can it evolve into 1 writer + N readers? | Yes, with the right topology | Harder, because storage and state model need larger changes | +| Is the current deployment intentionally single-writer? | Yes | Yes | + +### Bottom line on scale + +- CBM has the better **path to scale** +- Orion has the simpler **single-node path** +- neither is a genuine multi-pod, shared-state, horizontally safe service today + +--- + +## Test and Validation Surface + +| Dimension | Codebase Memory MCP | Project Orion | Better implementation | +|---|---|---|---| +| **Breadth of tests** | Broad C + Go test coverage across engine, store, MCP, incremental indexing, parallelism | Minimal API/discovery tests | **CBM** | +| **Depth of engine validation** | High | Low | **CBM** | +| **MCP/server validation** | Present | Present but smaller | **CBM** overall | + +### Practical meaning + +This is one of the clearest objective gaps in the codebases. + +- CBM looks like a system that has been tested as an engine. +- Orion looks like a system that has been proven enough to demo and iterate, but not hardened to the same degree. 
+ +--- + +## What Is Working Well in Codebase Memory MCP + +| Area | What is working well | Why it matters | +|---|---|---| +| Indexing engine | Parallel, graph-native, structurally rich | Better throughput and better analysis primitives | +| Change impact tooling | Native tracing and change-detection tools | Better for real engineering workflows | +| Persistence model | SQLite per project with integrity/dump discipline | Better reliability and easier query correctness guarantees | +| Semantic layer | Built into the engine | More useful structural-semantic analysis | +| Test coverage | Broad and deep | Higher confidence in correctness | + +--- + +## What Is Working Well in Project Orion + +| Area | What is working well | Why it matters | +|---|---|---| +| MCP serving | Native FastMCP streamable HTTP | Cleaner client experience | +| Local repo UX | Easy discovery and path-based indexing | Faster developer adoption | +| Retrieval flow | Hybrid search + rerank + context expansion | Better natural-language answer pipeline | +| Simplicity | Fewer architectural layers | Easier to reason about and debug | +| Developer-facing telemetry | Exposes retrieval and LLM stages clearly | Better explainability for analysis results | + +--- + +## Real Gaps: One-to-One + +| Gap | CBM status | Orion status | Who is ahead | +|---|---|---|---| +| Graph-native code intelligence | Strong | Partial | **CBM** | +| Hosted MCP quality | Good enough after bridge fixes, but still bridge-based | Cleaner native implementation | **Orion** | +| Incremental indexing | Present | Missing | **CBM** | +| Natural-language answer pipeline | External/client-oriented | First-class | **Orion** | +| Large-scale index economics | Better foundation | Poor today | **CBM** | +| Local developer usability | Weaker | Stronger | **Orion** | +| Durability discipline | Stronger | Weaker | **CBM** | +| Test maturity | Stronger | Weaker | **CBM** | + +--- + +## Final Recommendation + +### If the team must choose a 
technical foundation + +Choose **Codebase Memory MCP** as the foundation for long-term code intelligence. + +Reason: + +- better engine +- better graph model +- better impact-analysis tools +- better storage discipline +- better test surface +- better path to serious scale + +### If the team must choose a short-term developer experience winner + +Choose **Project Orion's serving model and UX patterns**. + +Reason: + +- simpler HTTP MCP surface +- easier local repo onboarding +- stronger natural-language retrieval pipeline +- easier to operate as a straightforward service + +### Best combined direction + +The strongest combined architecture is: + +1. **Keep CBM as the indexer and graph engine** +2. **Borrow Orion's cleaner server/retrieval UX ideas** +3. **Do not replace CBM's engine with Orion's current indexer** +4. **Do not treat Orion as multi-pod or large-scale ready without major rework** + +--- + +## Bottom Line in One Sentence + +**Codebase Memory MCP is the stronger technical engine; Project Orion is the cleaner developer-facing service; the best platform direction is to keep CBM's core and adopt Orion's best UX and transport ideas.** diff --git a/ghl/cmd/genlocalmanifest/main.go b/ghl/cmd/genlocalmanifest/main.go new file mode 100644 index 00000000..2152fe4f --- /dev/null +++ b/ghl/cmd/genlocalmanifest/main.go @@ -0,0 +1,137 @@ +package main + +import ( + "flag" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/manifest" + "gopkg.in/yaml.v3" +) + +func main() { + repoRoot := mustFindRepoRoot() + defaultWorkspace := filepath.Dir(repoRoot) + + workspaceRoot := flag.String("workspace-root", defaultWorkspace, "Workspace root containing local Git repos") + inputPath := flag.String("input", filepath.Join(repoRoot, "REPOS.yaml"), "Source manifest path") + outputPath := flag.String("output", filepath.Join(repoRoot, "REPOS.local.yaml"), "Generated local manifest path") + flag.Parse() + + m, err := 
manifest.Load(*inputPath) + if err != nil { + exitf("load manifest: %v", err) + } + + localRemotes, localDirs, err := scanWorkspace(*workspaceRoot) + if err != nil { + exitf("scan workspace: %v", err) + } + + filtered := manifest.Manifest{Repos: make([]manifest.Repo, 0, len(m.Repos))} + for _, repo := range m.Repos { + if localRemotes[canonicalGitHubURL(repo.GitHubURL)] || localDirs[repo.Name] { + filtered.Repos = append(filtered.Repos, repo) + } + } + + if err := writeManifest(*outputPath, *workspaceRoot, *inputPath, filtered); err != nil { + exitf("write manifest: %v", err) + } + + fmt.Printf("generated %s with %d repos (from %d total)\n", *outputPath, len(filtered.Repos), len(m.Repos)) +} + +func mustFindRepoRoot() string { + wd, err := os.Getwd() + if err != nil { + exitf("getwd: %v", err) + } + current := wd + for { + if _, err := os.Stat(filepath.Join(current, "REPOS.yaml")); err == nil { + return current + } + parent := filepath.Dir(current) + if parent == current { + exitf("could not locate repo root from %s", wd) + } + current = parent + } +} + +func scanWorkspace(workspaceRoot string) (map[string]bool, map[string]bool, error) { + entries, err := os.ReadDir(workspaceRoot) + if err != nil { + return nil, nil, err + } + + remotes := make(map[string]bool, len(entries)) + dirs := make(map[string]bool, len(entries)) + for _, entry := range entries { + if !entry.IsDir() { + continue + } + repoDir := filepath.Join(workspaceRoot, entry.Name()) + if _, err := os.Stat(filepath.Join(repoDir, ".git")); err != nil { + continue + } + dirs[entry.Name()] = true + remote, err := gitRemote(repoDir) + if err != nil { + continue + } + remotes[canonicalGitHubURL(remote)] = true + } + return remotes, dirs, nil +} + +func gitRemote(repoDir string) (string, error) { + cmd := exec.Command("git", "-C", repoDir, "remote", "get-url", "origin") + out, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} + +func 
canonicalGitHubURL(raw string) string { + url := strings.TrimSpace(raw) + switch { + case strings.HasPrefix(url, "git@github.com:"): + url = "https://github.com/" + strings.TrimPrefix(url, "git@github.com:") + case strings.HasPrefix(url, "ssh://git@github.com/"): + url = "https://github.com/" + strings.TrimPrefix(url, "ssh://git@github.com/") + } + url = strings.TrimSuffix(url, ".git") + url = strings.TrimRight(url, "/") + return strings.ToLower(url) +} + +func writeManifest(outputPath, workspaceRoot, inputPath string, m manifest.Manifest) error { + data, err := yaml.Marshal(m) + if err != nil { + return err + } + + header := []string{ + "# REPOS.local.yaml — generated local fleet manifest", + fmt.Sprintf("# workspace_root: %s", workspaceRoot), + fmt.Sprintf("# source_manifest: %s", inputPath), + "# Regenerate from ./ghl with: go run ./cmd/genlocalmanifest", + "", + } + + if err := os.MkdirAll(filepath.Dir(outputPath), 0750); err != nil { + return err + } + return os.WriteFile(outputPath, []byte(strings.Join(header, "\n")+string(data)), 0644) +} + +func exitf(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, format+"\n", args...) + os.Exit(1) +} diff --git a/ghl/cmd/server/main.go b/ghl/cmd/server/main.go new file mode 100644 index 00000000..a07053e4 --- /dev/null +++ b/ghl/cmd/server/main.go @@ -0,0 +1,1289 @@ +// ghl-fleet — GHL additions to codebase-memory-mcp. 
+// +// Runs three services in one process: +// - HTTP bridge: exposes the codebase-memory-mcp binary as an HTTP MCP endpoint +// - Fleet indexer: clones + indexes all 200 GHL repos on a schedule +// - Webhook handler: triggers re-index on GitHub push events +package main + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "log/slog" + "net/http" + "os" + "os/exec" + "os/signal" + "path/filepath" + "runtime" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" + "github.com/robfig/cron/v3" + + ghlauth "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/auth" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/bridge" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/cachepersist" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/discovery" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/indexer" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/manifest" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/mcp" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/webhook" +) + +var supportedProtocolVersions = []string{ + "2025-11-25", + "2025-06-18", + "2025-03-26", + "2024-11-05", +} + +func main() { + logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo})) + slog.SetDefault(logger) + + cfg := loadConfig() + ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer stop() + + if err := os.MkdirAll(cfg.CloneCacheDir, 0o750); err != nil { + slog.Error("failed to create clone cache dir", "path", cfg.CloneCacheDir, "err", err) + os.Exit(1) + } + if err := os.MkdirAll(cfg.CBMCacheDir, 0o750); err != nil { + slog.Error("failed to create cbm cache dir", "path", cfg.CBMCacheDir, "err", err) + os.Exit(1) + } + + var artifactSync *cachepersist.Syncer + if cfg.ArtifactsEnabled { + var err error + switch 
strings.ToLower(strings.TrimSpace(cfg.ArtifactsBackend)) { + case "gcs": + artifactSync, err = cachepersist.NewGCS(ctx, cfg.CBMCacheDir, cfg.ArtifactsBucket, cfg.ArtifactsPrefix) + default: + artifactSync, err = cachepersist.New(cfg.CBMCacheDir, cfg.ArtifactDir) + } + if err != nil { + slog.Error("failed to initialize artifact sync", "runtime_dir", cfg.CBMCacheDir, "artifact_dir", cfg.ArtifactDir, "err", err) + os.Exit(1) + } + defer func() { + if err := artifactSync.Close(); err != nil { + slog.Warn("failed to close artifact sync", "err", err) + } + }() + if cfg.ArtifactsSkipHydrate { + slog.Info("skipping persisted index hydrate", "artifact_dir", cfg.ArtifactDir, "cache_dir", cfg.CBMCacheDir) + } else { + hydrated, err := artifactSync.Hydrate() + if err != nil { + slog.Error("failed to hydrate persisted indexes", "artifact_dir", cfg.ArtifactDir, "cache_dir", cfg.CBMCacheDir, "err", err) + os.Exit(1) + } + slog.Info("hydrated persisted indexes", "count", hydrated, "artifact_dir", cfg.ArtifactDir, "cache_dir", cfg.CBMCacheDir) + } + } + + // ── Load fleet manifest ────────────────────────────────── + + m, err := manifest.Load(cfg.ReposManifest) + if err != nil { + slog.Error("failed to load repos manifest", "path", cfg.ReposManifest, "err", err) + os.Exit(1) + } + slog.Info("fleet manifest loaded", "repos", len(m.Repos)) + + cloner := &gitCloner{ + logger: logger, + githubToken: cfg.GitHubToken, + } + + newFleetIndexer := func(client indexer.Client, discoverySvc *discovery.Discoverer) *indexer.Indexer { + return indexer.New(indexer.Config{ + Client: client, + Cloner: cloner, + CacheDir: cfg.CloneCacheDir, + Concurrency: cfg.Concurrency, + OnRepoStart: func(slug string) { slog.Info("indexing repo", "repo", slug) }, + OnRepoDone: func(slug string, err error) { + if err != nil { + slog.Error("repo indexing failed", "repo", slug, "err", err) + return + } + if artifactSync != nil { + projectName := projectNameFromPath(filepath.Join(cfg.CloneCacheDir, slug)) + persisted, 
persistErr := artifactSync.PersistProject(projectName) + if persistErr != nil { + slog.Error("failed to persist project index", "repo", slug, "project", projectName, "err", persistErr) + } else { + slog.Info("persisted project index", "repo", slug, "project", projectName, "files", persisted) + } + } + if discoverySvc != nil { + discoverySvc.Invalidate() + } + slog.Info("repo indexed", "repo", slug) + }, + }) + } + + if cfg.RunMode == "index-all" { + indexPool, err := newMCPIndexClientPool(ctx, cfg.BinaryPath, cfg.IndexerClients, cfg.IndexerClientMaxUses) + if err != nil { + slog.Error("failed to start indexer client pool", "clients", cfg.IndexerClients, "err", err) + os.Exit(1) + } + defer indexPool.Close() + slog.Info("indexer client pool started", "clients", cfg.IndexerClients, "max_uses", cfg.IndexerClientMaxUses) + + idx := newFleetIndexer(indexPool, nil) + slog.Info("running one-shot fleet indexing job", "force", cfg.RunForce) + result := idx.IndexAll(context.Background(), m.Repos, cfg.RunForce) + slog.Info("one-shot fleet indexing complete", "total", result.Total, "ok", result.Succeeded, "failed", result.Failed) + if result.Failed > 0 { + os.Exit(1) + } + return + } + + // ── Start MCP binary clients ───────────────────────────── + + bridgePool, err := newMCPBridgeClientPool(ctx, cfg.BinaryPath, cfg.BridgeClients, cfg.BridgeAcquireTimeout) + if err != nil { + slog.Error("failed to start bridge client pool", "binary", cfg.BinaryPath, "clients", cfg.BridgeClients, "err", err) + os.Exit(1) + } + defer bridgePool.Close() + slog.Info( + "bridge client pool started", + "name", bridgePool.ServerInfo().Name, + "version", bridgePool.ServerInfo().Version, + "clients", cfg.BridgeClients, + "acquire_timeout_ms", cfg.BridgeAcquireTimeout.Milliseconds(), + ) + + indexPool, err := newMCPIndexClientPool(ctx, cfg.BinaryPath, cfg.IndexerClients, cfg.IndexerClientMaxUses) + if err != nil { + slog.Error("failed to start indexer client pool", "clients", cfg.IndexerClients, "err", 
err) + os.Exit(1) + } + defer indexPool.Close() + slog.Info("indexer client pool started", "clients", cfg.IndexerClients, "max_uses", cfg.IndexerClientMaxUses) + + discoveryPool, err := newMCPDiscoveryClientPool(ctx, cfg.BinaryPath, cfg.DiscoveryClients) + if err != nil { + slog.Error("failed to start discovery client pool", "clients", cfg.DiscoveryClients, "err", err) + os.Exit(1) + } + defer discoveryPool.Close() + slog.Info("discovery client pool started", "clients", cfg.DiscoveryClients) + + var requestAuthenticator bridge.Authenticator + if cfg.GitHubAuthEnabled { + requestAuthenticator = ghlauth.NewGitHubAuthenticator(ghlauth.GitHubConfig{ + BaseURL: cfg.GitHubAPIBaseURL, + AllowedOrgs: cfg.GitHubAllowedOrgs, + CacheTTL: cfg.GitHubAuthCacheTTL, + }) + slog.Info("github bearer auth enabled", "allowed_orgs", cfg.GitHubAllowedOrgs) + } + + // ── Build indexer ──────────────────────────────────────── + + var discoverySvc *discovery.Discoverer + maxGraphCandidates := 3 + if cfg.DiscoveryMaxCandidates > 0 && cfg.DiscoveryMaxCandidates < maxGraphCandidates { + maxGraphCandidates = cfg.DiscoveryMaxCandidates + } + discoverySvc = discovery.NewService(discoveryPool, *m, discovery.Options{ + MaxBM25Candidates: cfg.DiscoveryMaxCandidates, + MaxGraphCandidates: maxGraphCandidates, + RequestTimeout: cfg.DiscoveryTimeout, + }) + idx := newFleetIndexer(indexPool, discoverySvc) + + var fleetIndexing atomic.Bool + startFleetIndex := func(reason string, force bool) bool { + if !fleetIndexing.CompareAndSwap(false, true) { + slog.Warn("fleet index already running", "reason", reason, "force", force) + return false + } + go func() { + defer fleetIndexing.Store(false) + slog.Info("fleet index starting", "reason", reason, "force", force) + result := idx.IndexAll(context.Background(), m.Repos, force) + slog.Info("fleet index complete", "reason", reason, "force", force, "total", result.Total, "ok", result.Succeeded, "failed", result.Failed) + }() + return true + } + + // ── Fleet 
scheduler ────────────────────────────────────── + + c := cron.New() + if cfg.ScheduledIndexingEnabled { + c.AddFunc(cfg.IncrementalCron, func() { + startFleetIndex("cron-incremental", false) + }) + c.AddFunc(cfg.FullCron, func() { + startFleetIndex("cron-full", true) + }) + c.Start() + defer c.Stop() + slog.Info("scheduled indexing enabled", "incremental_cron", cfg.IncrementalCron, "full_cron", cfg.FullCron) + } else { + slog.Info("scheduled indexing disabled") + } + + // ── HTTP router ────────────────────────────────────────── + + r := chi.NewRouter() + r.Use(middleware.RequestID) + r.Use(middleware.RealIP) + r.Use(middleware.Recoverer) + r.Use(middleware.Timeout(5 * time.Minute)) + + // Bridge: forward MCP calls to the binary + bridgeHandler := bridge.NewHandler( + &mcpBridgeBackend{client: bridgePool, discovery: discoverySvc}, + bridge.Config{BearerToken: cfg.BearerToken, Authenticator: requestAuthenticator}, + ) + r.Mount("/mcp", bridgeHandler) + r.Get("/health", bridgeHandler.ServeHTTP) + + requireAuth := makeAuthMiddleware(cfg.BearerToken, requestAuthenticator) + + // Webhook: trigger re-index on GitHub push + wh := webhook.NewHandler(webhook.Config{ + Secret: []byte(cfg.WebhookSecret), + OnPush: func(repoSlug string) { + repo, ok := m.FindByName(repoSlug) + if !ok { + slog.Warn("webhook: repo not in manifest", "repo", repoSlug) + return + } + slog.Info("webhook: re-indexing repo", "repo", repoSlug) + if err := idx.IndexRepo(context.Background(), repo, false); err != nil { + slog.Error("webhook: index failed", "repo", repoSlug, "err", err) + } + }, + }) + r.Post("/webhooks/github", wh.ServeHTTP) + + // Manual trigger: index a single repo by slug + r.Post("/index/{repoSlug}", requireAuth(func(w http.ResponseWriter, req *http.Request) { + slug := chi.URLParam(req, "repoSlug") + repo, ok := m.FindByName(slug) + if !ok { + http.Error(w, "repo not found in manifest", http.StatusNotFound) + return + } + go func() { + if err := idx.IndexRepo(context.Background(), 
repo, true); err != nil { + slog.Error("manual index failed", "repo", slug, "err", err) + } + }() + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{"accepted":true,"repo":%q}`, slug) + })) + + r.Post("/index-all", requireAuth(func(w http.ResponseWriter, req *http.Request) { + force := req.URL.Query().Get("force") == "1" || strings.EqualFold(req.URL.Query().Get("force"), "true") + if !startFleetIndex("manual", force) { + http.Error(w, "fleet index already running", http.StatusConflict) + return + } + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{"accepted":true,"force":%t}`, force) + })) + + // Fleet status endpoint + r.Get("/status", requireAuth(func(w http.ResponseWriter, req *http.Request) { + artifactCount := 0 + artifactLocation := cfg.ArtifactDir + if artifactSync != nil { + count, err := artifactSync.CountArtifacts() + if err != nil { + slog.Warn("failed to count persisted indexes", "err", err) + } else { + artifactCount = count + } + artifactLocation = artifactSync.ArtifactDir + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]interface{}{ + "repos": len(m.Repos), + "version": bridgePool.ServerInfo().Version, + "binary": cfg.BinaryPath, + "clone_cache": cfg.CloneCacheDir, + "cbm_cache": cfg.CBMCacheDir, + "artifact_dir": artifactLocation, + "artifact_files": artifactCount, + "artifacts_enabled": cfg.ArtifactsEnabled, + "manifest": cfg.ReposManifest, + "concurrency": cfg.Concurrency, + "bridge_clients": cfg.BridgeClients, + "bridge_acquire_timeout": cfg.BridgeAcquireTimeout.Milliseconds(), + "indexer_clients": cfg.IndexerClients, + "discovery_clients": cfg.DiscoveryClients, + "discovery_max_candidates": cfg.DiscoveryMaxCandidates, + "discovery_timeout_ms": cfg.DiscoveryTimeout.Milliseconds(), + "startup_index_enabled": cfg.StartupIndexEnabled, + "scheduled_index_enabled": cfg.ScheduledIndexingEnabled, + "fleet_index_running": fleetIndexing.Load(), + "github_auth_enabled": cfg.GitHubAuthEnabled, + }) + 
})) + + srv := &http.Server{ + Addr: ":" + cfg.Port, + Handler: r, + ReadTimeout: 30 * time.Second, + WriteTimeout: 10 * time.Minute, + IdleTimeout: 120 * time.Second, + } + + // ── Startup indexing pass ──────────────────────────────── + + if cfg.StartupIndexEnabled { + startFleetIndex("startup", false) + } else { + slog.Info("startup indexing disabled") + } + + // ── Serve ──────────────────────────────────────────────── + + go func() { + slog.Info("server listening", "addr", srv.Addr) + if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { + slog.Error("server error", "err", err) + stop() + } + }() + + <-ctx.Done() + slog.Info("shutting down...") + + shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if err := srv.Shutdown(shutdownCtx); err != nil { + slog.Error("server shutdown error", "err", err) + } +} + +func makeAuthMiddleware(staticToken string, auth bridge.Authenticator) func(http.HandlerFunc) http.HandlerFunc { + return func(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + authHeader := req.Header.Get("Authorization") + if auth != nil { + if !strings.HasPrefix(authHeader, "Bearer ") { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + if err := auth.Authenticate(req.Context(), strings.TrimPrefix(authHeader, "Bearer ")); err != nil { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + } else if staticToken != "" { + if !strings.HasPrefix(authHeader, "Bearer ") || strings.TrimPrefix(authHeader, "Bearer ") != staticToken { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + } + next(w, req) + } + } +} + +// ── Config ───────────────────────────────────────────────────── + +type config struct { + Port string + BinaryPath string + CloneCacheDir string + CBMCacheDir string + ArtifactDir string + ArtifactsEnabled bool + ArtifactsBackend string + ArtifactsBucket string + 
ArtifactsPrefix string + ArtifactsSkipHydrate bool + ReposManifest string + BearerToken string + GitHubToken string + GitHubAuthEnabled bool + GitHubAllowedOrgs []string + GitHubAPIBaseURL string + GitHubAuthCacheTTL time.Duration + WebhookSecret string + Concurrency int + BridgeClients int + BridgeAcquireTimeout time.Duration + IndexerClients int + IndexerClientMaxUses int + DiscoveryClients int + DiscoveryMaxCandidates int + DiscoveryTimeout time.Duration + IncrementalCron string + FullCron string + StartupIndexEnabled bool + ScheduledIndexingEnabled bool + RunMode string + RunForce bool +} + +func loadConfig() config { + getEnv := func(key, def string) string { + if v := os.Getenv(key); v != "" { + return v + } + return def + } + getBool := func(key string, def bool) bool { + v := strings.TrimSpace(getEnv(key, "")) + if v == "" { + return def + } + switch strings.ToLower(v) { + case "1", "true", "yes", "on": + return true + case "0", "false", "no", "off": + return false + default: + return def + } + } + getStringList := func(key string) []string { + raw := strings.TrimSpace(getEnv(key, "")) + if raw == "" { + return nil + } + parts := strings.Split(raw, ",") + out := make([]string, 0, len(parts)) + for _, part := range parts { + part = strings.TrimSpace(part) + if part != "" { + out = append(out, part) + } + } + return out + } + getConcurrency := func() int { + v := getEnv("FLEET_CONCURRENCY", "5") + n := 5 + fmt.Sscanf(v, "%d", &n) + return n + } + getBridgeClients := func() int { + v := getEnv("BRIDGE_CLIENTS", "") + if v == "" { + n := runtime.GOMAXPROCS(0) + if n < 2 { + return 2 + } + if n > 4 { + return 4 + } + return n + } + n := 1 + fmt.Sscanf(v, "%d", &n) + if n <= 0 { + return 1 + } + return n + } + getBridgeAcquireTimeout := func() time.Duration { + v := getEnv("BRIDGE_ACQUIRE_TIMEOUT_MS", "1500") + n := 1500 + fmt.Sscanf(v, "%d", &n) + if n <= 0 { + return 1500 * time.Millisecond + } + return time.Duration(n) * time.Millisecond + } + 
getIndexerClients := func(concurrency int) int { + v := getEnv("INDEXER_CLIENTS", "") + if v == "" { + return concurrency + } + n := concurrency + fmt.Sscanf(v, "%d", &n) + if n <= 0 { + return concurrency + } + return n + } + getIndexerClientMaxUses := func() int { + v := getEnv("INDEXER_CLIENT_MAX_USES", "1") + n := 1 + fmt.Sscanf(v, "%d", &n) + if n <= 0 { + return 1 + } + return n + } + getDiscoveryClients := func(concurrency int) int { + v := getEnv("DISCOVERY_CLIENTS", "") + if v == "" { + if concurrency < 2 { + return 2 + } + return concurrency + } + n := concurrency + fmt.Sscanf(v, "%d", &n) + if n <= 0 { + if concurrency < 2 { + return 2 + } + return concurrency + } + return n + } + getDiscoveryMaxCandidates := func() int { + v := getEnv("DISCOVERY_MAX_CANDIDATES", "5") + n := 5 + fmt.Sscanf(v, "%d", &n) + if n <= 0 { + return 5 + } + return n + } + getDiscoveryTimeout := func() time.Duration { + v := getEnv("DISCOVERY_TIMEOUT_MS", "5000") + n := 5000 + fmt.Sscanf(v, "%d", &n) + if n <= 0 { + return 5 * time.Second + } + return time.Duration(n) * time.Millisecond + } + getGitHubAuthCacheTTL := func() time.Duration { + v := getEnv("GITHUB_AUTH_CACHE_TTL_MS", "300000") + n := 300000 + fmt.Sscanf(v, "%d", &n) + if n <= 0 { + return 5 * time.Minute + } + return time.Duration(n) * time.Millisecond + } + concurrency := getConcurrency() + return config{ + Port: getEnv("PORT", "8080"), + BinaryPath: getEnv("CBM_BINARY", defaultBinaryPath()), + CloneCacheDir: getEnv("FLEET_CACHE_DIR", "/data/fleet-cache/repos"), + CBMCacheDir: getEnv("CBM_CACHE_DIR", "/tmp/codebase-memory-mcp"), + ArtifactDir: getEnv("CBM_ARTIFACT_DIR", "/data/fleet-cache/indexes"), + ArtifactsEnabled: getBool("ARTIFACTS_ENABLED", true), + ArtifactsBackend: getEnv("ARTIFACTS_BACKEND", "filesystem"), + ArtifactsBucket: getEnv("ARTIFACTS_BUCKET", ""), + ArtifactsPrefix: getEnv("ARTIFACTS_PREFIX", ""), + ArtifactsSkipHydrate: getBool("ARTIFACTS_SKIP_HYDRATE", false), + ReposManifest: 
// defaultManifestPath prefers the generated local manifest baked into the
// container, falling back to the checked-in fleet manifest.
func defaultManifestPath() string {
	for _, p := range []string{"/app/REPOS.local.yaml", "/app/REPOS.yaml"} {
		if _, err := os.Stat(p); err == nil {
			return p
		}
	}
	return "/app/REPOS.yaml"
}

// projectNameFromPath flattens an absolute path into a project identifier:
// path separators (and drive-letter colons) become single dashes, runs of
// separators collapse, and leading/trailing dashes are trimmed. An empty or
// separator-only path maps to "root".
func projectNameFromPath(absPath string) string {
	cleaned := filepath.ToSlash(strings.TrimSpace(absPath))
	if cleaned == "" {
		return "root"
	}

	var out strings.Builder
	out.Grow(len(cleaned))
	dashPending := false // true when the last emitted rune was '-'
	for _, r := range cleaned {
		if r == '/' || r == ':' {
			if !dashPending {
				out.WriteByte('-')
				dashPending = true
			}
			continue
		}
		out.WriteRune(r)
		dashPending = r == '-'
	}

	name := strings.Trim(out.String(), "-")
	if name == "" {
		return "root"
	}
	return name
}

// defaultBinaryPath locates the codebase-memory-mcp binary: first as a
// sibling of the running executable, then on PATH, finally falling back to
// the bare name (letting exec resolve or fail later).
func defaultBinaryPath() string {
	binName := "codebase-memory-mcp"
	if runtime.GOOS == "windows" {
		binName += ".exe"
	}
	// os.Executable error is ignored on purpose: an empty path degrades to
	// checking "./codebase-memory-mcp", which is harmless.
	exePath, _ := os.Executable()
	sibling := filepath.Join(filepath.Dir(exePath), binName)
	if _, err := os.Stat(sibling); err == nil {
		return sibling
	}
	if found, err := exec.LookPath(binName); err == nil {
		return found
	}
	return binName
}
// gitCloner implements indexer.Cloner by shelling out to the git CLI.
type gitCloner struct {
	logger      *slog.Logger
	githubToken string
}

// EnsureClone makes sure a shallow checkout of githubURL exists at localPath:
// it refreshes an existing clone, or creates a fresh one.
func (g *gitCloner) EnsureClone(ctx context.Context, githubURL, localPath string) error {
	if _, statErr := os.Stat(filepath.Join(localPath, ".git")); statErr == nil {
		return g.refreshClone(ctx, githubURL, localPath)
	}
	return g.freshClone(ctx, githubURL, localPath)
}

// refreshClone fetches the latest HEAD into an existing clone and hard-resets
// the working tree. On an HTTPS auth failure it falls back to the bits already
// on disk rather than failing the whole operation.
func (g *gitCloner) refreshClone(ctx context.Context, githubURL, localPath string) error {
	g.logger.Debug("updating clone", "path", localPath)
	fetch := g.gitCommand(ctx, localPath, githubURL, "fetch", "--depth=1", "origin", "HEAD")
	out, fetchErr := fetch.CombinedOutput()
	if fetchErr == nil {
		if err := g.restoreWorkingTree(ctx, githubURL, localPath, "FETCH_HEAD"); err != nil {
			return err
		}
		return g.validateClone(localPath)
	}
	if isGitHubHTTPSAuthError(string(out)) {
		g.logger.Warn("git fetch auth failed, using existing clone", "path", localPath)
		if err := g.restoreWorkingTree(ctx, githubURL, localPath, "HEAD"); err != nil {
			return err
		}
		return g.validateClone(localPath)
	}
	return fmt.Errorf("git fetch: %w\n%s", fetchErr, out)
}

// freshClone shallow-clones githubURL into localPath, bounded to two minutes.
func (g *gitCloner) freshClone(ctx context.Context, githubURL, localPath string) error {
	// MkdirAll guarantees the parent directories exist; the leaf itself is
	// removed again so `git clone` can create it (clone refuses a non-empty
	// target). A failed Remove is deliberately ignored — clone will surface
	// the real error below.
	if err := os.MkdirAll(localPath, 0750); err != nil {
		return fmt.Errorf("mkdir %q: %w", localPath, err)
	}
	os.Remove(localPath)
	g.logger.Info("cloning repo", "url", githubURL, "path", localPath)
	cloneCtx, cancel := context.WithTimeout(ctx, 120*time.Second)
	defer cancel()
	clone := g.gitCommand(cloneCtx, "", githubURL, "clone", "--depth=1", githubURL, localPath)
	if out, err := clone.CombinedOutput(); err != nil {
		return fmt.Errorf("git clone %q: %w\n%s", githubURL, err, out)
	}
	return g.validateClone(localPath)
}

// isGitHubHTTPSAuthError reports whether git output indicates an HTTPS
// credential-prompt failure against github.com.
func isGitHubHTTPSAuthError(output string) bool {
	return strings.Contains(output, "could not read Username for 'https://github.com'")
}

// gitCommand builds a git invocation. For https://github.com/ URLs with a
// configured token it injects a one-shot basic-auth extraheader and disables
// any credential helper, so no credentials are written to disk.
func (g *gitCloner) gitCommand(ctx context.Context, dir, githubURL string, args ...string) *exec.Cmd {
	var gitArgs []string
	if g.githubToken != "" && strings.HasPrefix(githubURL, "https://github.com/") {
		basic := base64.StdEncoding.EncodeToString([]byte("x-access-token:" + g.githubToken))
		gitArgs = append(gitArgs,
			"-c", "credential.helper=",
			"-c", "http.https://github.com/.extraheader=AUTHORIZATION: basic "+basic,
		)
	}
	gitArgs = append(gitArgs, args...)
	cmd := exec.CommandContext(ctx, "git", gitArgs...)
	if dir != "" {
		cmd.Dir = dir
	}
	return cmd
}

// restoreWorkingTree hard-resets the checkout to ref and removes untracked
// files, leaving a pristine working tree for indexing.
func (g *gitCloner) restoreWorkingTree(ctx context.Context, githubURL, localPath, ref string) error {
	for _, step := range [][]string{
		{"reset", "--hard", ref},
		{"clean", "-fd"},
	} {
		cmd := g.gitCommand(ctx, localPath, githubURL, step...)
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("git %s: %w\n%s", strings.Join(step, " "), err, out)
		}
	}
	return nil
}

// validateClone rejects clones whose working tree contains no files at all.
func (g *gitCloner) validateClone(localPath string) error {
	ok, err := hasWorkingTreeFiles(localPath)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("clone at %q has no checked out files", localPath)
	}
	return nil
}

// hasWorkingTreeFiles reports whether root contains at least one file outside
// of .git. The walk aborts via a sentinel error on the first hit.
func hasWorkingTreeFiles(root string) (bool, error) {
	errFound := errors.New("found working tree file")
	found := false
	walkErr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		switch {
		case err != nil:
			return err
		case path == root:
			return nil
		case info.IsDir():
			if info.Name() == ".git" {
				return filepath.SkipDir
			}
			return nil
		default:
			found = true
			return errFound
		}
	})
	if walkErr != nil && !errors.Is(walkErr, errFound) {
		return false, walkErr
	}
	return found, nil
}
// bridgePoolClient is the subset of the MCP stdio client used by the bridge
// pool; an interface so tests can substitute fakes via newBridgePoolClient.
type bridgePoolClient interface {
	ServerInfo() mcp.ServerInfo
	Call(ctx context.Context, method string, params interface{}) (json.RawMessage, error)
	CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error)
	Close()
}

// newBridgePoolClient constructs a real MCP client; overridable in tests.
var newBridgePoolClient = func(ctx context.Context, binPath string) (bridgePoolClient, error) {
	return mcp.NewClient(ctx, binPath)
}

// mcpBridgeClientPool is a fixed-size pool of MCP clients backing the HTTP
// bridge. Idle clients sit in the buffered `clients` channel; `all` (guarded
// by mu) tracks every live client for Close and replacement.
type mcpBridgeClientPool struct {
	binPath        string
	acquireTimeout time.Duration // max wait in borrow; <=0 waits until ctx is done
	mu             sync.Mutex    // guards all
	clients        chan bridgePoolClient
	all            []bridgePoolClient
	info           mcp.ServerInfo // captured once from the first client at startup
}

// newMCPBridgeClientPool starts `size` MCP clients (minimum 1). On any startup
// failure the already-started clients are closed before returning the error.
func newMCPBridgeClientPool(ctx context.Context, binPath string, size int, acquireTimeout time.Duration) (*mcpBridgeClientPool, error) {
	if size <= 0 {
		size = 1
	}
	pool := &mcpBridgeClientPool{
		binPath:        binPath,
		acquireTimeout: acquireTimeout,
		clients:        make(chan bridgePoolClient, size),
		all:            make([]bridgePoolClient, 0, size),
	}
	for i := 0; i < size; i++ {
		client, err := newBridgePoolClient(ctx, binPath)
		if err != nil {
			pool.Close()
			return nil, fmt.Errorf("start bridge client %d/%d: %w", i+1, size, err)
		}
		if i == 0 {
			// All clients run the same binary, so the first one's info stands
			// for the whole pool.
			pool.info = client.ServerInfo()
		}
		pool.all = append(pool.all, client)
		pool.clients <- client
	}
	return pool, nil
}

// ServerInfo returns the server identity captured at pool startup.
func (p *mcpBridgeClientPool) ServerInfo() mcp.ServerInfo {
	return p.info
}

// Close shuts down every client ever added to the pool.
func (p *mcpBridgeClientPool) Close() {
	for _, client := range p.all {
		client.Close()
	}
}

// borrow takes an idle client, waiting at most acquireTimeout (when positive).
// A timeout surfaces as bridge.ErrBackendBusy so callers can return HTTP 503;
// parent-context cancellation surfaces as ctx.Err().
func (p *mcpBridgeClientPool) borrow(ctx context.Context) (bridgePoolClient, error) {
	if p.acquireTimeout <= 0 {
		select {
		case client := <-p.clients:
			return client, nil
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	// WithTimeoutCause lets us distinguish "pool busy" from the caller's own
	// cancellation after the select fires.
	acquireCtx, cancel := context.WithTimeoutCause(ctx, p.acquireTimeout, bridge.ErrBackendBusy)
	defer cancel()

	select {
	case client := <-p.clients:
		return client, nil
	case <-acquireCtx.Done():
		if errors.Is(context.Cause(acquireCtx), bridge.ErrBackendBusy) {
			return nil, bridge.ErrBackendBusy
		}
		return nil, ctx.Err()
	}
}

// release returns a healthy client to the idle channel.
func (p *mcpBridgeClientPool) release(client bridgePoolClient) {
	if client == nil {
		return
	}
	p.clients <- client
}

// Call forwards a raw JSON-RPC call through a pooled client. The call runs in
// a goroutine so that a context cancellation can abandon a wedged client: the
// abandoned client is closed and replaced asynchronously instead of being
// returned to the pool (its stdio stream may be mid-message).
func (p *mcpBridgeClientPool) Call(ctx context.Context, method string, params interface{}) (json.RawMessage, error) {
	client, err := p.borrow(ctx)
	if err != nil {
		return nil, err
	}

	type callResult struct {
		result json.RawMessage
		err    error
	}

	// Buffered so the goroutine never leaks if we stop listening.
	resultCh := make(chan callResult, 1)
	go func() {
		result, callErr := client.Call(ctx, method, params)
		resultCh <- callResult{result: result, err: callErr}
	}()

	select {
	case out := <-resultCh:
		p.release(client)
		return out.result, out.err
	case <-ctx.Done():
		client.Close()
		go p.replaceClientAsync(client)
		return nil, ctx.Err()
	}
}

// CallTool mirrors Call for MCP tool invocations, with the same
// abandon-and-replace handling on cancellation.
func (p *mcpBridgeClientPool) CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) {
	client, err := p.borrow(ctx)
	if err != nil {
		return nil, err
	}

	type toolCallResult struct {
		result *mcp.ToolResult
		err    error
	}

	resultCh := make(chan toolCallResult, 1)
	go func() {
		result, callErr := client.CallTool(ctx, name, params)
		resultCh <- toolCallResult{result: result, err: callErr}
	}()

	select {
	case out := <-resultCh:
		p.release(client)
		return out.result, out.err
	case <-ctx.Done():
		client.Close()
		go p.replaceClientAsync(client)
		return nil, ctx.Err()
	}
}

// replaceClientAsync spins up a replacement for a dead client, swaps it into
// `all`, and releases it to the idle channel. On startup failure the pool
// simply shrinks by one (logged, not fatal).
func (p *mcpBridgeClientPool) replaceClientAsync(dead bridgePoolClient) {
	replacementCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	replacement, err := newBridgePoolClient(replacementCtx, p.binPath)
	if err != nil {
		slog.Error("failed to replace timed out bridge client", "err", err)
		return
	}

	p.mu.Lock()
	for i, client := range p.all {
		if client == dead {
			p.all[i] = replacement
			break
		}
	}
	p.mu.Unlock()

	p.release(replacement)
}
// indexToolClient is the minimal MCP client surface needed for tool calls;
// an interface so tests can substitute fakes via newIndexToolClient.
type indexToolClient interface {
	CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error)
	Close()
}

// newIndexToolClient constructs a real MCP client; overridable in tests.
var newIndexToolClient = func(ctx context.Context, binPath string) (indexToolClient, error) {
	return mcp.NewClient(ctx, binPath)
}

// mcpToolClientPool is a fixed-size pool of MCP clients used for indexing and
// discovery tool calls. Unlike the bridge pool it supports recycling a client
// after maxUses successful calls (maxUses <= 0 disables recycling).
type mcpToolClientPool struct {
	binPath string
	maxUses int // successful calls before a client is retired; <=0 = never
	mu      sync.Mutex
	clients chan indexToolClient
	all     []indexToolClient
	uses    map[indexToolClient]int // successful-call counts, guarded by mu
}

// newMCPToolClientPool starts `size` clients (minimum 1), closing any
// already-started clients if a later one fails to launch.
func newMCPToolClientPool(ctx context.Context, binPath string, size int, maxUses int) (*mcpToolClientPool, error) {
	if size <= 0 {
		size = 1
	}
	pool := &mcpToolClientPool{
		binPath: binPath,
		maxUses: maxUses,
		clients: make(chan indexToolClient, size),
		all:     make([]indexToolClient, 0, size),
		uses:    make(map[indexToolClient]int, size),
	}
	for i := 0; i < size; i++ {
		client, err := newIndexToolClient(ctx, binPath)
		if err != nil {
			pool.Close()
			return nil, fmt.Errorf("start indexer client %d/%d: %w", i+1, size, err)
		}
		pool.all = append(pool.all, client)
		pool.uses[client] = 0
		pool.clients <- client
	}
	return pool, nil
}

// Close shuts down every client ever added to the pool.
func (p *mcpToolClientPool) Close() {
	for _, client := range p.all {
		client.Close()
	}
}

// borrow takes an idle client or fails when ctx is done (no acquire timeout
// here — indexing callers are expected to wait).
func (p *mcpToolClientPool) borrow(ctx context.Context) (indexToolClient, error) {
	select {
	case client := <-p.clients:
		return client, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// release returns a healthy client to the idle channel.
func (p *mcpToolClientPool) release(client indexToolClient) {
	if client == nil {
		return
	}
	p.clients <- client
}

// retire closes a client and kicks off an asynchronous replacement.
func (p *mcpToolClientPool) retire(client indexToolClient) {
	if client == nil {
		return
	}
	client.Close()
	go p.replaceClientAsync(client)
}

// shouldRecycle records one successful use and reports whether the client has
// now reached maxUses and must be retired.
func (p *mcpToolClientPool) shouldRecycle(client indexToolClient) bool {
	if p.maxUses <= 0 || client == nil {
		return false
	}

	p.mu.Lock()
	defer p.mu.Unlock()

	next := p.uses[client] + 1
	p.uses[client] = next
	return next >= p.maxUses
}

// CallTool runs one tool call on a pooled client. The call runs in a
// goroutine so a cancelled ctx can abandon a wedged client; a client is
// retired (not released) after any error, after cancellation, or once it hits
// its use quota — its stdio stream may otherwise be left mid-message.
func (p *mcpToolClientPool) CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) {
	client, err := p.borrow(ctx)
	if err != nil {
		return nil, err
	}

	type toolCallResult struct {
		result *mcp.ToolResult
		err    error
	}

	// Buffered so the goroutine never leaks if we stop listening.
	resultCh := make(chan toolCallResult, 1)
	go func() {
		result, err := client.CallTool(ctx, name, params)
		resultCh <- toolCallResult{result: result, err: err}
	}()

	select {
	case out := <-resultCh:
		if out.err != nil {
			p.retire(client)
			return nil, out.err
		}
		if p.shouldRecycle(client) {
			p.retire(client)
		} else {
			p.release(client)
		}
		return out.result, out.err
	case <-ctx.Done():
		p.retire(client)
		return nil, ctx.Err()
	}
}

// replaceClientAsync starts a replacement client, swaps it into the pool's
// bookkeeping, and releases it for reuse. On failure the pool shrinks by one.
func (p *mcpToolClientPool) replaceClientAsync(dead indexToolClient) {
	replacementCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	replacement, err := newIndexToolClient(replacementCtx, p.binPath)
	if err != nil {
		slog.Error("failed to replace timed out MCP client", "err", err)
		return
	}

	p.mu.Lock()
	delete(p.uses, dead)
	for i, client := range p.all {
		if client == dead {
			p.all[i] = replacement
			break
		}
	}
	p.uses[replacement] = 0
	p.mu.Unlock()

	p.release(replacement)
}

// mcpIndexClientPool specializes the tool pool for index_repository calls.
type mcpIndexClientPool struct {
	*mcpToolClientPool
}

// newMCPIndexClientPool builds an index pool with per-client use recycling.
func newMCPIndexClientPool(ctx context.Context, binPath string, size int, maxUses int) (*mcpIndexClientPool, error) {
	pool, err := newMCPToolClientPool(ctx, binPath, size, maxUses)
	if err != nil {
		return nil, err
	}
	return &mcpIndexClientPool{mcpToolClientPool: pool}, nil
}

// IndexRepository invokes the index_repository tool for repoPath in the given
// mode, translating both transport errors and tool-level IsError results into
// Go errors.
func (p *mcpIndexClientPool) IndexRepository(ctx context.Context, repoPath, mode string) error {
	result, err := p.CallTool(ctx, "index_repository", map[string]interface{}{
		"repo_path": repoPath,
		"mode":      mode,
	})
	if err != nil {
		return fmt.Errorf("index_repository: %w", err)
	}
	if result.IsError {
		msg := "index_repository returned error"
		if len(result.Content) > 0 {
			msg = result.Content[0].Text
		}
		return fmt.Errorf("index_repository: %s", msg)
	}
	return nil
}
// mcpDiscoveryClientPool specializes the tool pool for discovery calls;
// maxUses is 0, so discovery clients are never recycled by quota.
type mcpDiscoveryClientPool struct {
	*mcpToolClientPool
}

// newMCPDiscoveryClientPool builds a discovery pool of the given size.
func newMCPDiscoveryClientPool(ctx context.Context, binPath string, size int) (*mcpDiscoveryClientPool, error) {
	pool, err := newMCPToolClientPool(ctx, binPath, size, 0)
	if err != nil {
		return nil, err
	}
	return &mcpDiscoveryClientPool{mcpToolClientPool: pool}, nil
}

// bridgeClient is the backend surface the HTTP bridge forwards to.
type bridgeClient interface {
	ServerInfo() mcp.ServerInfo
	Call(ctx context.Context, method string, params interface{}) (json.RawMessage, error)
	CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error)
}

// mcpBridgeBackend implements bridge.Backend by forwarding to the MCP client.
// It also injects the fleet-level discover_projects tool, which is served
// locally by the discovery service rather than the binary.
type mcpBridgeBackend struct {
	client    bridgeClient
	discovery discovery.Service
}

// Call dispatches a JSON-RPC method from the bridge: initialize and ping are
// answered locally, tools/list is forwarded and augmented with the discovery
// tool, tools/call is routed either to discovery or to the binary, and
// anything else yields bridge.ErrMethodNotFound.
func (b *mcpBridgeBackend) Call(ctx context.Context, method string, params json.RawMessage) (json.RawMessage, error) {
	if b.client == nil {
		return nil, bridge.ErrBackendUnavailable
	}

	switch method {
	case "initialize":
		return b.initialize(params)
	case "ping":
		return json.RawMessage(`{}`), nil
	case "tools/list":
		raw, err := b.client.Call(ctx, "tools/list", nil)
		if err != nil {
			return nil, err
		}
		return b.appendDiscoveryTool(raw)
	case "tools/call":
		var paramMap map[string]interface{}
		if len(params) > 0 {
			if err := json.Unmarshal(params, &paramMap); err != nil {
				return nil, fmt.Errorf("parse params: %w", err)
			}
		}

		name, _ := paramMap["name"].(string)
		if name == "" {
			return nil, errors.New("missing tool name")
		}
		args, _ := paramMap["arguments"].(map[string]interface{})
		// discover_projects is served by the fleet itself, not the binary.
		if name == discovery.NewDefinition().Name {
			return b.callDiscoveryTool(ctx, args)
		}

		result, err := b.client.CallTool(ctx, name, args)
		if err != nil {
			return nil, err
		}

		return json.Marshal(result)
	default:
		return nil, bridge.ErrMethodNotFound
	}
}

// appendDiscoveryTool adds the discover_projects definition to the binary's
// tools/list response; a nil discovery service passes the response through.
func (b *mcpBridgeBackend) appendDiscoveryTool(raw json.RawMessage) (json.RawMessage, error) {
	if b.discovery == nil {
		return raw, nil
	}

	var payload struct {
		Tools []map[string]interface{} `json:"tools"`
	}
	if err := json.Unmarshal(raw, &payload); err != nil {
		return nil, fmt.Errorf("parse tools/list response: %w", err)
	}

	def := b.discovery.Definition()
	tool := map[string]interface{}{
		"name":        def.Name,
		"description": def.Description,
		"inputSchema": def.InputSchema,
	}
	payload.Tools = append(payload.Tools, tool)
	return json.Marshal(payload)
}

// callDiscoveryTool validates and executes a discover_projects call, wrapping
// the JSON response as a single text content block in an MCP ToolResult.
func (b *mcpBridgeBackend) callDiscoveryTool(ctx context.Context, args map[string]interface{}) (json.RawMessage, error) {
	if b.discovery == nil {
		return nil, errors.New("discover_projects unavailable")
	}

	// Round-trip the loosely-typed args through JSON into the typed request.
	var req discovery.Request
	if args != nil {
		rawArgs, err := json.Marshal(args)
		if err != nil {
			return nil, fmt.Errorf("marshal discover_projects args: %w", err)
		}
		if err := json.Unmarshal(rawArgs, &req); err != nil {
			return nil, fmt.Errorf("parse discover_projects args: %w", err)
		}
	}
	req.Query = strings.TrimSpace(req.Query)
	if req.Query == "" {
		return nil, errors.New("discover_projects: query is required")
	}
	if req.Limit <= 0 {
		req.Limit = 5
	}
	// Graph confidence defaults to on unless the caller said otherwise.
	if _, ok := args["include_graph_confidence"]; !ok {
		req.IncludeGraphConfidence = true
	}

	resp, err := b.discovery.DiscoverProjects(ctx, req)
	if err != nil {
		return nil, err
	}
	text, err := json.Marshal(resp)
	if err != nil {
		return nil, fmt.Errorf("marshal discover_projects response: %w", err)
	}

	return json.Marshal(mcp.ToolResult{
		Content: []mcp.Content{{Type: "text", Text: string(text)}},
		IsError: false,
	})
}

// initialize answers the MCP initialize handshake locally: it echoes the
// client's protocol version when supported (otherwise the server's newest),
// advertises the tools capability, and reports the binary's server info.
func (b *mcpBridgeBackend) initialize(params json.RawMessage) (json.RawMessage, error) {
	type initializeParams struct {
		ProtocolVersion string `json:"protocolVersion"`
	}
	type initializeResult struct {
		ProtocolVersion string                 `json:"protocolVersion"`
		Capabilities    map[string]interface{} `json:"capabilities"`
		ServerInfo      mcp.ServerInfo         `json:"serverInfo"`
	}

	version := supportedProtocolVersions[0]
	if len(params) > 0 {
		var p initializeParams
		if err := json.Unmarshal(params, &p); err != nil {
			return nil, fmt.Errorf("parse initialize params: %w", err)
		}
		for _, supported := range supportedProtocolVersions {
			if p.ProtocolVersion == supported {
				version = supported
				break
			}
		}
	}

	return json.Marshal(initializeResult{
		ProtocolVersion: version,
		Capabilities: map[string]interface{}{
			"tools": map[string]interface{}{},
		},
		ServerInfo: b.client.ServerInfo(),
	})
}
// fakeRequestAuthenticator records calls and accepts exactly one token.
type fakeRequestAuthenticator struct {
	token string
	calls int
}

// Authenticate counts the call and compares the presented bearer token.
func (f *fakeRequestAuthenticator) Authenticate(_ context.Context, bearerToken string) error {
	f.calls++
	if bearerToken != f.token {
		return errors.New("unauthorized")
	}
	return nil
}

// fakeBridgeClient records the last Call/CallTool arguments and returns
// canned results, standing in for a real MCP client.
type fakeBridgeClient struct {
	info       mcp.ServerInfo
	callCtx    context.Context
	callMethod string
	callParams interface{}
	callResult json.RawMessage
	callErr    error
	toolCtx    context.Context
	toolName   string
	toolArgs   map[string]interface{}
	toolResult *mcp.ToolResult
	toolErr    error
}

func (f *fakeBridgeClient) ServerInfo() mcp.ServerInfo {
	return f.info
}

func (f *fakeBridgeClient) Call(ctx context.Context, method string, params interface{}) (json.RawMessage, error) {
	f.callCtx = ctx
	f.callMethod = method
	f.callParams = params
	return f.callResult, f.callErr
}

func (f *fakeBridgeClient) CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) {
	f.toolCtx = ctx
	f.toolName = name
	f.toolArgs = params
	return f.toolResult, f.toolErr
}

// fakeDiscoverer records the last request and returns a canned response.
type fakeDiscoverer struct {
	definition discovery.ToolDefinition
	request    discovery.Request
	response   discovery.Response
	err        error
}

func (f *fakeDiscoverer) Definition() discovery.ToolDefinition {
	return f.definition
}

func (f *fakeDiscoverer) DiscoverProjects(_ context.Context, req discovery.Request) (discovery.Response, error) {
	f.request = req
	return f.response, f.err
}

// TestMCPBridgeBackendInitializeNegotiatesProtocol verifies that a supported
// client protocol version is echoed back along with server info and the
// tools capability.
func TestMCPBridgeBackendInitializeNegotiatesProtocol(t *testing.T) {
	backend := &mcpBridgeBackend{
		client: &fakeBridgeClient{
			info: mcp.ServerInfo{Name: "codebase-memory-mcp", Version: "0.10.0"},
		},
	}

	raw, err := backend.Call(context.Background(), "initialize", json.RawMessage(`{"protocolVersion":"2025-03-26"}`))
	if err != nil {
		t.Fatalf("initialize: %v", err)
	}

	var result struct {
		ProtocolVersion string                 `json:"protocolVersion"`
		Capabilities    map[string]interface{} `json:"capabilities"`
		ServerInfo      mcp.ServerInfo         `json:"serverInfo"`
	}
	if err := json.Unmarshal(raw, &result); err != nil {
		t.Fatalf("parse initialize result: %v", err)
	}

	if result.ProtocolVersion != "2025-03-26" {
		t.Errorf("protocolVersion: want 2025-03-26, got %q", result.ProtocolVersion)
	}
	if result.ServerInfo.Version != "0.10.0" {
		t.Errorf("server version: want 0.10.0, got %q", result.ServerInfo.Version)
	}
	if _, ok := result.Capabilities["tools"]; !ok {
		t.Errorf("capabilities.tools: expected tools capability")
	}
}

// TestMCPBridgeBackendForwardsToolsList verifies plain pass-through of
// tools/list when no discovery service is configured.
func TestMCPBridgeBackendForwardsToolsList(t *testing.T) {
	client := &fakeBridgeClient{
		callResult: json.RawMessage(`{"tools":[{"name":"list_projects"}]}`),
	}
	backend := &mcpBridgeBackend{client: client}

	raw, err := backend.Call(context.Background(), "tools/list", nil)
	if err != nil {
		t.Fatalf("tools/list: %v", err)
	}

	if client.callMethod != "tools/list" {
		t.Errorf("call method: want tools/list, got %q", client.callMethod)
	}
	if client.callCtx == nil {
		t.Error("call ctx: expected non-nil context")
	}
	if string(raw) != `{"tools":[{"name":"list_projects"}]}` {
		t.Errorf("raw result: got %s", raw)
	}
}

// TestMCPBridgeBackendToolsListIncludesDiscoverProjects verifies that the
// discovery tool definition is appended after the binary's own tools.
func TestMCPBridgeBackendToolsListIncludesDiscoverProjects(t *testing.T) {
	client := &fakeBridgeClient{
		callResult: json.RawMessage(`{"tools":[{"name":"list_projects"}]}`),
	}
	backend := &mcpBridgeBackend{
		client: client,
		discovery: &fakeDiscoverer{
			definition: discovery.ToolDefinition{
				Name:        "discover_projects",
				Description: "Discover likely repos",
				InputSchema: map[string]interface{}{"type": "object"},
			},
		},
	}

	raw, err := backend.Call(context.Background(), "tools/list", nil)
	if err != nil {
		t.Fatalf("tools/list: %v", err)
	}

	var result struct {
		Tools []struct {
			Name string `json:"name"`
		} `json:"tools"`
	}
	if err := json.Unmarshal(raw, &result); err != nil {
		t.Fatalf("parse tools/list result: %v", err)
	}

	if len(result.Tools) != 2 {
		t.Fatalf("tools count: want 2, got %d", len(result.Tools))
	}
	if result.Tools[0].Name != "list_projects" {
		t.Fatalf("first tool: want list_projects, got %q", result.Tools[0].Name)
	}
	if result.Tools[1].Name != "discover_projects" {
		t.Fatalf("second tool: want discover_projects, got %q", result.Tools[1].Name)
	}
}

// TestMCPBridgeBackendForwardsToolsCall verifies name/argument forwarding and
// ToolResult marshalling for ordinary tool calls.
func TestMCPBridgeBackendForwardsToolsCall(t *testing.T) {
	client := &fakeBridgeClient{
		toolResult: &mcp.ToolResult{
			Content: []mcp.Content{{Type: "text", Text: "ok"}},
		},
	}
	backend := &mcpBridgeBackend{client: client}

	raw, err := backend.Call(context.Background(), "tools/call", json.RawMessage(`{"name":"list_projects","arguments":{"project":"demo"}}`))
	if err != nil {
		t.Fatalf("tools/call: %v", err)
	}

	if client.toolName != "list_projects" {
		t.Errorf("tool name: want list_projects, got %q", client.toolName)
	}
	if client.toolCtx == nil {
		t.Error("tool ctx: expected non-nil context")
	}
	if got := client.toolArgs["project"]; got != "demo" {
		t.Errorf("tool args.project: want demo, got %v", got)
	}
	if string(raw) != `{"content":[{"type":"text","text":"ok"}],"isError":false}` {
		t.Errorf("raw result: got %s", raw)
	}
}

// TestMCPBridgeBackendHandlesDiscoverProjects verifies discover_projects is
// served by the discovery service and its response is wrapped as text content.
func TestMCPBridgeBackendHandlesDiscoverProjects(t *testing.T) {
	backend := &mcpBridgeBackend{
		client: &fakeBridgeClient{},
		discovery: &fakeDiscoverer{
			response: discovery.Response{
				Query: "membership checkout lock",
				PrimaryRepos: []discovery.Candidate{
					{Project: "app-fleet-cache-membership-backend", RepoSlug: "membership-backend"},
				},
			},
		},
	}

	raw, err := backend.Call(context.Background(), "tools/call", json.RawMessage(`{"name":"discover_projects","arguments":{"query":"membership checkout lock","limit":3}}`))
	if err != nil {
		t.Fatalf("tools/call discover_projects: %v", err)
	}

	var result struct {
		Content []struct {
			Type string `json:"type"`
			Text string `json:"text"`
		} `json:"content"`
		IsError bool `json:"isError"`
	}
	if err := json.Unmarshal(raw, &result); err != nil {
		t.Fatalf("parse discover_projects result: %v", err)
	}
	if result.IsError {
		t.Fatal("discover_projects result unexpectedly marked as error")
	}
	if len(result.Content) != 1 {
		t.Fatalf("content count: want 1, got %d", len(result.Content))
	}

	var payload discovery.Response
	if err := json.Unmarshal([]byte(result.Content[0].Text), &payload); err != nil {
		t.Fatalf("parse discover_projects payload: %v", err)
	}
	if payload.Query != "membership checkout lock" {
		t.Fatalf("query: want %q, got %q", "membership checkout lock", payload.Query)
	}
	if len(payload.PrimaryRepos) != 1 || payload.PrimaryRepos[0].RepoSlug != "membership-backend" {
		t.Fatalf("unexpected primary repos: %+v", payload.PrimaryRepos)
	}
}

// TestMCPBridgeBackendRejectsUnknownMethod verifies unsupported JSON-RPC
// methods map to bridge.ErrMethodNotFound.
func TestMCPBridgeBackendRejectsUnknownMethod(t *testing.T) {
	backend := &mcpBridgeBackend{client: &fakeBridgeClient{}}

	_, err := backend.Call(context.Background(), "resources/list", nil)
	if err == nil {
		t.Fatal("expected error for unknown method")
	}
	if err != bridge.ErrMethodNotFound {
		t.Fatalf("want ErrMethodNotFound, got %v", err)
	}
}

// TestMakeAuthMiddlewareUsesAuthenticatorWhenConfigured verifies a valid
// token accepted by the dynamic authenticator passes through.
func TestMakeAuthMiddlewareUsesAuthenticatorWhenConfigured(t *testing.T) {
	auth := &fakeRequestAuthenticator{token: "ghp-valid"}
	handler := makeAuthMiddleware("legacy-token", auth)(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusAccepted)
	})

	req := httptest.NewRequest(http.MethodGet, "/status", nil)
	req.Header.Set("Authorization", "Bearer ghp-valid")
	rr := httptest.NewRecorder()
	handler(rr, req)

	if rr.Code != http.StatusAccepted {
		t.Fatalf("status: want %d, got %d", http.StatusAccepted, rr.Code)
	}
	if auth.calls != 1 {
		t.Fatalf("auth calls: want 1, got %d", auth.calls)
	}
}

// TestMakeAuthMiddlewareRejectsLegacyBearerWhenAuthenticatorConfigured
// verifies that the static token is ignored once an authenticator exists.
func TestMakeAuthMiddlewareRejectsLegacyBearerWhenAuthenticatorConfigured(t *testing.T) {
	auth := &fakeRequestAuthenticator{token: "ghp-valid"}
	handler := makeAuthMiddleware("legacy-token", auth)(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusAccepted)
	})

	req := httptest.NewRequest(http.MethodGet, "/status", nil)
	req.Header.Set("Authorization", "Bearer legacy-token")
	rr := httptest.NewRecorder()
	handler(rr, req)

	if rr.Code != http.StatusUnauthorized {
		t.Fatalf("status: want %d, got %d", http.StatusUnauthorized, rr.Code)
	}
	if auth.calls != 1 {
		t.Fatalf("auth calls: want 1, got %d", auth.calls)
	}
}

// TestMakeAuthMiddlewareFallsBackToStaticBearerToken verifies static-token
// auth still works when no authenticator is configured.
func TestMakeAuthMiddlewareFallsBackToStaticBearerToken(t *testing.T) {
	handler := makeAuthMiddleware("legacy-token", nil)(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusAccepted)
	})

	req := httptest.NewRequest(http.MethodGet, "/status", nil)
	req.Header.Set("Authorization", "Bearer legacy-token")
	rr := httptest.NewRecorder()
	handler(rr, req)

	if rr.Code != http.StatusAccepted {
		t.Fatalf("status: want %d, got %d", http.StatusAccepted, rr.Code)
	}
}

// fakeIndexToolClient simulates index_repository calls while tracking the
// peak number of concurrent in-flight calls via shared atomic counters.
type fakeIndexToolClient struct {
	inFlight  *atomic.Int64
	maxFlight *atomic.Int64
	delay     time.Duration
	toolErr   error
	result    *mcp.ToolResult
}

func (f *fakeIndexToolClient) CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) {
	if name != "index_repository" {
		return nil, errors.New("unexpected tool")
	}
	current := f.inFlight.Add(1)
	// CAS loop records the high-water mark of concurrent calls.
	for {
		old := f.maxFlight.Load()
		if current <= old || f.maxFlight.CompareAndSwap(old, current) {
			break
		}
	}
	defer f.inFlight.Add(-1)

	if f.delay > 0 {
		select {
		case <-time.After(f.delay):
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	if f.toolErr != nil {
		return nil, f.toolErr
	}
	if f.result != nil {
		return f.result, nil
	}
	return &mcp.ToolResult{}, nil
}

func (f *fakeIndexToolClient) Close() {}

// blockingToolClient hangs in CallTool until closed or the context ends,
// used to exercise timeout/retire paths.
type blockingToolClient struct {
	started chan struct{}
	closed  chan struct{}
	once    sync.Once
}

func newBlockingToolClient() *blockingToolClient {
	return &blockingToolClient{
		started: make(chan struct{}),
		closed:  make(chan struct{}),
	}
}

func (f *blockingToolClient) CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) {
	close(f.started)
	select {
	case <-f.closed:
		return nil, context.DeadlineExceeded
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func (f *blockingToolClient) Close() {
	f.once.Do(func() {
		close(f.closed)
	})
}

// fastToolClient answers immediately with an optional canned result.
type fastToolClient struct {
	result *mcp.ToolResult
}

func (f *fastToolClient) CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) {
	if f.result != nil {
		return f.result, nil
	}
	return &mcp.ToolResult{}, nil
}

func (f *fastToolClient) Close() {}

// failingToolClient always returns its configured error.
type failingToolClient struct {
	err error
}
map[string]interface{}) (*mcp.ToolResult, error) { + return nil, f.err +} + +func (f *failingToolClient) Close() {} + +type blockingBridgeClient struct { + info mcp.ServerInfo + started chan struct{} + once sync.Once +} + +func newBlockingBridgeClient() *blockingBridgeClient { + return &blockingBridgeClient{ + info: mcp.ServerInfo{Name: "codebase-memory-mcp", Version: "test"}, + started: make(chan struct{}), + } +} + +func (f *blockingBridgeClient) ServerInfo() mcp.ServerInfo { + return f.info +} + +func (f *blockingBridgeClient) Call(ctx context.Context, method string, params interface{}) (json.RawMessage, error) { + f.once.Do(func() { close(f.started) }) + <-ctx.Done() + return nil, ctx.Err() +} + +func (f *blockingBridgeClient) CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) { + f.once.Do(func() { close(f.started) }) + <-ctx.Done() + return nil, ctx.Err() +} + +func (f *blockingBridgeClient) Close() {} + +type fastBridgeClient struct { + info mcp.ServerInfo + result json.RawMessage +} + +func (f *fastBridgeClient) ServerInfo() mcp.ServerInfo { + return f.info +} + +func (f *fastBridgeClient) Call(ctx context.Context, method string, params interface{}) (json.RawMessage, error) { + if f.result != nil { + return f.result, nil + } + return json.RawMessage(`{}`), nil +} + +func (f *fastBridgeClient) CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) { + return &mcp.ToolResult{}, nil +} + +func (f *fastBridgeClient) Close() {} + +func TestMCPIndexClientPoolRunsConcurrentIndexing(t *testing.T) { + var inFlight atomic.Int64 + var maxFlight atomic.Int64 + + prevFactory := newIndexToolClient + newIndexToolClient = func(ctx context.Context, binPath string) (indexToolClient, error) { + return &fakeIndexToolClient{ + inFlight: &inFlight, + maxFlight: &maxFlight, + delay: 20 * time.Millisecond, + }, nil + } + defer func() { newIndexToolClient = prevFactory }() + + pool, err := 
newMCPIndexClientPool(context.Background(), "/tmp/cbm", 3, 0) + if err != nil { + t.Fatalf("newMCPIndexClientPool: %v", err) + } + defer pool.Close() + + errCh := make(chan error, 6) + for i := 0; i < 6; i++ { + go func() { + errCh <- pool.IndexRepository(context.Background(), "/tmp/repo", "moderate") + }() + } + for i := 0; i < 6; i++ { + if err := <-errCh; err != nil { + t.Fatalf("IndexRepository: %v", err) + } + } + + if got := maxFlight.Load(); got < 2 { + t.Fatalf("max concurrent workers: want >= 2, got %d", got) + } + if got := maxFlight.Load(); got > 3 { + t.Fatalf("max concurrent workers: want <= 3, got %d", got) + } +} + +func TestMCPIndexClientPoolPropagatesToolErrors(t *testing.T) { + prevFactory := newIndexToolClient + newIndexToolClient = func(ctx context.Context, binPath string) (indexToolClient, error) { + return &fakeIndexToolClient{ + inFlight: &atomic.Int64{}, + maxFlight: &atomic.Int64{}, + result: &mcp.ToolResult{ + IsError: true, + Content: []mcp.Content{{Type: "text", Text: "bad repo"}}, + }, + }, nil + } + defer func() { newIndexToolClient = prevFactory }() + + pool, err := newMCPIndexClientPool(context.Background(), "/tmp/cbm", 1, 0) + if err != nil { + t.Fatalf("newMCPIndexClientPool: %v", err) + } + defer pool.Close() + + err = pool.IndexRepository(context.Background(), "/tmp/repo", "full") + if err == nil { + t.Fatal("expected tool error") + } + if got := err.Error(); got != "index_repository: bad repo" { + t.Fatalf("unexpected error: %s", got) + } +} + +func TestMCPToolClientPoolReplacesTimedOutClient(t *testing.T) { + blocking := newBlockingToolClient() + replacement := &fastToolClient{ + result: &mcp.ToolResult{Content: []mcp.Content{{Type: "text", Text: "ok"}}}, + } + + var factoryCalls atomic.Int64 + prevFactory := newIndexToolClient + newIndexToolClient = func(ctx context.Context, binPath string) (indexToolClient, error) { + switch factoryCalls.Add(1) { + case 1: + return blocking, nil + case 2: + return replacement, nil + default: 
+ return &fastToolClient{ + result: &mcp.ToolResult{Content: []mcp.Content{{Type: "text", Text: "ok"}}}, + }, nil + } + } + defer func() { newIndexToolClient = prevFactory }() + + pool, err := newMCPToolClientPool(context.Background(), "/tmp/cbm", 1, 0) + if err != nil { + t.Fatalf("newMCPToolClientPool: %v", err) + } + defer pool.Close() + + select { + case <-blocking.started: + default: + } + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) + defer cancel() + start := time.Now() + _, err = pool.CallTool(ctx, "search_graph", map[string]interface{}{"project": "demo"}) + if !errors.Is(err, context.DeadlineExceeded) { + t.Fatalf("expected context deadline exceeded, got %v", err) + } + if elapsed := time.Since(start); elapsed > 500*time.Millisecond { + t.Fatalf("timed out call returned too slowly: %s", elapsed) + } + + result, err := pool.CallTool(context.Background(), "search_graph", map[string]interface{}{"project": "demo"}) + if err != nil { + t.Fatalf("replacement client call failed: %v", err) + } + if len(result.Content) != 1 || result.Content[0].Text != "ok" { + t.Fatalf("unexpected replacement result: %+v", result) + } + if got := factoryCalls.Load(); got < 2 { + t.Fatalf("expected replacement factory call, got %d", got) + } +} + +func TestMCPToolClientPoolReplacesErroredClient(t *testing.T) { + failing := &failingToolClient{err: errors.New("write |1: broken pipe")} + replacement := &fastToolClient{ + result: &mcp.ToolResult{Content: []mcp.Content{{Type: "text", Text: "ok"}}}, + } + + var factoryCalls atomic.Int64 + prevFactory := newIndexToolClient + newIndexToolClient = func(ctx context.Context, binPath string) (indexToolClient, error) { + switch factoryCalls.Add(1) { + case 1: + return failing, nil + case 2: + return replacement, nil + default: + return &fastToolClient{ + result: &mcp.ToolResult{Content: []mcp.Content{{Type: "text", Text: "ok"}}}, + }, nil + } + } + defer func() { newIndexToolClient = prevFactory }() + + pool, 
err := newMCPToolClientPool(context.Background(), "/tmp/cbm", 1, 0) + if err != nil { + t.Fatalf("newMCPToolClientPool: %v", err) + } + defer pool.Close() + + _, err = pool.CallTool(context.Background(), "index_repository", map[string]interface{}{"repo_path": "/tmp/repo"}) + if err == nil || !strings.Contains(err.Error(), "broken pipe") { + t.Fatalf("expected broken pipe error, got %v", err) + } + + result, err := pool.CallTool(context.Background(), "index_repository", map[string]interface{}{"repo_path": "/tmp/repo"}) + if err != nil { + t.Fatalf("replacement client call failed: %v", err) + } + if len(result.Content) != 1 || result.Content[0].Text != "ok" { + t.Fatalf("unexpected replacement result: %+v", result) + } + if got := factoryCalls.Load(); got < 2 { + t.Fatalf("expected replacement factory call, got %d", got) + } +} + +func TestMCPToolClientPoolRecyclesClientAfterMaxUses(t *testing.T) { + var factoryCalls atomic.Int64 + prevFactory := newIndexToolClient + newIndexToolClient = func(ctx context.Context, binPath string) (indexToolClient, error) { + switch factoryCalls.Add(1) { + case 1: + return &fastToolClient{ + result: &mcp.ToolResult{Content: []mcp.Content{{Type: "text", Text: "first"}}}, + }, nil + default: + return &fastToolClient{ + result: &mcp.ToolResult{Content: []mcp.Content{{Type: "text", Text: "second"}}}, + }, nil + } + } + defer func() { newIndexToolClient = prevFactory }() + + pool, err := newMCPToolClientPool(context.Background(), "/tmp/cbm", 1, 1) + if err != nil { + t.Fatalf("newMCPToolClientPool: %v", err) + } + defer pool.Close() + + first, err := pool.CallTool(context.Background(), "index_repository", map[string]interface{}{"repo_path": "/tmp/repo"}) + if err != nil { + t.Fatalf("first CallTool: %v", err) + } + if len(first.Content) != 1 || first.Content[0].Text != "first" { + t.Fatalf("unexpected first result: %+v", first) + } + + second, err := pool.CallTool(context.Background(), "index_repository", map[string]interface{}{"repo_path": 
"/tmp/repo"}) + if err != nil { + t.Fatalf("second CallTool: %v", err) + } + if len(second.Content) != 1 || second.Content[0].Text != "second" { + t.Fatalf("unexpected second result: %+v", second) + } + if got := factoryCalls.Load(); got < 2 { + t.Fatalf("expected recycled client, factory calls=%d", got) + } +} + +func TestProjectNameFromPath(t *testing.T) { + cases := map[string]string{ + "/tmp/fleet-cache/platform-backend": "tmp-fleet-cache-platform-backend", + "/tmp//fleet-cache//platform-backend/": "tmp-fleet-cache-platform-backend", + "C:/tmp/fleet-cache/platform-backend": "C-tmp-fleet-cache-platform-backend", + "": "root", + "/": "root", + } + + for input, want := range cases { + if got := projectNameFromPath(input); got != want { + t.Fatalf("projectNameFromPath(%q): want %q, got %q", input, want, got) + } + } +} + +func TestMCPBridgeClientPoolReturnsBusyWhenAcquireTimesOut(t *testing.T) { + blocking := newBlockingBridgeClient() + + prevFactory := newBridgePoolClient + newBridgePoolClient = func(ctx context.Context, binPath string) (bridgePoolClient, error) { + return blocking, nil + } + defer func() { newBridgePoolClient = prevFactory }() + + pool, err := newMCPBridgeClientPool(context.Background(), "/tmp/cbm", 1, 10*time.Millisecond) + if err != nil { + t.Fatalf("newMCPBridgeClientPool: %v", err) + } + defer pool.Close() + + firstCtx, firstCancel := context.WithCancel(context.Background()) + defer firstCancel() + + errCh := make(chan error, 1) + go func() { + _, callErr := pool.Call(firstCtx, "tools/list", nil) + errCh <- callErr + }() + + select { + case <-blocking.started: + case <-time.After(time.Second): + t.Fatal("first bridge call did not start") + } + + start := time.Now() + _, err = pool.Call(context.Background(), "tools/list", nil) + if !errors.Is(err, bridge.ErrBackendBusy) { + t.Fatalf("expected ErrBackendBusy, got %v", err) + } + if elapsed := time.Since(start); elapsed > 500*time.Millisecond { + t.Fatalf("busy call returned too slowly: %s", 
elapsed) + } + + firstCancel() + if callErr := <-errCh; !errors.Is(callErr, context.Canceled) { + t.Fatalf("expected first call to be canceled, got %v", callErr) + } +} + +func TestIsGitHubHTTPSAuthError(t *testing.T) { + if !isGitHubHTTPSAuthError("fatal: could not read Username for 'https://github.com': No such device or address") { + t.Fatal("expected GitHub HTTPS auth error to be detected") + } + if isGitHubHTTPSAuthError("fatal: some other git failure") { + t.Fatal("unexpected auth error match") + } +} + +func TestHasWorkingTreeFilesRejectsGitOnlyClone(t *testing.T) { + root := t.TempDir() + if err := os.Mkdir(filepath.Join(root, ".git"), 0o755); err != nil { + t.Fatalf("mkdir .git: %v", err) + } + + ok, err := hasWorkingTreeFiles(root) + if err != nil { + t.Fatalf("hasWorkingTreeFiles: %v", err) + } + if ok { + t.Fatal("expected git-only directory to be rejected") + } +} + +func TestHasWorkingTreeFilesAcceptsCheckedOutFile(t *testing.T) { + root := t.TempDir() + if err := os.Mkdir(filepath.Join(root, ".git"), 0o755); err != nil { + t.Fatalf("mkdir .git: %v", err) + } + if err := os.WriteFile(filepath.Join(root, "package.json"), []byte("{}"), 0o644); err != nil { + t.Fatalf("write package.json: %v", err) + } + + ok, err := hasWorkingTreeFiles(root) + if err != nil { + t.Fatalf("hasWorkingTreeFiles: %v", err) + } + if !ok { + t.Fatal("expected checked out file to be accepted") + } +} diff --git a/ghl/go.mod b/ghl/go.mod new file mode 100644 index 00000000..1469a1f5 --- /dev/null +++ b/ghl/go.mod @@ -0,0 +1,60 @@ +module github.com/GoHighLevel/codebase-memory-mcp/ghl + +go 1.25.0 + +require ( + github.com/go-chi/chi/v5 v5.2.5 + github.com/robfig/cron/v3 v3.0.1 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + cel.dev/expr v0.25.1 // indirect + cloud.google.com/go v0.123.0 // indirect + cloud.google.com/go/auth v0.20.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + 
cloud.google.com/go/iam v1.7.0 // indirect + cloud.google.com/go/monitoring v1.24.3 // indirect + cloud.google.com/go/storage v1.62.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.1.4 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect + github.com/googleapis/gax-go/v2 v2.21.0 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect + go.opentelemetry.io/otel v1.43.0 // indirect + go.opentelemetry.io/otel/metric v1.43.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect + go.opentelemetry.io/otel/trace v1.43.0 // indirect + golang.org/x/crypto v0.49.0 // indirect + golang.org/x/net v0.52.0 // indirect + golang.org/x/oauth2 v0.36.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.42.0 // 
indirect + golang.org/x/text v0.35.0 // indirect + golang.org/x/time v0.15.0 // indirect + google.golang.org/api v0.276.0 // indirect + google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/grpc v1.80.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect +) diff --git a/ghl/go.sum b/ghl/go.sum new file mode 100644 index 00000000..b10ce161 --- /dev/null +++ b/ghl/go.sum @@ -0,0 +1,109 @@ +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= +cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.7.0 h1:JD3zh0C6LHl16aCn5Akff0+GELdp1+4hmh6ndoFLl8U= +cloud.google.com/go/iam v1.7.0/go.mod h1:tetWZW1PD/m6vcuY2Zj/aU0eCHNPuxedbnbRTyKXvdY= +cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= +cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/storage v1.62.1 h1:Os0G3XbUbjZumkpDUf2Y0rLoXJTCF1kU2kWUujKYXD8= 
+cloud.google.com/go/storage v1.62.1/go.mod h1:cpYz/kRVZ+UQAF1uHeea10/9ewcRbxGoGNKsS9daSXA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 h1:DHa2U07rk8syqvCge0QIGMCE1WxGj9njT44GH7zNJLQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 h1:UnDZ/zFfG1JhH/DqxIZYU/1CUAlTUScoXD/LcM2Ykk8= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0/go.mod h1:IA1C1U7jO/ENqm/vhi7V9YYpBsp+IMyqNrEN94N7tVc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 h1:0s6TxfCu2KHkkZPnBfsQ2y5qia0jl3MMrmBhu3nCOYk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= +github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= +github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= +github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= +github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= 
+github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= +github.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA= +github.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= +github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= +github.com/googleapis/gax-go/v2 v2.21.0 h1:h45NjjzEO3faG9Lg/cFrBh2PgegVVgzqKzuZl/wMbiI= +github.com/googleapis/gax-go/v2 v2.21.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf 
v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= 
+go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= +golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= +golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod 
h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= +google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/ghl/internal/auth/github.go b/ghl/internal/auth/github.go new file mode 100644 index 00000000..2f4c8de6 --- /dev/null +++ b/ghl/internal/auth/github.go @@ -0,0 +1,199 @@ +package auth + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" +) + +const githubAPIVersion = "2022-11-28" + +// GitHubConfig configures bearer-token validation against GitHub. +type GitHubConfig struct { + BaseURL string + AllowedOrgs []string + HTTPClient *http.Client + CacheTTL time.Duration +} + +// GitHubAuthenticator validates incoming bearer tokens against GitHub APIs. 
+// It caches successful token validations (keyed by a SHA-256 digest of the
+// token, see hashToken) so repeated requests do not re-hit the GitHub API.
+type GitHubAuthenticator struct {
+	baseURL     string
+	allowedOrgs []string
+	client      *http.Client
+	cacheTTL    time.Duration
+
+	// mu guards cache.
+	mu    sync.Mutex
+	cache map[string]cacheEntry
+}
+
+// cacheEntry records the outcome of a prior validation until expiresAt.
+// NOTE(review): Authenticate currently stores entries only on success, so err
+// is always nil when written — the field looks reserved for negative caching;
+// confirm whether failed validations were meant to be cached too.
+type cacheEntry struct {
+	expiresAt time.Time
+	err       error
+}
+
+// githubUser is the subset of the GitHub GET /user response we decode.
+type githubUser struct {
+	Login string `json:"login"`
+}
+
+// githubMembership is the subset of the GitHub org-membership response we
+// decode; only the membership state is inspected.
+type githubMembership struct {
+	State string `json:"state"`
+}
+
+// NewGitHubAuthenticator constructs a GitHub-backed token authenticator.
+// Zero-value config fields fall back to defaults: https://api.github.com as
+// the base URL, an HTTP client with a 10s timeout, and a 5-minute cache TTL.
+// The base URL is trimmed of trailing slashes so path concatenation is safe.
+func NewGitHubAuthenticator(cfg GitHubConfig) *GitHubAuthenticator {
+	baseURL := strings.TrimSpace(cfg.BaseURL)
+	if baseURL == "" {
+		baseURL = "https://api.github.com"
+	}
+	client := cfg.HTTPClient
+	if client == nil {
+		client = &http.Client{Timeout: 10 * time.Second}
+	}
+	cacheTTL := cfg.CacheTTL
+	if cacheTTL <= 0 {
+		cacheTTL = 5 * time.Minute
+	}
+	return &GitHubAuthenticator{
+		baseURL:     strings.TrimRight(baseURL, "/"),
+		// Defensive copy: callers may mutate their AllowedOrgs slice later.
+		allowedOrgs: append([]string(nil), cfg.AllowedOrgs...),
+		client:      client,
+		cacheTTL:    cacheTTL,
+		cache:       make(map[string]cacheEntry),
+	}
+}
+
+// Authenticate validates the bearer token against GitHub and optional org membership.
+func (a *GitHubAuthenticator) Authenticate(ctx context.Context, bearerToken string) error { + token := strings.TrimSpace(bearerToken) + if token == "" { + return errors.New("missing github token") + } + + cacheKey := hashToken(token) + if err, ok := a.cached(cacheKey); ok { + return err + } + + err := a.authenticateUncached(ctx, token) + if err == nil { + a.store(cacheKey, nil) + } + return err +} + +func (a *GitHubAuthenticator) authenticateUncached(ctx context.Context, token string) error { + user, err := a.fetchUser(ctx, token) + if err != nil { + return err + } + if len(a.allowedOrgs) == 0 { + return nil + } + for _, org := range a.allowedOrgs { + ok, err := a.isActiveOrgMember(ctx, token, org) + if err == nil && ok { + return nil + } + } + return fmt.Errorf("github user %q is not an active member of allowed orgs", user.Login) +} + +func (a *GitHubAuthenticator) fetchUser(ctx context.Context, token string) (*githubUser, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, a.baseURL+"/user", nil) + if err != nil { + return nil, err + } + addGitHubHeaders(req, token) + + resp, err := a.client.Do(req) + if err != nil { + return nil, fmt.Errorf("github /user request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("github /user returned %d", resp.StatusCode) + } + + var user githubUser + if err := json.NewDecoder(resp.Body).Decode(&user); err != nil { + return nil, fmt.Errorf("decode github /user: %w", err) + } + if user.Login == "" { + return nil, errors.New("github /user missing login") + } + return &user, nil +} + +func (a *GitHubAuthenticator) isActiveOrgMember(ctx context.Context, token, org string) (bool, error) { + org = strings.TrimSpace(org) + if org == "" { + return false, nil + } + reqURL := a.baseURL + "/user/memberships/orgs/" + url.PathEscape(org) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqURL, nil) + if err != nil { + return false, err + } + 
addGitHubHeaders(req, token) + + resp, err := a.client.Do(req) + if err != nil { + return false, fmt.Errorf("github org membership request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return false, fmt.Errorf("github org membership returned %d", resp.StatusCode) + } + + var membership githubMembership + if err := json.NewDecoder(resp.Body).Decode(&membership); err != nil { + return false, fmt.Errorf("decode github org membership: %w", err) + } + return strings.EqualFold(membership.State, "active"), nil +} + +func addGitHubHeaders(req *http.Request, token string) { + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Accept", "application/vnd.github+json") + req.Header.Set("X-GitHub-Api-Version", githubAPIVersion) + req.Header.Set("User-Agent", "codebase-memory-mcp-ghl") +} + +func hashToken(token string) string { + sum := sha256.Sum256([]byte(token)) + return hex.EncodeToString(sum[:]) +} + +func (a *GitHubAuthenticator) cached(key string) (error, bool) { + a.mu.Lock() + defer a.mu.Unlock() + entry, ok := a.cache[key] + if !ok { + return nil, false + } + if time.Now().After(entry.expiresAt) { + delete(a.cache, key) + return nil, false + } + return entry.err, true +} + +func (a *GitHubAuthenticator) store(key string, err error) { + a.mu.Lock() + defer a.mu.Unlock() + a.cache[key] = cacheEntry{ + expiresAt: time.Now().Add(a.cacheTTL), + err: err, + } +} diff --git a/ghl/internal/auth/github_test.go b/ghl/internal/auth/github_test.go new file mode 100644 index 00000000..856e9142 --- /dev/null +++ b/ghl/internal/auth/github_test.go @@ -0,0 +1,178 @@ +package auth + +import ( + "context" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + "time" +) + +func TestGitHubAuthenticatorAcceptsValidUserToken(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/user": + w.Header().Set("Content-Type", 
// A valid token with no allowed-org restriction is accepted after /user succeeds.
func TestGitHubAuthenticatorAcceptsValidUserToken(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/user":
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"login":"octocat"}`))
		default:
			http.NotFound(w, r)
		}
	}))
	defer server.Close()

	auth := NewGitHubAuthenticator(GitHubConfig{
		BaseURL:  server.URL,
		CacheTTL: time.Minute,
	})

	if err := auth.Authenticate(context.Background(), "ghp-valid"); err != nil {
		t.Fatalf("Authenticate: unexpected error: %v", err)
	}
}

// A 404 from the org-membership endpoint must reject the token when an
// allowed-org list is configured.
func TestGitHubAuthenticatorRejectsUserOutsideAllowedOrg(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/user":
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"login":"octocat"}`))
		case "/user/memberships/orgs/GoHighLevel":
			http.Error(w, "not found", http.StatusNotFound)
		default:
			http.NotFound(w, r)
		}
	}))
	defer server.Close()

	auth := NewGitHubAuthenticator(GitHubConfig{
		BaseURL:     server.URL,
		AllowedOrgs: []string{"GoHighLevel"},
		CacheTTL:    time.Minute,
	})

	if err := auth.Authenticate(context.Background(), "ghp-valid"); err == nil {
		t.Fatal("Authenticate: expected org membership error, got nil")
	}
}

// An "active" membership state satisfies the allowed-org requirement.
func TestGitHubAuthenticatorAcceptsActiveOrgMember(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/user":
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"login":"octocat"}`))
		case "/user/memberships/orgs/GoHighLevel":
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"state":"active"}`))
		default:
			http.NotFound(w, r)
		}
	}))
	defer server.Close()

	auth := NewGitHubAuthenticator(GitHubConfig{
		BaseURL:     server.URL,
		AllowedOrgs: []string{"GoHighLevel"},
		CacheTTL:    time.Minute,
	})

	if err := auth.Authenticate(context.Background(), "ghp-valid"); err != nil {
		t.Fatalf("Authenticate: unexpected error: %v", err)
	}
}

// A second Authenticate with the same token must be served from the cache,
// so /user is hit exactly once.
func TestGitHubAuthenticatorCachesSuccessfulValidation(t *testing.T) {
	var userCalls atomic.Int32
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/user":
			userCalls.Add(1)
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"login":"octocat"}`))
		default:
			http.NotFound(w, r)
		}
	}))
	defer server.Close()

	auth := NewGitHubAuthenticator(GitHubConfig{
		BaseURL:  server.URL,
		CacheTTL: time.Minute,
	})

	if err := auth.Authenticate(context.Background(), "ghp-valid"); err != nil {
		t.Fatalf("Authenticate first: unexpected error: %v", err)
	}
	if err := auth.Authenticate(context.Background(), "ghp-valid"); err != nil {
		t.Fatalf("Authenticate second: unexpected error: %v", err)
	}
	if got := userCalls.Load(); got != 1 {
		t.Fatalf("/user calls: want 1, got %d", got)
	}
}

// A transient 500 from /user must NOT be cached: the next attempt should hit
// GitHub again and succeed.
func TestGitHubAuthenticatorDoesNotCacheTransientFailures(t *testing.T) {
	var userCalls atomic.Int32
	var failFirst atomic.Bool
	failFirst.Store(true)

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/user":
			userCalls.Add(1)
			// Fail only the very first call, then recover.
			if failFirst.CompareAndSwap(true, false) {
				http.Error(w, "temporary failure", http.StatusInternalServerError)
				return
			}
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"login":"octocat"}`))
		default:
			http.NotFound(w, r)
		}
	}))
	defer server.Close()

	auth := NewGitHubAuthenticator(GitHubConfig{
		BaseURL:  server.URL,
		CacheTTL: time.Minute,
	})

	if err := auth.Authenticate(context.Background(), "ghp-valid"); err == nil {
		t.Fatal("Authenticate first: expected transient failure, got nil")
	}
	if err := auth.Authenticate(context.Background(), "ghp-valid"); err != nil {
		t.Fatalf("Authenticate second: unexpected error: %v", err)
	}
	if got := userCalls.Load(); got != 2 {
		t.Fatalf("/user calls: want 2 after transient failure retry, got %d", got)
	}
}

// Membership in ANY allowed org is sufficient, even if earlier orgs deny.
func TestGitHubAuthenticatorAcceptsUserInAnyAllowedOrg(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/user":
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"login":"octocat"}`))
		case "/user/memberships/orgs/OrgOne":
			http.Error(w, "not found", http.StatusNotFound)
		case "/user/memberships/orgs/OrgTwo":
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"state":"active"}`))
		default:
			http.NotFound(w, r)
		}
	}))
	defer server.Close()

	auth := NewGitHubAuthenticator(GitHubConfig{
		BaseURL:     server.URL,
		AllowedOrgs: []string{"OrgOne", "OrgTwo"},
		CacheTTL:    time.Minute,
	})

	if err := auth.Authenticate(context.Background(), "ghp-valid"); err != nil {
		t.Fatalf("Authenticate: unexpected error: %v", err)
	}
}
+ Call(ctx context.Context, method string, params json.RawMessage) (json.RawMessage, error) +} + +// Config configures the HTTP bridge. +type Config struct { + // BearerToken, if non-empty, requires all /mcp requests to carry + // "Authorization: Bearer ". + BearerToken string + // Authenticator, if non-nil, validates bearer tokens dynamically. + // When set, it takes precedence over BearerToken. + Authenticator Authenticator +} + +// Authenticator validates bearer tokens for HTTP requests. +type Authenticator interface { + Authenticate(ctx context.Context, bearerToken string) error +} + +// Handler is an http.Handler that bridges HTTP JSON-RPC requests to the MCP backend. +type Handler struct { + backend Backend + cfg Config +} + +// NewHandler creates a new bridge Handler. +func NewHandler(backend Backend, cfg Config) *Handler { + return &Handler{backend: backend, cfg: cfg} +} + +// jsonrpcRequest is the inbound envelope. +type jsonrpcRequest struct { + JSONRPC string `json:"jsonrpc"` + ID interface{} `json:"id"` + Method string `json:"method"` + Params json.RawMessage `json:"params,omitempty"` +} + +// ServeHTTP routes requests: +// +// GET /health — liveness check, no auth required +// POST /mcp — Streamable HTTP JSON-RPC, auth required if BearerToken is set +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/health" { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"status":"ok"}`)) + return + } + + if r.Method == http.MethodGet { + w.Header().Set("Allow", http.MethodPost) + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + if r.Method != http.MethodPost { + w.Header().Set("Allow", http.MethodPost) + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + // Auth check + if h.cfg.Authenticator != nil { + auth := r.Header.Get("Authorization") + if !strings.HasPrefix(auth, "Bearer ") { + http.Error(w, 
"unauthorized", http.StatusUnauthorized) + return + } + if err := h.cfg.Authenticator.Authenticate(r.Context(), strings.TrimPrefix(auth, "Bearer ")); err != nil { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + } else if h.cfg.BearerToken != "" { + auth := r.Header.Get("Authorization") + if !strings.HasPrefix(auth, "Bearer ") || strings.TrimPrefix(auth, "Bearer ") != h.cfg.BearerToken { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 4<<20)) // 4 MB cap + if err != nil { + http.Error(w, "failed to read body", http.StatusBadRequest) + return + } + + var req jsonrpcRequest + if err := json.Unmarshal(body, &req); err != nil { + http.Error(w, "invalid JSON", http.StatusBadRequest) + return + } + + if req.JSONRPC != "" && req.JSONRPC != "2.0" { + w.Header().Set("Content-Type", "application/json") + writeError(w, req.ID, -32600, "invalid request: jsonrpc must be 2.0") + return + } + + // MCP notifications do not expect a JSON-RPC response body. 
+ if req.ID == nil && strings.HasPrefix(req.Method, "notifications/") { + w.WriteHeader(http.StatusAccepted) + return + } + + result, backendErr := h.backend.Call(r.Context(), req.Method, req.Params) + if backendErr != nil { + switch { + case errors.Is(backendErr, context.Canceled): + return + case errors.Is(backendErr, context.DeadlineExceeded): + http.Error(w, "backend timed out", http.StatusGatewayTimeout) + return + case errors.Is(backendErr, ErrBackendBusy): + w.Header().Set("Retry-After", "1") + http.Error(w, "backend overloaded, retry later", http.StatusServiceUnavailable) + return + case errors.Is(backendErr, ErrMethodNotFound): + w.Header().Set("Content-Type", "application/json") + writeError(w, req.ID, -32601, backendErr.Error()) + default: + w.Header().Set("Content-Type", "application/json") + writeError(w, req.ID, -32603, "backend error: "+backendErr.Error()) + } + return + } + + w.Header().Set("Content-Type", "application/json") + + resp := struct { + JSONRPC string `json:"jsonrpc"` + ID interface{} `json:"id"` + Result json.RawMessage `json:"result"` + }{ + JSONRPC: "2.0", + ID: req.ID, + Result: result, + } + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(resp) +} + +func writeError(w http.ResponseWriter, id interface{}, code int, message string) { + resp := map[string]interface{}{ + "jsonrpc": "2.0", + "id": id, + "error": map[string]interface{}{ + "code": code, + "message": message, + }, + } + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(resp) +} diff --git a/ghl/internal/bridge/bridge_test.go b/ghl/internal/bridge/bridge_test.go new file mode 100644 index 00000000..867fec17 --- /dev/null +++ b/ghl/internal/bridge/bridge_test.go @@ -0,0 +1,317 @@ +package bridge_test + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/bridge" +) + +// ── Fake MCP backend ────────────────────────────────────────── + +type 
// fakeBackend records the last Call it received and returns canned data.
type fakeBackend struct {
	response json.RawMessage
	err      error
	method   string
	params   json.RawMessage
	calls    int
	ctx      context.Context
}

func (f *fakeBackend) Call(ctx context.Context, method string, params json.RawMessage) (json.RawMessage, error) {
	f.ctx = ctx
	f.method = method
	// Clone params so later mutation by the handler cannot corrupt the record.
	f.params = append(json.RawMessage(nil), params...)
	f.calls++
	return f.response, f.err
}

// ── Helpers ──────────────────────────────────────────────────────

// mcpRequest builds a JSON-RPC 2.0 request body for tests.
func mcpRequest(t *testing.T, id interface{}, method string, params interface{}) []byte {
	t.Helper()
	p, _ := json.Marshal(params)
	req := map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      id,
		"method":  method,
		"params":  json.RawMessage(p),
	}
	b, _ := json.Marshal(req)
	return b
}

// fakeAuthenticator accepts exactly one token and counts invocations.
type fakeAuthenticator struct {
	token string
	calls int
}

func (f *fakeAuthenticator) Authenticate(_ context.Context, bearerToken string) error {
	f.calls++
	if bearerToken != f.token {
		return bridge.ErrBackendUnavailable
	}
	return nil
}

// ── Tests ────────────────────────────────────────────────────────

// A tools/call request is forwarded to the backend with the request context.
func TestBridge_ForwardsToolCall(t *testing.T) {
	expected := json.RawMessage(`{"content":[{"type":"text","text":"ok"}],"isError":false}`)
	backend := &fakeBackend{response: expected}
	h := bridge.NewHandler(backend, bridge.Config{})

	body := mcpRequest(t, 1, "tools/call", map[string]interface{}{
		"name":      "list_projects",
		"arguments": map[string]interface{}{},
	})

	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	if rr.Code != http.StatusOK {
		t.Errorf("status: want 200, got %d\nbody: %s", rr.Code, rr.Body.String())
	}

	var resp map[string]interface{}
	if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil {
		t.Fatalf("parse response: %v\nbody: %s", err, rr.Body.String())
	}
	if resp["jsonrpc"] != "2.0" {
		t.Errorf("jsonrpc: want 2.0, got %v", resp["jsonrpc"])
	}
	if resp["result"] == nil {
		t.Error("result: want non-nil")
	}
	if backend.method != "tools/call" {
		t.Errorf("method: want tools/call, got %q", backend.method)
	}
	if backend.ctx == nil {
		t.Error("backend ctx: expected request context to be forwarded")
	}
}

// Generic backend failures surface as JSON-RPC errors over HTTP 200.
func TestBridge_ReturnsErrorOnBackendFailure(t *testing.T) {
	backend := &fakeBackend{err: bridge.ErrBackendUnavailable}
	h := bridge.NewHandler(backend, bridge.Config{})

	body := mcpRequest(t, 2, "tools/call", map[string]interface{}{"name": "list_projects"})
	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	// HTTP level: still 200 (MCP errors are in the JSON body)
	if rr.Code != http.StatusOK {
		t.Errorf("status: want 200, got %d", rr.Code)
	}

	var resp map[string]interface{}
	json.Unmarshal(rr.Body.Bytes(), &resp)
	if resp["error"] == nil {
		t.Error("expected JSON-RPC error field for backend failure")
	}
}

// ErrBackendBusy maps to 503 + Retry-After so clients can back off.
func TestBridge_ReturnsServiceUnavailableWhenBackendBusy(t *testing.T) {
	backend := &fakeBackend{err: bridge.ErrBackendBusy}
	h := bridge.NewHandler(backend, bridge.Config{})

	body := mcpRequest(t, 2, "tools/call", map[string]interface{}{"name": "list_projects"})
	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	if rr.Code != http.StatusServiceUnavailable {
		t.Fatalf("status: want 503, got %d", rr.Code)
	}
	if got := rr.Header().Get("Retry-After"); got != "1" {
		t.Fatalf("Retry-After: want 1, got %q", got)
	}
}

// Static BearerToken mode: missing token → 401, correct token → 200.
func TestBridge_RequiresAuthToken(t *testing.T) {
	backend := &fakeBackend{response: json.RawMessage(`{}`)}
	h := bridge.NewHandler(backend, bridge.Config{
		BearerToken: "secret-token",
	})

	body := mcpRequest(t, 3, "tools/call", nil)

	// Request without token
	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	if rr.Code != http.StatusUnauthorized {
		t.Errorf("status: want 401 without token, got %d", rr.Code)
	}

	// Request with correct token
	req2 := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req2.Header.Set("Content-Type", "application/json")
	req2.Header.Set("Authorization", "Bearer secret-token")
	rr2 := httptest.NewRecorder()
	h.ServeHTTP(rr2, req2)

	if rr2.Code != http.StatusOK {
		t.Errorf("status: want 200 with correct token, got %d", rr2.Code)
	}
}

// When both are configured, the Authenticator wins over BearerToken.
func TestBridge_UsesAuthenticatorWhenConfigured(t *testing.T) {
	backend := &fakeBackend{response: json.RawMessage(`{}`)}
	auth := &fakeAuthenticator{token: "ghp-valid"}
	h := bridge.NewHandler(backend, bridge.Config{
		BearerToken:   "legacy-token",
		Authenticator: auth,
	})

	body := mcpRequest(t, 4, "tools/call", nil)

	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer ghp-valid")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	if rr.Code != http.StatusOK {
		t.Fatalf("status: want 200 with valid authenticator token, got %d", rr.Code)
	}
	if auth.calls != 1 {
		t.Fatalf("auth calls: want 1, got %d", auth.calls)
	}
}

// An Authenticator rejection yields 401 after exactly one auth call.
func TestBridge_RejectsInvalidAuthenticatorToken(t *testing.T) {
	backend := &fakeBackend{response: json.RawMessage(`{}`)}
	auth := &fakeAuthenticator{token: "ghp-valid"}
	h := bridge.NewHandler(backend, bridge.Config{
		Authenticator: auth,
	})

	body := mcpRequest(t, 5, "tools/call", nil)

	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer ghp-invalid")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	if rr.Code != http.StatusUnauthorized {
		t.Fatalf("status: want 401 with invalid authenticator token, got %d", rr.Code)
	}
	if auth.calls != 1 {
		t.Fatalf("auth calls: want 1, got %d", auth.calls)
	}
}

// Malformed JSON bodies are rejected with 400 before reaching the backend.
func TestBridge_InvalidJSON_BadRequest(t *testing.T) {
	backend := &fakeBackend{response: json.RawMessage(`{}`)}
	h := bridge.NewHandler(backend, bridge.Config{})

	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader([]byte("not json {")))
	req.Header.Set("Content-Type", "application/json")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	if rr.Code != http.StatusBadRequest {
		t.Errorf("status: want 400 for invalid JSON, got %d", rr.Code)
	}
}

// Non-POST /mcp requests get 405 plus an Allow header.
func TestBridge_MethodNotAllowed(t *testing.T) {
	backend := &fakeBackend{response: json.RawMessage(`{}`)}
	h := bridge.NewHandler(backend, bridge.Config{})

	req := httptest.NewRequest(http.MethodGet, "/mcp", nil)
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	if rr.Code != http.StatusMethodNotAllowed {
		t.Errorf("status: want 405 for GET, got %d", rr.Code)
	}
	if got := rr.Header().Get("Allow"); got != http.MethodPost {
		t.Errorf("Allow: want POST, got %q", got)
	}
}

// /health is reachable without auth and without touching the backend.
func TestBridge_HealthEndpoint(t *testing.T) {
	backend := &fakeBackend{response: json.RawMessage(`{}`)}
	h := bridge.NewHandler(backend, bridge.Config{})

	req := httptest.NewRequest(http.MethodGet, "/health", nil)
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	if rr.Code != http.StatusOK {
		t.Errorf("status: want 200 for /health, got %d", rr.Code)
	}
}

// String request IDs must round-trip unchanged into the response envelope.
func TestBridge_PreservesRequestID(t *testing.T) {
	backend := &fakeBackend{response: json.RawMessage(`{"content":[],"isError":false}`)}
	h := bridge.NewHandler(backend, bridge.Config{})

	body := mcpRequest(t, "req-42", "tools/call", map[string]interface{}{"name": "list_projects"})
	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	var resp map[string]interface{}
	json.Unmarshal(rr.Body.Bytes(), &resp)
	if resp["id"] != "req-42" {
		t.Errorf("id: want req-42, got %v", resp["id"])
	}
}

// Notifications (no id) are acknowledged with 202, empty body, no backend call.
func TestBridge_NotificationAcceptedWithoutResponse(t *testing.T) {
	backend := &fakeBackend{response: json.RawMessage(`{}`)}
	h := bridge.NewHandler(backend, bridge.Config{})

	body := []byte(`{"jsonrpc":"2.0","method":"notifications/initialized"}`)
	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	if rr.Code != http.StatusAccepted {
		t.Errorf("status: want 202 for notification, got %d", rr.Code)
	}
	if rr.Body.Len() != 0 {
		t.Errorf("body: want empty notification response, got %q", rr.Body.String())
	}
	if backend.calls != 0 {
		t.Errorf("backend calls: want 0, got %d", backend.calls)
	}
}

// ErrMethodNotFound maps to JSON-RPC error code -32601.
func TestBridge_ReturnsMethodNotFound(t *testing.T) {
	backend := &fakeBackend{err: bridge.ErrMethodNotFound}
	h := bridge.NewHandler(backend, bridge.Config{})

	body := mcpRequest(t, 9, "unknown/method", nil)
	req := httptest.NewRequest(http.MethodPost, "/mcp", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	var resp map[string]interface{}
	if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil {
		t.Fatalf("parse response: %v", err)
	}

	errObj, _ := resp["error"].(map[string]interface{})
	if code := int(errObj["code"].(float64)); code != -32601 {
		t.Errorf("error code: want -32601, got %d", code)
	}
}
"path/filepath" + "sort" + "strings" + "time" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" +) + +const gcsOperationTimeout = 10 * time.Minute + +// NewGCS creates a syncer that persists SQLite artifacts directly to GCS. +func NewGCS(ctx context.Context, runtimeDir, bucket, prefix string) (*Syncer, error) { + runtimeDir = strings.TrimSpace(runtimeDir) + bucket = strings.TrimSpace(bucket) + if runtimeDir == "" { + return nil, fmt.Errorf("cachepersist: runtime dir is required") + } + if bucket == "" { + return nil, fmt.Errorf("cachepersist: gcs bucket is required") + } + if err := os.MkdirAll(runtimeDir, 0o750); err != nil { + return nil, fmt.Errorf("cachepersist: create runtime dir: %w", err) + } + + client, err := storage.NewClient(ctx) + if err != nil { + return nil, fmt.Errorf("cachepersist: create gcs client: %w", err) + } + + prefix = normalizeGCSPrefix(prefix) + artifactDir := "gs://" + bucket + if prefix != "" { + artifactDir += "/" + prefix + } + + return &Syncer{ + RuntimeDir: runtimeDir, + ArtifactDir: artifactDir, + backend: &gcsBackend{ + client: client, + bucket: bucket, + prefix: prefix, + }, + }, nil +} + +type gcsBackend struct { + client *storage.Client + bucket string + prefix string +} + +func (b *gcsBackend) Hydrate(runtimeDir string) (int, error) { + ctx, cancel := context.WithTimeout(context.Background(), gcsOperationTimeout) + defer cancel() + + files, err := b.listDBObjects(ctx) + if err != nil { + return 0, err + } + + copied := 0 + for _, attrs := range files { + name := path.Base(attrs.Name) + reader, err := b.client.Bucket(b.bucket).Object(attrs.Name).NewReader(ctx) + if err != nil { + return copied, fmt.Errorf("cachepersist: open gcs object %s: %w", attrs.Name, err) + } + err = copyReaderAtomic(reader, filepath.Join(runtimeDir, name), 0o640) + _ = reader.Close() + if err != nil { + return copied, fmt.Errorf("cachepersist: hydrate %s: %w", name, err) + } + copied++ + } + return copied, nil +} + +func (b *gcsBackend) 
PersistProject(runtimeDir, project string) (int, error) { + project = strings.TrimSpace(project) + if project == "" { + return 0, fmt.Errorf("cachepersist: project is required") + } + + pattern := filepath.Join(runtimeDir, project+".db*") + matches, err := filepath.Glob(pattern) + if err != nil { + return 0, fmt.Errorf("cachepersist: glob project artifacts: %w", err) + } + sort.Strings(matches) + + copied := 0 + for _, src := range matches { + info, err := os.Stat(src) + if err != nil { + if os.IsNotExist(err) { + continue + } + return copied, fmt.Errorf("cachepersist: stat %s: %w", src, err) + } + if info.IsDir() || !isDBArtifact(info.Name()) { + continue + } + + ctx, cancel := context.WithTimeout(context.Background(), gcsOperationTimeout) + if err := b.uploadFile(ctx, src, info.Name()); err != nil { + cancel() + return copied, fmt.Errorf("cachepersist: persist %s: %w", info.Name(), err) + } + cancel() + copied++ + } + return copied, nil +} + +func (b *gcsBackend) CountArtifacts() (int, error) { + ctx, cancel := context.WithTimeout(context.Background(), gcsOperationTimeout) + defer cancel() + + files, err := b.listDBObjects(ctx) + if err != nil { + return 0, err + } + return len(files), nil +} + +func (b *gcsBackend) Close() error { + return b.client.Close() +} + +func (b *gcsBackend) uploadFile(ctx context.Context, srcPath, name string) error { + input, err := os.Open(srcPath) + if err != nil { + return err + } + defer input.Close() + + writer := b.client.Bucket(b.bucket).Object(b.objectName(name)).NewWriter(ctx) + writer.ContentType = "application/octet-stream" + if _, err := io.Copy(writer, input); err != nil { + _ = writer.Close() + return err + } + if err := writer.Close(); err != nil { + return err + } + return nil +} + +func (b *gcsBackend) listDBObjects(ctx context.Context) ([]*storage.ObjectAttrs, error) { + query := &storage.Query{Prefix: b.listPrefix()} + iter := b.client.Bucket(b.bucket).Objects(ctx, query) + + files := make([]*storage.ObjectAttrs, 0) 
// backend abstracts the durable artifact store (local filesystem or GCS).
type backend interface {
	Hydrate(runtimeDir string) (int, error)
	PersistProject(runtimeDir, project string) (int, error)
	CountArtifacts() (int, error)
	Close() error
}

// Syncer keeps runtime SQLite indexes on local disk while persisting copies in
// a durable artifact directory.
type Syncer struct {
	RuntimeDir  string
	ArtifactDir string
	backend     backend
}

// New validates and prepares a filesystem-backed cache syncer, creating both
// directories as needed.
func New(runtimeDir, artifactDir string) (*Syncer, error) {
	runtimeDir = strings.TrimSpace(runtimeDir)
	artifactDir = strings.TrimSpace(artifactDir) // FIX: was trimmed twice
	if runtimeDir == "" {
		return nil, fmt.Errorf("cachepersist: runtime dir is required")
	}
	if err := os.MkdirAll(runtimeDir, 0o750); err != nil {
		return nil, fmt.Errorf("cachepersist: create runtime dir: %w", err)
	}
	if artifactDir == "" {
		return nil, fmt.Errorf("cachepersist: artifact dir is required")
	}
	if err := os.MkdirAll(artifactDir, 0o750); err != nil {
		return nil, fmt.Errorf("cachepersist: create artifact dir: %w", err)
	}
	return &Syncer{
		RuntimeDir:  runtimeDir,
		ArtifactDir: artifactDir,
		backend:     &fsBackend{artifactDir: artifactDir},
	}, nil
}

// Hydrate restores persisted index artifacts into the local runtime cache.
// A nil receiver or backend is a no-op returning (0, nil).
func (s *Syncer) Hydrate() (int, error) {
	if s == nil || s.backend == nil {
		return 0, nil
	}
	return s.backend.Hydrate(s.RuntimeDir)
}

// PersistProject persists one project's SQLite files into the artifact dir.
func (s *Syncer) PersistProject(project string) (int, error) {
	if s == nil || s.backend == nil {
		return 0, nil
	}
	return s.backend.PersistProject(s.RuntimeDir, project)
}

// CountArtifacts returns the number of persisted DB artifact files.
func (s *Syncer) CountArtifacts() (int, error) {
	if s == nil || s.backend == nil {
		return 0, nil
	}
	return s.backend.CountArtifacts()
}

// Close releases any resources held by the syncer backend.
func (s *Syncer) Close() error {
	if s == nil || s.backend == nil {
		return nil
	}
	return s.backend.Close()
}

// listDBArtifacts returns the sorted names of .db files directly inside dir.
func listDBArtifacts(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, fmt.Errorf("cachepersist: read dir %s: %w", dir, err)
	}
	files := make([]string, 0, len(entries))
	for _, entry := range entries {
		if entry.IsDir() || !isDBArtifact(entry.Name()) {
			continue
		}
		files = append(files, entry.Name())
	}
	sort.Strings(files)
	return files, nil
}

// isDBArtifact reports whether name is a main SQLite database file.
// Sidecars (.db-wal, .db-shm) deliberately do NOT match: only checkpointed
// main databases are persisted/hydrated.
func isDBArtifact(name string) bool {
	return strings.HasSuffix(name, ".db")
}

// fsBackend persists artifacts into a local directory.
type fsBackend struct {
	artifactDir string
}

// Hydrate copies every persisted .db file into runtimeDir.
func (b *fsBackend) Hydrate(runtimeDir string) (int, error) {
	files, err := listDBArtifacts(b.artifactDir)
	if err != nil {
		return 0, err
	}
	copied := 0
	for _, name := range files {
		src := filepath.Join(b.artifactDir, name)
		dst := filepath.Join(runtimeDir, name)
		if err := copyFileAtomic(src, dst); err != nil {
			return copied, fmt.Errorf("cachepersist: hydrate %s: %w", name, err)
		}
		copied++
	}
	return copied, nil
}

// PersistProject copies the project's main .db file (matched via glob, with
// sidecars filtered by isDBArtifact) into the artifact dir.
func (b *fsBackend) PersistProject(runtimeDir, project string) (int, error) {
	project = strings.TrimSpace(project)
	if project == "" {
		return 0, fmt.Errorf("cachepersist: project is required")
	}
	pattern := filepath.Join(runtimeDir, project+".db*")
	matches, err := filepath.Glob(pattern)
	if err != nil {
		return 0, fmt.Errorf("cachepersist: glob project artifacts: %w", err)
	}
	sort.Strings(matches)
	copied := 0
	for _, src := range matches {
		info, err := os.Stat(src)
		if err != nil {
			if os.IsNotExist(err) {
				// File vanished between glob and stat; skip it.
				continue
			}
			return copied, fmt.Errorf("cachepersist: stat %s: %w", src, err)
		}
		if info.IsDir() || !isDBArtifact(info.Name()) {
			continue
		}
		dst := filepath.Join(b.artifactDir, info.Name())
		if err := copyFileAtomic(src, dst); err != nil {
			return copied, fmt.Errorf("cachepersist: persist %s: %w", info.Name(), err)
		}
		copied++
	}
	return copied, nil
}

// CountArtifacts returns the number of persisted .db files.
func (b *fsBackend) CountArtifacts() (int, error) {
	files, err := listDBArtifacts(b.artifactDir)
	if err != nil {
		return 0, err
	}
	return len(files), nil
}

// Close is a no-op for the filesystem backend.
func (b *fsBackend) Close() error {
	return nil
}

// copyFileAtomic copies src to dst preserving src's mode, via a temp file and
// rename so readers never observe a partially written dst.
func copyFileAtomic(src, dst string) error {
	input, err := os.Open(src)
	if err != nil {
		return err
	}
	defer input.Close()

	info, err := input.Stat()
	if err != nil {
		return err
	}

	return copyReaderAtomic(input, dst, info.Mode())
}

// copyReaderAtomic streams input into a temp file next to dst, then renames it
// into place. On any failure the temp file is removed; after a successful
// rename the deferred Remove fails harmlessly.
func copyReaderAtomic(input io.Reader, dst string, mode os.FileMode) error {
	if err := os.MkdirAll(filepath.Dir(dst), 0o750); err != nil {
		return err
	}
	tmp, err := os.CreateTemp(filepath.Dir(dst), ".cachepersist-*")
	if err != nil {
		return err
	}
	tmpName := tmp.Name()
	defer func() {
		_ = tmp.Close()
		_ = os.Remove(tmpName)
	}()

	if _, err := io.Copy(tmp, input); err != nil {
		return err
	}
	if err := tmp.Chmod(mode); err != nil {
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Rename(tmpName, dst); err != nil {
		return err
	}
	return nil
}
} + if copied != 1 { + t.Fatalf("copied: want 1, got %d", copied) + } + if _, err := os.Stat(filepath.Join(runtimeDir, "platform-backend.db")); err != nil { + t.Fatalf("runtime db missing: %v", err) + } + if _, err := os.Stat(filepath.Join(runtimeDir, "platform-backend.db-wal")); !os.IsNotExist(err) { + t.Fatalf("unexpected wal copied: %v", err) + } + if _, err := os.Stat(filepath.Join(runtimeDir, "platform-backend.db-shm")); !os.IsNotExist(err) { + t.Fatalf("unexpected shm copied: %v", err) + } + if _, err := os.Stat(filepath.Join(runtimeDir, "README.txt")); !os.IsNotExist(err) { + t.Fatalf("unexpected non-db file copied: %v", err) + } +} + +func TestPersistProjectCopiesMatchingArtifacts(t *testing.T) { + artifactDir := t.TempDir() + runtimeDir := t.TempDir() + + writeFile(t, filepath.Join(runtimeDir, "platform-backend.db"), "db") + writeFile(t, filepath.Join(runtimeDir, "platform-backend.db-wal"), "wal") + writeFile(t, filepath.Join(runtimeDir, "platform-backend.db-shm"), "shm") + writeFile(t, filepath.Join(runtimeDir, "other.db"), "other") + + syncer, err := New(runtimeDir, artifactDir) + if err != nil { + t.Fatalf("New: %v", err) + } + + copied, err := syncer.PersistProject("platform-backend") + if err != nil { + t.Fatalf("PersistProject: %v", err) + } + if copied != 1 { + t.Fatalf("copied: want 1, got %d", copied) + } + if _, err := os.Stat(filepath.Join(artifactDir, "platform-backend.db")); err != nil { + t.Fatalf("artifact db missing: %v", err) + } + if _, err := os.Stat(filepath.Join(artifactDir, "platform-backend.db-wal")); !os.IsNotExist(err) { + t.Fatalf("unexpected wal artifact copied: %v", err) + } + if _, err := os.Stat(filepath.Join(artifactDir, "platform-backend.db-shm")); !os.IsNotExist(err) { + t.Fatalf("unexpected shm artifact copied: %v", err) + } + if _, err := os.Stat(filepath.Join(artifactDir, "other.db")); !os.IsNotExist(err) { + t.Fatalf("unexpected unrelated artifact copied: %v", err) + } +} + +func TestCountArtifacts(t *testing.T) { + 
artifactDir := t.TempDir() + runtimeDir := t.TempDir() + + writeFile(t, filepath.Join(artifactDir, "a.db"), "a") + writeFile(t, filepath.Join(artifactDir, "a.db-wal"), "wal") + writeFile(t, filepath.Join(artifactDir, "a.db-shm"), "shm") + writeFile(t, filepath.Join(artifactDir, "notes.md"), "ignore") + + syncer, err := New(runtimeDir, artifactDir) + if err != nil { + t.Fatalf("New: %v", err) + } + + count, err := syncer.CountArtifacts() + if err != nil { + t.Fatalf("CountArtifacts: %v", err) + } + if count != 1 { + t.Fatalf("count: want 1, got %d", count) + } +} + +func writeFile(t *testing.T, path, content string) { + t.Helper() + if err := os.MkdirAll(filepath.Dir(path), 0o750); err != nil { + t.Fatalf("mkdir: %v", err) + } + if err := os.WriteFile(path, []byte(content), 0o640); err != nil { + t.Fatalf("write file: %v", err) + } +} diff --git a/ghl/internal/discovery/discovery.go b/ghl/internal/discovery/discovery.go new file mode 100644 index 00000000..3e8b39a3 --- /dev/null +++ b/ghl/internal/discovery/discovery.go @@ -0,0 +1,76 @@ +package discovery + +import ( + "context" +) + +// ToolDefinition describes the wrapper-owned discover_projects MCP tool. +type ToolDefinition struct { + Name string `json:"name"` + Description string `json:"description"` + InputSchema map[string]interface{} `json:"inputSchema"` +} + +// Candidate is a single repo candidate returned by discovery. +type Candidate struct { + Project string `json:"project"` + RepoSlug string `json:"repo_slug"` + Score float64 `json:"score,omitempty"` + Confidence string `json:"confidence,omitempty"` + Reasons []string `json:"reasons,omitempty"` +} + +// Request is the discover_projects tool input. +type Request struct { + Query string `json:"query"` + Limit int `json:"limit,omitempty"` + IncludeGraphConfidence bool `json:"include_graph_confidence,omitempty"` + IncludeSemantic bool `json:"include_semantic,omitempty"` +} + +// Response is the discover_projects tool output. 
+type Response struct { + Query string `json:"query"` + CrossRepo bool `json:"cross_repo,omitempty"` + PrimaryRepos []Candidate `json:"primary_repos,omitempty"` + RelatedRepos []Candidate `json:"related_repos,omitempty"` +} + +// Service executes wrapper-owned repo discovery. +type Service interface { + Definition() ToolDefinition + DiscoverProjects(ctx context.Context, req Request) (Response, error) +} + +// NewDefinition returns the canonical wrapper tool definition. +func NewDefinition() ToolDefinition { + return ToolDefinition{ + Name: "discover_projects", + Description: "Discover the most likely indexed repos for a task using metadata, code search, and graph evidence.", + InputSchema: map[string]interface{}{ + "type": "object", + "properties": map[string]interface{}{ + "query": map[string]interface{}{ + "type": "string", + "description": "Task or feature description to map to indexed repositories.", + }, + "limit": map[string]interface{}{ + "type": "integer", + "default": 5, + "description": "Maximum number of candidate repositories to return.", + }, + "include_graph_confidence": map[string]interface{}{ + "type": "boolean", + "default": true, + "description": "When true, use graph-level architecture checks to refine confidence for top candidates.", + }, + "include_semantic": map[string]interface{}{ + "type": "boolean", + "default": false, + "description": "When true, optionally use semantic vector hits where available as positive evidence.", + }, + }, + "required": []string{"query"}, + }, + } +} diff --git a/ghl/internal/discovery/discovery_test.go b/ghl/internal/discovery/discovery_test.go new file mode 100644 index 00000000..025d93b3 --- /dev/null +++ b/ghl/internal/discovery/discovery_test.go @@ -0,0 +1,314 @@ +package discovery + +import ( + "context" + "encoding/json" + "testing" + + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/manifest" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/mcp" +) + +type fakeToolCaller struct { + tools 
map[string]func(params map[string]interface{}) *mcp.ToolResult +} + +func (f *fakeToolCaller) CallTool(_ context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) { + if fn, ok := f.tools[name]; ok { + return fn(params), nil + } + return &mcp.ToolResult{}, nil +} + +func jsonToolResult(t *testing.T, payload interface{}) *mcp.ToolResult { + t.Helper() + raw, err := json.Marshal(payload) + if err != nil { + t.Fatalf("marshal payload: %v", err) + } + return &mcp.ToolResult{ + Content: []mcp.Content{{Type: "text", Text: string(raw)}}, + } +} + +func TestDiscoverProjectsNormalizesCatalogFromRootPath(t *testing.T) { + svc := NewService(&fakeToolCaller{ + tools: map[string]func(map[string]interface{}) *mcp.ToolResult{ + "list_projects": func(params map[string]interface{}) *mcp.ToolResult { + return jsonToolResult(t, map[string]interface{}{ + "projects": []map[string]interface{}{ + { + "name": "app-fleet-cache-membership-backend", + "root_path": "/app/fleet-cache/membership-backend", + "nodes": 5942, + "edges": 11602, + }, + }, + }) + }, + }, + }, manifest.Manifest{ + Repos: []manifest.Repo{ + {Name: "membership-backend", Team: "revex", Type: "service", Tags: []string{"membership", "checkout"}}, + }, + }, Options{}) + + catalog, err := svc.refreshCatalog(context.Background()) + if err != nil { + t.Fatalf("refreshCatalog: %v", err) + } + if len(catalog) != 1 { + t.Fatalf("catalog size: want 1, got %d", len(catalog)) + } + if catalog[0].RepoSlug != "membership-backend" { + t.Fatalf("repo slug: want membership-backend, got %q", catalog[0].RepoSlug) + } + if catalog[0].Team != "revex" { + t.Fatalf("team: want revex, got %q", catalog[0].Team) + } +} + +func TestDiscoverProjectsRanksByMetadataAndBM25(t *testing.T) { + svc := NewService(&fakeToolCaller{ + tools: map[string]func(map[string]interface{}) *mcp.ToolResult{ + "list_projects": func(params map[string]interface{}) *mcp.ToolResult { + return jsonToolResult(t, map[string]interface{}{ + 
"projects": []map[string]interface{}{ + { + "name": "app-fleet-cache-membership-backend", + "root_path": "/app/fleet-cache/membership-backend", + "nodes": 5942, + "edges": 11602, + }, + { + "name": "app-fleet-cache-ghl-membership-frontend", + "root_path": "/app/fleet-cache/ghl-membership-frontend", + "nodes": 10287, + "edges": 15213, + }, + }, + }) + }, + "search_graph": func(params map[string]interface{}) *mcp.ToolResult { + project, _ := params["project"].(string) + switch project { + case "app-fleet-cache-membership-backend": + return jsonToolResult(t, map[string]interface{}{ + "total": 4, + "results": []map[string]interface{}{ + {"label": "Function", "name": "acquireCheckoutLock", "rank": -14.0}, + }, + }) + case "app-fleet-cache-ghl-membership-frontend": + return jsonToolResult(t, map[string]interface{}{ + "total": 1, + "results": []map[string]interface{}{ + {"label": "Component", "name": "CheckoutPage", "rank": -2.0}, + }, + }) + default: + return jsonToolResult(t, map[string]interface{}{"total": 0, "results": []map[string]interface{}{}}) + } + }, + "get_architecture": func(params map[string]interface{}) *mcp.ToolResult { + project, _ := params["project"].(string) + if project == "app-fleet-cache-membership-backend" { + return jsonToolResult(t, map[string]interface{}{ + "project": project, + "total_nodes": 5942, + "total_edges": 11602, + "node_labels": []map[string]interface{}{{"label": "Function", "count": 600}}, + "edge_types": []map[string]interface{}{{"type": "CALLS", "count": 1800}}, + }) + } + return jsonToolResult(t, map[string]interface{}{ + "project": project, + "total_nodes": 10287, + "total_edges": 15213, + "node_labels": []map[string]interface{}{{"label": "Component", "count": 420}}, + "edge_types": []map[string]interface{}{{"type": "IMPORTS", "count": 2000}}, + }) + }, + }, + }, manifest.Manifest{ + Repos: []manifest.Repo{ + {Name: "membership-backend", Team: "revex", Type: "service", Tags: []string{"membership", "checkout", "contact"}}, + {Name: 
"ghl-membership-frontend", Team: "revex", Type: "frontend", Tags: []string{"membership", "checkout"}}, + }, + }, Options{MaxBM25Candidates: 5, MaxGraphCandidates: 3}) + + resp, err := svc.DiscoverProjects(context.Background(), Request{ + Query: "add lock in membership checkout flow for contact purchases", + Limit: 5, + IncludeGraphConfidence: true, + }) + if err != nil { + t.Fatalf("DiscoverProjects: %v", err) + } + if len(resp.PrimaryRepos) == 0 { + t.Fatal("expected at least one primary repo") + } + if got := resp.PrimaryRepos[0].RepoSlug; got != "membership-backend" { + t.Fatalf("top repo: want membership-backend, got %q", got) + } +} + +func TestDiscoverProjectsPenalizesPlaceholderIndexes(t *testing.T) { + svc := NewService(&fakeToolCaller{ + tools: map[string]func(map[string]interface{}) *mcp.ToolResult{ + "list_projects": func(params map[string]interface{}) *mcp.ToolResult { + return jsonToolResult(t, map[string]interface{}{ + "projects": []map[string]interface{}{ + { + "name": "app-fleet-cache-membership-backend", + "root_path": "/app/fleet-cache/membership-backend", + "nodes": 1, + "edges": 0, + }, + { + "name": "app-fleet-cache-ghl-membership-frontend", + "root_path": "/app/fleet-cache/ghl-membership-frontend", + "nodes": 1200, + "edges": 2400, + }, + }, + }) + }, + "search_graph": func(params map[string]interface{}) *mcp.ToolResult { + project, _ := params["project"].(string) + if project == "app-fleet-cache-membership-backend" { + return jsonToolResult(t, map[string]interface{}{ + "total": 3, + "results": []map[string]interface{}{ + {"label": "Function", "name": "fakeMatch", "rank": -12.0}, + }, + }) + } + return jsonToolResult(t, map[string]interface{}{ + "total": 2, + "results": []map[string]interface{}{ + {"label": "Component", "name": "CheckoutPage", "rank": -5.0}, + }, + }) + }, + "get_architecture": func(params map[string]interface{}) *mcp.ToolResult { + project, _ := params["project"].(string) + if project == "app-fleet-cache-membership-backend" { 
+ return jsonToolResult(t, map[string]interface{}{ + "project": project, + "total_nodes": 1, + "total_edges": 0, + }) + } + return jsonToolResult(t, map[string]interface{}{ + "project": project, + "total_nodes": 1200, + "total_edges": 2400, + }) + }, + }, + }, manifest.Manifest{ + Repos: []manifest.Repo{ + {Name: "membership-backend", Team: "revex", Type: "service", Tags: []string{"membership", "checkout"}}, + {Name: "ghl-membership-frontend", Team: "revex", Type: "frontend", Tags: []string{"membership", "checkout"}}, + }, + }, Options{MaxBM25Candidates: 5, MaxGraphCandidates: 3}) + + resp, err := svc.DiscoverProjects(context.Background(), Request{ + Query: "membership checkout", + Limit: 5, + IncludeGraphConfidence: true, + }) + if err != nil { + t.Fatalf("DiscoverProjects: %v", err) + } + if len(resp.PrimaryRepos) == 0 { + t.Fatal("expected at least one primary repo") + } + if got := resp.PrimaryRepos[0].RepoSlug; got != "ghl-membership-frontend" { + t.Fatalf("top repo after placeholder penalty: want ghl-membership-frontend, got %q", got) + } +} + +func TestDiscoverProjectsReturnsCrossRepoCandidates(t *testing.T) { + svc := NewService(&fakeToolCaller{ + tools: map[string]func(map[string]interface{}) *mcp.ToolResult{ + "list_projects": func(params map[string]interface{}) *mcp.ToolResult { + return jsonToolResult(t, map[string]interface{}{ + "projects": []map[string]interface{}{ + { + "name": "app-fleet-cache-membership-backend", + "root_path": "/app/fleet-cache/membership-backend", + "nodes": 5942, + "edges": 11602, + }, + { + "name": "app-fleet-cache-ghl-membership-frontend", + "root_path": "/app/fleet-cache/ghl-membership-frontend", + "nodes": 10287, + "edges": 15213, + }, + }, + }) + }, + "search_graph": func(params map[string]interface{}) *mcp.ToolResult { + project, _ := params["project"].(string) + switch project { + case "app-fleet-cache-membership-backend": + return jsonToolResult(t, map[string]interface{}{ + "total": 3, + "results": 
[]map[string]interface{}{ + {"label": "Function", "name": "checkoutContactLock", "rank": -10.0}, + }, + }) + case "app-fleet-cache-ghl-membership-frontend": + return jsonToolResult(t, map[string]interface{}{ + "total": 3, + "results": []map[string]interface{}{ + {"label": "Component", "name": "CheckoutLockBanner", "rank": -9.0}, + }, + }) + default: + return jsonToolResult(t, map[string]interface{}{"total": 0, "results": []map[string]interface{}{}}) + } + }, + "get_architecture": func(params map[string]interface{}) *mcp.ToolResult { + project, _ := params["project"].(string) + if project == "app-fleet-cache-membership-backend" { + return jsonToolResult(t, map[string]interface{}{ + "project": project, + "total_nodes": 5942, + "total_edges": 11602, + "node_labels": []map[string]interface{}{{"label": "Function", "count": 600}}, + }) + } + return jsonToolResult(t, map[string]interface{}{ + "project": project, + "total_nodes": 10287, + "total_edges": 15213, + "node_labels": []map[string]interface{}{{"label": "Component", "count": 420}}, + }) + }, + }, + }, manifest.Manifest{ + Repos: []manifest.Repo{ + {Name: "membership-backend", Team: "revex", Type: "service", Tags: []string{"membership", "checkout", "contact"}}, + {Name: "ghl-membership-frontend", Team: "revex", Type: "frontend", Tags: []string{"membership", "checkout", "ui"}}, + }, + }, Options{MaxBM25Candidates: 5, MaxGraphCandidates: 3}) + + resp, err := svc.DiscoverProjects(context.Background(), Request{ + Query: "add checkout lock ui and backend validation for membership contact purchases", + Limit: 5, + IncludeGraphConfidence: true, + }) + if err != nil { + t.Fatalf("DiscoverProjects: %v", err) + } + if !resp.CrossRepo { + t.Fatal("expected cross_repo=true") + } + if len(resp.PrimaryRepos)+len(resp.RelatedRepos) < 2 { + t.Fatalf("expected at least two repos, got primary=%d related=%d", len(resp.PrimaryRepos), len(resp.RelatedRepos)) + } +} diff --git a/ghl/internal/discovery/service.go 
b/ghl/internal/discovery/service.go new file mode 100644 index 00000000..67205afc --- /dev/null +++ b/ghl/internal/discovery/service.go @@ -0,0 +1,586 @@ +package discovery + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/manifest" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/mcp" +) + +// ToolCaller is the subset of MCP client behavior discovery needs. +type ToolCaller interface { + CallTool(ctx context.Context, name string, params map[string]interface{}) (*mcp.ToolResult, error) +} + +// Options tunes candidate narrowing and scoring depth. +type Options struct { + MaxBM25Candidates int + MaxGraphCandidates int + RequestTimeout time.Duration +} + +type indexedProject struct { + Name string `json:"name"` + RootPath string `json:"root_path"` + Nodes int `json:"nodes"` + Edges int `json:"edges"` +} + +type listProjectsPayload struct { + Projects []indexedProject `json:"projects"` +} + +type searchGraphPayload struct { + Total int `json:"total"` + Results []searchGraphHit `json:"results"` + SemanticResults []semanticGraphHit `json:"semantic_results"` +} + +type searchGraphHit struct { + Name string `json:"name"` + QualifiedName string `json:"qualified_name"` + Label string `json:"label"` + FilePath string `json:"file_path"` + Rank float64 `json:"rank"` +} + +type semanticGraphHit struct { + Name string `json:"name"` + QualifiedName string `json:"qualified_name"` + Label string `json:"label"` + FilePath string `json:"file_path"` + Score float64 `json:"score"` +} + +type architecturePayload struct { + Project string `json:"project"` + TotalNodes int `json:"total_nodes"` + TotalEdges int `json:"total_edges"` + NodeLabels []labelStat `json:"node_labels"` +} + +type labelStat struct { + Label string `json:"label"` + Count int `json:"count"` +} + +type catalogEntry struct { + Project string + RepoSlug string + RootPath 
string + Nodes int + Edges int + Team string + Type string + Tags []string +} + +type candidateScore struct { + Candidate + indexed catalogEntry +} + +// Discoverer implements the discovery Service. +type Discoverer struct { + caller ToolCaller + manifest manifest.Manifest + opts Options + + mu sync.RWMutex + catalog []catalogEntry +} + +// NewService constructs a discoverer with sane defaults. +func NewService(caller ToolCaller, m manifest.Manifest, opts Options) *Discoverer { + if opts.MaxBM25Candidates <= 0 { + opts.MaxBM25Candidates = 5 + } + if opts.MaxGraphCandidates <= 0 { + opts.MaxGraphCandidates = 3 + } + if opts.RequestTimeout <= 0 { + opts.RequestTimeout = 5 * time.Second + } + return &Discoverer{ + caller: caller, + manifest: m, + opts: opts, + } +} + +func (d *Discoverer) Definition() ToolDefinition { + return NewDefinition() +} + +// Invalidate clears the in-memory project catalog so the next request refreshes it. +func (d *Discoverer) Invalidate() { + d.mu.Lock() + defer d.mu.Unlock() + d.catalog = nil +} + +func (d *Discoverer) DiscoverProjects(ctx context.Context, req Request) (Response, error) { + if strings.TrimSpace(req.Query) == "" { + return Response{}, errors.New("query is required") + } + if req.Limit <= 0 { + req.Limit = 5 + } + + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d.opts.RequestTimeout) + defer cancel() + } + + catalog, err := d.ensureCatalog(ctx) + if err != nil { + return Response{}, err + } + if len(catalog) == 0 { + return Response{Query: req.Query}, nil + } + + queryTokens := tokenize(req.Query) + candidates := d.initialCandidates(req.Query, queryTokens, catalog) + if len(candidates) == 0 { + return Response{Query: req.Query}, nil + } + + if err := d.applyBM25Scores(ctx, req, queryTokens, candidates); err != nil { + return Response{}, err + } + if req.IncludeGraphConfidence { + if err := d.applyGraphConfidence(ctx, candidates); err != nil { + return Response{}, 
err + } + } + + sort.SliceStable(candidates, func(i, j int) bool { + if candidates[i].Score == candidates[j].Score { + return candidates[i].RepoSlug < candidates[j].RepoSlug + } + return candidates[i].Score > candidates[j].Score + }) + + resp := Response{Query: req.Query} + topScore := candidates[0].Score + primaryCutoff := math.Max(0.55, topScore-0.12) + for _, cand := range candidates { + cand.Confidence = confidenceFromScore(cand.Score) + if len(resp.PrimaryRepos) == 0 || (cand.Score >= primaryCutoff && len(resp.PrimaryRepos) < min(req.Limit, 3)) { + resp.PrimaryRepos = append(resp.PrimaryRepos, cand.Candidate) + continue + } + if cand.Score >= 0.30 && len(resp.PrimaryRepos)+len(resp.RelatedRepos) < req.Limit { + resp.RelatedRepos = append(resp.RelatedRepos, cand.Candidate) + } + } + resp.CrossRepo = len(resp.PrimaryRepos)+len(resp.RelatedRepos) > 1 + return resp, nil +} + +func (d *Discoverer) ensureCatalog(ctx context.Context) ([]catalogEntry, error) { + d.mu.RLock() + if d.catalog != nil { + cached := append([]catalogEntry(nil), d.catalog...) 
+ d.mu.RUnlock() + return cached, nil + } + d.mu.RUnlock() + return d.refreshCatalog(ctx) +} + +func (d *Discoverer) refreshCatalog(ctx context.Context) ([]catalogEntry, error) { + result, err := d.caller.CallTool(ctx, "list_projects", nil) + if err != nil { + return nil, fmt.Errorf("list_projects: %w", err) + } + + var payload listProjectsPayload + if err := decodeToolPayload(result, &payload); err != nil { + return nil, fmt.Errorf("decode list_projects: %w", err) + } + + manifestByName := make(map[string]manifest.Repo, len(d.manifest.Repos)) + for _, repo := range d.manifest.Repos { + manifestByName[strings.ToLower(repo.Name)] = repo + } + + catalog := make([]catalogEntry, 0, len(payload.Projects)) + for _, project := range payload.Projects { + slug := deriveRepoSlug(project.Name, project.RootPath, manifestByName) + entry := catalogEntry{ + Project: project.Name, + RepoSlug: slug, + RootPath: project.RootPath, + Nodes: project.Nodes, + Edges: project.Edges, + } + if repo, ok := manifestByName[strings.ToLower(slug)]; ok { + entry.Team = repo.Team + entry.Type = repo.Type + entry.Tags = append([]string(nil), repo.Tags...) + } + catalog = append(catalog, entry) + } + + d.mu.Lock() + d.catalog = append([]catalogEntry(nil), catalog...) + d.mu.Unlock() + return catalog, nil +} + +func deriveRepoSlug(projectName, rootPath string, manifestByName map[string]manifest.Repo) string { + if base := strings.TrimSpace(filepath.Base(rootPath)); base != "" && base != "." 
&& base != string(filepath.Separator) { + return base + } + lowerProject := strings.ToLower(projectName) + if _, ok := manifestByName[lowerProject]; ok { + return projectName + } + prefixes := []string{ + "app-fleet-cache-", + "data-fleet-cache-", + "tmp-fleet-cache-", + "fleet-cache-", + } + for _, prefix := range prefixes { + if strings.HasPrefix(lowerProject, prefix) { + return projectName[len(prefix):] + } + } + return projectName +} + +func (d *Discoverer) initialCandidates(query string, queryTokens []string, catalog []catalogEntry) []candidateScore { + candidates := make([]candidateScore, 0, len(catalog)) + for _, entry := range catalog { + score, reasons := metadataScore(query, queryTokens, entry) + candidates = append(candidates, candidateScore{ + Candidate: Candidate{ + Project: entry.Project, + RepoSlug: entry.RepoSlug, + Score: score, + Reasons: reasons, + }, + indexed: entry, + }) + } + + sort.SliceStable(candidates, func(i, j int) bool { + if candidates[i].Score == candidates[j].Score { + return healthScore(candidates[i].indexed) > healthScore(candidates[j].indexed) + } + return candidates[i].Score > candidates[j].Score + }) + + limit := min(len(candidates), d.opts.MaxBM25Candidates) + if limit == 0 { + return nil + } + + selected := append([]candidateScore(nil), candidates[:limit]...) + allZero := true + for _, candidate := range selected { + if candidate.Score > 0 { + allZero = false + break + } + } + if allZero { + sort.SliceStable(candidates, func(i, j int) bool { + return healthScore(candidates[i].indexed) > healthScore(candidates[j].indexed) + }) + selected = append([]candidateScore(nil), candidates[:limit]...) 
+ } + return selected +} + +func metadataScore(query string, queryTokens []string, entry catalogEntry) (float64, []string) { + var score float64 + var reasons []string + + lowerQuery := strings.ToLower(query) + lowerSlug := strings.ToLower(entry.RepoSlug) + if lowerSlug != "" && strings.Contains(lowerQuery, lowerSlug) { + score += 0.35 + reasons = append(reasons, "repo slug appears directly in task") + } + + slugTokens := tokenSet(tokenize(lowerSlug)) + tagTokens := tokenSet(entry.Tags) + for _, token := range queryTokens { + if _, ok := slugTokens[token]; ok { + score += 0.12 + reasons = append(reasons, fmt.Sprintf("name token match: %s", token)) + continue + } + if _, ok := tagTokens[token]; ok { + score += 0.08 + reasons = append(reasons, fmt.Sprintf("tag match: %s", token)) + continue + } + if token == strings.ToLower(entry.Team) || token == strings.ToLower(entry.Type) { + score += 0.04 + reasons = append(reasons, fmt.Sprintf("metadata match: %s", token)) + } + } + + if entry.Nodes > 0 && entry.Edges > 0 { + score += 0.03 + } + if entry.Nodes <= 1 || entry.Edges == 0 { + score -= 0.15 + reasons = append(reasons, "indexed project is shallow") + } + + return clamp(score, 0, 0.75), dedupeStrings(reasons) +} + +func (d *Discoverer) applyBM25Scores(ctx context.Context, req Request, queryTokens []string, candidates []candidateScore) error { + for i := range candidates { + args := map[string]interface{}{ + "project": candidates[i].Project, + "query": req.Query, + "limit": 8, + } + if req.IncludeSemantic { + if semanticKeywords := semanticKeywords(queryTokens); len(semanticKeywords) > 0 { + args["semantic_query"] = semanticKeywords + } + } + + result, err := d.caller.CallTool(ctx, "search_graph", args) + if err != nil { + return fmt.Errorf("search_graph %s: %w", candidates[i].Project, err) + } + + var payload searchGraphPayload + if err := decodeToolPayload(result, &payload); err != nil { + return fmt.Errorf("decode search_graph %s: %w", candidates[i].Project, err) + } 
+ + add, reasons := bm25Score(payload) + candidates[i].Score = clamp(candidates[i].Score+add, 0, 1.0) + candidates[i].Reasons = dedupeStrings(append(candidates[i].Reasons, reasons...)) + + if req.IncludeSemantic { + semAdd, semReasons := semanticScore(payload) + candidates[i].Score = clamp(candidates[i].Score+semAdd, 0, 1.0) + candidates[i].Reasons = dedupeStrings(append(candidates[i].Reasons, semReasons...)) + } + } + return nil +} + +func bm25Score(payload searchGraphPayload) (float64, []string) { + if payload.Total <= 0 || len(payload.Results) == 0 { + return 0, []string{"no BM25 code hits"} + } + + score := math.Min(float64(payload.Total), 8) / 8 * 0.30 + best := payload.Results[0] + score += labelWeight(best.Label) + + reasons := []string{ + fmt.Sprintf("BM25 hit count: %d", payload.Total), + fmt.Sprintf("top hit label: %s", best.Label), + } + return clamp(score, 0, 0.50), reasons +} + +func semanticScore(payload searchGraphPayload) (float64, []string) { + if len(payload.SemanticResults) == 0 { + return 0, nil + } + + best := payload.SemanticResults[0].Score + score := clamp(best*0.08, 0, 0.08) + reasons := []string{fmt.Sprintf("semantic hits: %d", len(payload.SemanticResults))} + return score, reasons +} + +func (d *Discoverer) applyGraphConfidence(ctx context.Context, candidates []candidateScore) error { + sort.SliceStable(candidates, func(i, j int) bool { return candidates[i].Score > candidates[j].Score }) + + limit := min(len(candidates), d.opts.MaxGraphCandidates) + for i := 0; i < limit; i++ { + result, err := d.caller.CallTool(ctx, "get_architecture", map[string]interface{}{ + "project": candidates[i].Project, + }) + if err != nil { + return fmt.Errorf("get_architecture %s: %w", candidates[i].Project, err) + } + + var payload architecturePayload + if err := decodeToolPayload(result, &payload); err != nil { + return fmt.Errorf("decode get_architecture %s: %w", candidates[i].Project, err) + } + + add, reasons := graphConfidenceScore(payload) + 
candidates[i].Score = clamp(candidates[i].Score+add, 0, 1.0) + candidates[i].Reasons = dedupeStrings(append(candidates[i].Reasons, reasons...)) + } + return nil +} + +func graphConfidenceScore(payload architecturePayload) (float64, []string) { + if payload.TotalNodes <= 1 || payload.TotalEdges == 0 { + return -0.40, []string{"graph confidence penalty: project-only or placeholder index"} + } + + score := 0.0 + reasons := []string{ + fmt.Sprintf("graph depth: %d nodes / %d edges", payload.TotalNodes, payload.TotalEdges), + } + + if payload.TotalNodes > 100 && payload.TotalEdges > 100 { + score += 0.10 + } + + for _, label := range payload.NodeLabels { + switch label.Label { + case "Function", "Method", "Route", "Class", "Component": + if label.Count > 0 { + score += 0.05 + reasons = append(reasons, fmt.Sprintf("architecture contains %s nodes", label.Label)) + return clamp(score, -0.40, 0.15), dedupeStrings(reasons) + } + } + } + return clamp(score, -0.40, 0.15), dedupeStrings(reasons) +} + +func decodeToolPayload(result *mcp.ToolResult, out interface{}) error { + if result == nil { + return errors.New("missing tool result") + } + if result.IsError { + msg := "tool returned error" + if len(result.Content) > 0 { + msg = result.Content[0].Text + } + return errors.New(msg) + } + for _, item := range result.Content { + if item.Type != "text" || strings.TrimSpace(item.Text) == "" { + continue + } + return json.Unmarshal([]byte(item.Text), out) + } + return errors.New("missing JSON text content") +} + +func tokenize(input string) []string { + replacer := strings.NewReplacer("-", " ", "_", " ", "/", " ", ".", " ", ":", " ") + normalized := strings.ToLower(replacer.Replace(input)) + fields := strings.Fields(normalized) + tokens := make([]string, 0, len(fields)) + for _, field := range fields { + field = strings.TrimSpace(field) + if field == "" { + continue + } + tokens = append(tokens, field) + } + return dedupeStrings(tokens) +} + +func semanticKeywords(tokens []string) 
[]string { + stop := map[string]struct{}{ + "add": {}, "for": {}, "the": {}, "and": {}, "flow": {}, "in": {}, "a": {}, "an": {}, + } + out := make([]string, 0, len(tokens)) + for _, token := range tokens { + if _, ok := stop[token]; ok { + continue + } + out = append(out, token) + if len(out) == 5 { + break + } + } + return out +} + +func tokenSet(tokens []string) map[string]struct{} { + set := make(map[string]struct{}, len(tokens)) + for _, token := range tokens { + token = strings.ToLower(strings.TrimSpace(token)) + if token == "" { + continue + } + set[token] = struct{}{} + } + return set +} + +func labelWeight(label string) float64 { + switch label { + case "Function", "Method": + return 0.15 + case "Route": + return 0.13 + case "Class", "Interface", "Type", "Enum": + return 0.10 + case "Component": + return 0.08 + default: + return 0.03 + } +} + +func healthScore(entry catalogEntry) int { + return entry.Nodes + entry.Edges +} + +func confidenceFromScore(score float64) string { + switch { + case score >= 0.75: + return "high" + case score >= 0.50: + return "medium" + default: + return "low" + } +} + +func dedupeStrings(values []string) []string { + seen := make(map[string]struct{}, len(values)) + out := make([]string, 0, len(values)) + for _, value := range values { + if _, ok := seen[value]; ok { + continue + } + seen[value] = struct{}{} + out = append(out, value) + } + return out +} + +func clamp(value, minValue, maxValue float64) float64 { + if value < minValue { + return minValue + } + if value > maxValue { + return maxValue + } + return value +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/ghl/internal/indexer/indexer.go b/ghl/internal/indexer/indexer.go new file mode 100644 index 00000000..dd06fb98 --- /dev/null +++ b/ghl/internal/indexer/indexer.go @@ -0,0 +1,148 @@ +// Package indexer orchestrates fleet-wide repository cloning and indexing. 
+package indexer
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"sync"
+
+	"github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/manifest"
+)
+
+// Client is the interface for calling the codebase-memory-mcp binary.
+// Defined at the consumer so tests can substitute a fake implementation.
+type Client interface {
+	IndexRepository(ctx context.Context, repoPath, mode string) error
+}
+
+// Cloner is the interface for ensuring a local clone of a repository exists.
+// Defined at the consumer so tests can substitute a fake implementation.
+type Cloner interface {
+	EnsureClone(ctx context.Context, githubURL, localPath string) error
+}
+
+// IndexResult summarises the outcome of an IndexAll call.
+// Invariant: Succeeded + Failed == Total once IndexAll returns.
+type IndexResult struct {
+	Total     int
+	Succeeded int
+	Failed    int
+	Errors    []RepoError
+}
+
+// RepoError records an indexing failure for a single repo.
+type RepoError struct {
+	RepoSlug string
+	Err      error
+}
+
+// Config configures the Indexer.
+type Config struct {
+	Client      Client
+	Cloner      Cloner
+	CacheDir    string // local directory where repos are cloned
+	Concurrency int    // max parallel indexing goroutines (default: 5)
+
+	// Optional callbacks for observability / testing.
+	// They are invoked from worker goroutines, so implementations should be
+	// safe for concurrent use.
+	OnRepoStart func(repoSlug string)
+	OnRepoDone  func(repoSlug string, err error)
+	OnClone     func(githubURL, localPath string)
+}
+
+// Indexer manages cloning and indexing a fleet of repositories.
+type Indexer struct {
+	cfg Config
+}
+
+// New creates a new Indexer with the given config.
+// Concurrency defaults to 5 if <= 0.
+func New(cfg Config) *Indexer {
+	if cfg.Concurrency <= 0 {
+		cfg.Concurrency = 5
+	}
+	return &Indexer{cfg: cfg}
+}
+
+// IndexAll clones and indexes every repo in the list.
+// It respects the configured concurrency limit and continues on per-repo errors.
+// If force is true, re-indexes repos even if already up-to-date.
+// It returns immediately if ctx is cancelled, but in-flight goroutines may still complete.
+func (i *Indexer) IndexAll(ctx context.Context, repos []manifest.Repo, force bool) IndexResult {
+	result := IndexResult{Total: len(repos)}
+	if len(repos) == 0 {
+		return result
+	}
+
+	type repoErr struct {
+		slug string
+		err  error
+	}
+
+	// sem is a counting semaphore bounding parallel workers.
+	// errs is buffered to len(repos) so worker sends never block, even if
+	// the collector loop has not started draining yet.
+	sem := make(chan struct{}, i.cfg.Concurrency)
+	errs := make(chan repoErr, len(repos))
+	var wg sync.WaitGroup
+
+	for _, repo := range repos {
+		// Check context before dispatching
+		select {
+		case <-ctx.Done():
+			// Record remaining as failed. Once ctx is done this branch wins
+			// on every remaining iteration, so each undispatched repo gets
+			// its own RepoError carrying ctx.Err().
+			result.Failed++
+			result.Errors = append(result.Errors, RepoError{RepoSlug: repo.Name, Err: ctx.Err()})
+			continue
+		case sem <- struct{}{}:
+		}
+
+		wg.Add(1)
+		go func(r manifest.Repo) {
+			defer wg.Done()
+			defer func() { <-sem }()
+
+			if i.cfg.OnRepoStart != nil {
+				i.cfg.OnRepoStart(r.Name)
+			}
+			err := i.IndexRepo(ctx, r, force)
+			if i.cfg.OnRepoDone != nil {
+				i.cfg.OnRepoDone(r.Name, err)
+			}
+			errs <- repoErr{slug: r.Name, err: err}
+		}(repo)
+	}
+
+	// Wait for all dispatched workers, then close errs so the collector
+	// loop below terminates. Only this goroutine sends the close.
+	wg.Wait()
+	close(errs)
+
+	// Tally outcomes. Error order follows completion order, which is
+	// nondeterministic under concurrency.
+	for re := range errs {
+		if re.err != nil {
+			result.Failed++
+			result.Errors = append(result.Errors, RepoError{RepoSlug: re.slug, Err: re.err})
+		} else {
+			result.Succeeded++
+		}
+	}
+
+	return result
+}
+
+// IndexRepo clones (or updates) a single repo and triggers indexing.
+func (i *Indexer) IndexRepo(ctx context.Context, repo manifest.Repo, force bool) error { + localPath := filepath.Join(i.cfg.CacheDir, repo.Name) + + if i.cfg.OnClone != nil { + i.cfg.OnClone(repo.GitHubURL, localPath) + } + + // Step 1: Ensure local clone exists + if err := i.cfg.Cloner.EnsureClone(ctx, repo.GitHubURL, localPath); err != nil { + return fmt.Errorf("indexer: clone %q: %w", repo.Name, err) + } + + // Step 2: Index via MCP binary + mode := "moderate" // fast enough for incremental; use "full" for weekly force run + if force { + mode = "full" + } + if err := i.cfg.Client.IndexRepository(ctx, localPath, mode); err != nil { + return fmt.Errorf("indexer: index %q: %w", repo.Name, err) + } + + return nil +} diff --git a/ghl/internal/indexer/indexer_test.go b/ghl/internal/indexer/indexer_test.go new file mode 100644 index 00000000..49852871 --- /dev/null +++ b/ghl/internal/indexer/indexer_test.go @@ -0,0 +1,294 @@ +package indexer_test + +import ( + "context" + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/indexer" + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/manifest" +) + +// ── Fake MCP client ──────────────────────────────────────────── + +type fakeClient struct { + indexCalls atomic.Int64 + shouldFail bool + callDuration time.Duration +} + +func (f *fakeClient) IndexRepository(ctx context.Context, repoPath, mode string) error { + f.indexCalls.Add(1) + if f.callDuration > 0 { + select { + case <-time.After(f.callDuration): + case <-ctx.Done(): + return ctx.Err() + } + } + if f.shouldFail { + return errors.New("fake index error") + } + return nil +} + +// ── Fake cloner ──────────────────────────────────────────────── + +type fakeCloner struct { + cloneCalls atomic.Int64 + shouldFail bool +} + +func (f *fakeCloner) EnsureClone(ctx context.Context, githubURL, localPath string) error { + f.cloneCalls.Add(1) + if f.shouldFail { + return errors.New("fake clone error") + } + return nil 
+} + +// ── Tests ────────────────────────────────────────────────────── + +func sampleRepos(n int) []manifest.Repo { + repos := make([]manifest.Repo, n) + for i := range repos { + repos[i] = manifest.Repo{ + Name: "repo-" + string(rune('a'+i)), + GitHubURL: "https://github.com/GoHighLevel/repo-" + string(rune('a'+i)), + Team: "revex", + Type: "backend", + } + } + return repos +} + +func TestIndexer_IndexAll_AllReposIndexed(t *testing.T) { + client := &fakeClient{} + cloner := &fakeCloner{} + repos := sampleRepos(5) + + idx := indexer.New(indexer.Config{ + Client: client, + Cloner: cloner, + CacheDir: t.TempDir(), + Concurrency: 2, + }) + + ctx := context.Background() + result := idx.IndexAll(ctx, repos, false) + + if result.Total != 5 { + t.Errorf("Total: want 5, got %d", result.Total) + } + if result.Succeeded != 5 { + t.Errorf("Succeeded: want 5, got %d", result.Succeeded) + } + if result.Failed != 0 { + t.Errorf("Failed: want 0, got %d", result.Failed) + } + if client.indexCalls.Load() != 5 { + t.Errorf("IndexRepository calls: want 5, got %d", client.indexCalls.Load()) + } + if cloner.cloneCalls.Load() != 5 { + t.Errorf("EnsureClone calls: want 5, got %d", cloner.cloneCalls.Load()) + } +} + +func TestIndexer_IndexAll_ContinuesOnError(t *testing.T) { + client := &fakeClient{shouldFail: true} + cloner := &fakeCloner{} + repos := sampleRepos(3) + + idx := indexer.New(indexer.Config{ + Client: client, + Cloner: cloner, + CacheDir: t.TempDir(), + Concurrency: 1, + }) + + ctx := context.Background() + result := idx.IndexAll(ctx, repos, false) + + // All failed, but all were attempted — must not stop on first error + if result.Total != 3 { + t.Errorf("Total: want 3, got %d", result.Total) + } + if result.Failed != 3 { + t.Errorf("Failed: want 3, got %d", result.Failed) + } + if result.Succeeded != 0 { + t.Errorf("Succeeded: want 0, got %d", result.Succeeded) + } + if len(result.Errors) != 3 { + t.Errorf("Errors: want 3, got %d", len(result.Errors)) + } +} + +func 
TestIndexer_IndexAll_ConcurrencyLimit(t *testing.T) { + const concurrency = 3 + const totalRepos = 9 + + var inFlight atomic.Int64 + var maxInFlight atomic.Int64 + + client := &fakeClient{callDuration: 20 * time.Millisecond} + cloner := &fakeCloner{} + + // Wrap the client to track in-flight count + idx := indexer.New(indexer.Config{ + Client: client, + Cloner: cloner, + CacheDir: t.TempDir(), + Concurrency: concurrency, + OnRepoStart: func(_ string) { + cur := inFlight.Add(1) + for { + old := maxInFlight.Load() + if cur <= old || maxInFlight.CompareAndSwap(old, cur) { + break + } + } + }, + OnRepoDone: func(_ string, _ error) { + inFlight.Add(-1) + }, + }) + + ctx := context.Background() + idx.IndexAll(ctx, sampleRepos(totalRepos), false) + + if got := maxInFlight.Load(); got > int64(concurrency) { + t.Errorf("max in-flight: want <= %d, got %d (concurrency limit exceeded)", concurrency, got) + } +} + +func TestIndexer_IndexAll_ContextCancellation(t *testing.T) { + client := &fakeClient{callDuration: 500 * time.Millisecond} + cloner := &fakeCloner{} + repos := sampleRepos(10) + + idx := indexer.New(indexer.Config{ + Client: client, + Cloner: cloner, + CacheDir: t.TempDir(), + Concurrency: 2, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + result := idx.IndexAll(ctx, repos, false) + + // With 500ms per repo and 50ms total timeout, we can't finish all 10 + if result.Succeeded == 10 { + t.Error("expected context cancellation to stop indexing before all 10 repos complete") + } +} + +func TestIndexer_IndexRepo_SingleRepo(t *testing.T) { + client := &fakeClient{} + cloner := &fakeCloner{} + + idx := indexer.New(indexer.Config{ + Client: client, + Cloner: cloner, + CacheDir: t.TempDir(), + Concurrency: 1, + }) + + repo := manifest.Repo{ + Name: "membership-backend", + GitHubURL: "https://github.com/GoHighLevel/membership-backend", + } + + ctx := context.Background() + err := idx.IndexRepo(ctx, repo, false) + if 
err != nil { + t.Errorf("IndexRepo: unexpected error: %v", err) + } + if client.indexCalls.Load() != 1 { + t.Errorf("IndexRepository calls: want 1, got %d", client.indexCalls.Load()) + } +} + +func TestIndexer_IndexRepo_CloneFailure(t *testing.T) { + client := &fakeClient{} + cloner := &fakeCloner{shouldFail: true} + + idx := indexer.New(indexer.Config{ + Client: client, + Cloner: cloner, + CacheDir: t.TempDir(), + Concurrency: 1, + }) + + repo := manifest.Repo{ + Name: "membership-backend", + GitHubURL: "https://github.com/GoHighLevel/membership-backend", + } + + ctx := context.Background() + err := idx.IndexRepo(ctx, repo, false) + if err == nil { + t.Error("IndexRepo: expected error from clone failure, got nil") + } + // Should not have tried to index if clone failed + if client.indexCalls.Load() != 0 { + t.Errorf("IndexRepository: should not be called if clone fails, got %d calls", client.indexCalls.Load()) + } +} + +func TestIndexer_EmptyRepoList(t *testing.T) { + client := &fakeClient{} + cloner := &fakeCloner{} + + idx := indexer.New(indexer.Config{ + Client: client, + Cloner: cloner, + CacheDir: t.TempDir(), + Concurrency: 5, + }) + + ctx := context.Background() + result := idx.IndexAll(ctx, []manifest.Repo{}, false) + + if result.Total != 0 { + t.Errorf("Total: want 0, got %d", result.Total) + } + if result.Succeeded != 0 { + t.Errorf("Succeeded: want 0, got %d", result.Succeeded) + } +} + +func TestIndexer_LocalCachePath(t *testing.T) { + cacheDir := t.TempDir() + var capturedPath string + + client := &fakeClient{} + cloner := &fakeCloner{} + + idx := indexer.New(indexer.Config{ + Client: client, + Cloner: cloner, + CacheDir: cacheDir, + OnClone: func(_, path string) { + capturedPath = path + }, + Concurrency: 1, + }) + + repo := manifest.Repo{ + Name: "membership-backend", + GitHubURL: "https://github.com/GoHighLevel/membership-backend", + } + + ctx := context.Background() + _ = idx.IndexRepo(ctx, repo, false) + + expected := cacheDir + 
"/membership-backend" + if capturedPath != expected { + t.Errorf("clone path: want %q, got %q", expected, capturedPath) + } +} diff --git a/ghl/internal/manifest/manifest.go b/ghl/internal/manifest/manifest.go new file mode 100644 index 00000000..77389a00 --- /dev/null +++ b/ghl/internal/manifest/manifest.go @@ -0,0 +1,97 @@ +// Package manifest loads and validates the GHL fleet repos manifest (REPOS.yaml). +package manifest + +import ( + "fmt" + "io" + "net/url" + "os" + + "gopkg.in/yaml.v3" +) + +// Repo describes a single GHL GitHub repository to be indexed. +type Repo struct { + Name string `yaml:"name"` + GitHubURL string `yaml:"github_url"` + Team string `yaml:"team"` + Type string `yaml:"type"` // "backend" | "frontend" | "infra" | "other" + Tags []string `yaml:"tags"` +} + +// Validate returns an error if the repo is missing required fields or has invalid values. +func (r Repo) Validate() error { + if r.Name == "" { + return fmt.Errorf("repo: name is required") + } + if r.GitHubURL == "" { + return fmt.Errorf("repo %q: github_url is required", r.Name) + } + u, err := url.ParseRequestURI(r.GitHubURL) + if err != nil || u.Scheme == "" || u.Host == "" { + return fmt.Errorf("repo %q: invalid github_url %q", r.Name, r.GitHubURL) + } + return nil +} + +// Slug returns the last path component of GitHubURL (the repo name on disk). +func (r Repo) Slug() string { + return r.Name +} + +// Manifest is the parsed top-level structure of REPOS.yaml. +type Manifest struct { + Repos []Repo `yaml:"repos"` +} + +// FindByName returns the repo with the given name, or false if not found. +func (m *Manifest) FindByName(name string) (Repo, bool) { + for _, r := range m.Repos { + if r.Name == name { + return r, true + } + } + return Repo{}, false +} + +// FilterByTeam returns all repos belonging to the given team. 
+func (m *Manifest) FilterByTeam(team string) []Repo { + var out []Repo + for _, r := range m.Repos { + if r.Team == team { + out = append(out, r) + } + } + return out +} + +// Load reads and validates the manifest from a file path. +func Load(path string) (*Manifest, error) { + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("manifest: open %q: %w", path, err) + } + defer f.Close() + return LoadReader(f) +} + +// LoadReader reads and validates the manifest from an io.Reader. +func LoadReader(r io.Reader) (*Manifest, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("manifest: read: %w", err) + } + + var m Manifest + if err := yaml.Unmarshal(data, &m); err != nil { + return nil, fmt.Errorf("manifest: parse YAML: %w", err) + } + + for i, repo := range m.Repos { + if err := repo.Validate(); err != nil { + return nil, fmt.Errorf("manifest: repo[%d]: %w", i, err) + } + } + + return &m, nil +} diff --git a/ghl/internal/manifest/manifest_test.go b/ghl/internal/manifest/manifest_test.go new file mode 100644 index 00000000..d5366c50 --- /dev/null +++ b/ghl/internal/manifest/manifest_test.go @@ -0,0 +1,130 @@ +package manifest_test + +import ( + "strings" + "testing" + + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/manifest" +) + +const sampleYAML = ` +repos: + - name: membership-backend + github_url: https://github.com/GoHighLevel/membership-backend + team: revex + type: backend + tags: [membership, billing, subscription] + + - name: ghl-revex-frontend + github_url: https://github.com/GoHighLevel/ghl-revex-frontend + team: revex + type: frontend + tags: [crm, contacts, pipeline] + + - name: platform-backend + github_url: https://github.com/GoHighLevel/platform-backend + team: platform + type: backend + tags: [infrastructure, routing] +` + +func TestLoad_ParsesAllRepos(t *testing.T) { + m, err := manifest.LoadReader(strings.NewReader(sampleYAML)) + if err != nil { + t.Fatalf("LoadReader failed: %v", err) + } + if 
len(m.Repos) != 3 { + t.Fatalf("want 3 repos, got %d", len(m.Repos)) + } +} + +func TestLoad_RepoFields(t *testing.T) { + m, err := manifest.LoadReader(strings.NewReader(sampleYAML)) + if err != nil { + t.Fatalf("LoadReader failed: %v", err) + } + r := m.Repos[0] + if r.Name != "membership-backend" { + t.Errorf("Name: want membership-backend, got %q", r.Name) + } + if r.GitHubURL != "https://github.com/GoHighLevel/membership-backend" { + t.Errorf("GitHubURL: want ..., got %q", r.GitHubURL) + } + if r.Team != "revex" { + t.Errorf("Team: want revex, got %q", r.Team) + } + if r.Type != "backend" { + t.Errorf("Type: want backend, got %q", r.Type) + } + if len(r.Tags) != 3 { + t.Errorf("Tags: want 3, got %d", len(r.Tags)) + } +} + +func TestLoad_InvalidYAML(t *testing.T) { + _, err := manifest.LoadReader(strings.NewReader("not: valid: yaml: :::")) + if err == nil { + t.Error("want error for invalid YAML, got nil") + } +} + +func TestLoad_EmptyRepos(t *testing.T) { + m, err := manifest.LoadReader(strings.NewReader("repos: []")) + if err != nil { + t.Fatalf("LoadReader failed: %v", err) + } + if len(m.Repos) != 0 { + t.Errorf("want 0 repos, got %d", len(m.Repos)) + } +} + +func TestManifest_FindByName(t *testing.T) { + m, _ := manifest.LoadReader(strings.NewReader(sampleYAML)) + + r, ok := m.FindByName("ghl-revex-frontend") + if !ok { + t.Fatal("FindByName: want found, got not found") + } + if r.Type != "frontend" { + t.Errorf("Type: want frontend, got %q", r.Type) + } + + _, ok = m.FindByName("nonexistent-repo") + if ok { + t.Error("FindByName: want not found for unknown name") + } +} + +func TestManifest_FilterByTeam(t *testing.T) { + m, _ := manifest.LoadReader(strings.NewReader(sampleYAML)) + revex := m.FilterByTeam("revex") + if len(revex) != 2 { + t.Errorf("FilterByTeam(revex): want 2, got %d", len(revex)) + } + platform := m.FilterByTeam("platform") + if len(platform) != 1 { + t.Errorf("FilterByTeam(platform): want 1, got %d", len(platform)) + } +} + +func 
TestRepo_Validate(t *testing.T) { + valid := manifest.Repo{Name: "foo", GitHubURL: "https://github.com/GoHighLevel/foo"} + if err := valid.Validate(); err != nil { + t.Errorf("Validate: want nil for valid repo, got %v", err) + } + + missingName := manifest.Repo{GitHubURL: "https://github.com/GoHighLevel/foo"} + if err := missingName.Validate(); err == nil { + t.Error("Validate: want error for missing name") + } + + missingURL := manifest.Repo{Name: "foo"} + if err := missingURL.Validate(); err == nil { + t.Error("Validate: want error for missing github_url") + } + + badURL := manifest.Repo{Name: "foo", GitHubURL: "not-a-url"} + if err := badURL.Validate(); err == nil { + t.Error("Validate: want error for invalid github_url") + } +} diff --git a/ghl/internal/mcp/client.go b/ghl/internal/mcp/client.go new file mode 100644 index 00000000..735bd2d2 --- /dev/null +++ b/ghl/internal/mcp/client.go @@ -0,0 +1,276 @@ +// Package mcp provides a JSON-RPC 2.0 MCP client that speaks to the +// codebase-memory-mcp binary over stdin/stdout. +package mcp + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "os/exec" + "sync" + "sync/atomic" +) + +// ServerInfo holds identifying information returned during initialization. +type ServerInfo struct { + Name string `json:"name"` + Version string `json:"version"` +} + +// Content is a single item returned in a tool result. +type Content struct { + Type string `json:"type"` + Text string `json:"text"` +} + +// ToolResult is the parsed result of a tools/call response. +type ToolResult struct { + Content []Content `json:"content"` + IsError bool `json:"isError"` +} + +// Client manages a single subprocess running codebase-memory-mcp and serializes +// MCP JSON-RPC requests over stdin/stdout. +type Client struct { + cmd *exec.Cmd + stdin io.WriteCloser + reader *bufio.Scanner + mu sync.Mutex + nextID atomic.Int64 + info ServerInfo + closed bool +} + +// jsonrpcRequest is the envelope for outbound MCP calls. 
+type jsonrpcRequest struct { + JSONRPC string `json:"jsonrpc"` + ID int64 `json:"id"` + Method string `json:"method"` + Params interface{} `json:"params,omitempty"` +} + +// jsonrpcResponse is the envelope for inbound MCP responses. +type jsonrpcResponse struct { + JSONRPC string `json:"jsonrpc"` + ID int64 `json:"id"` + Result json.RawMessage `json:"result,omitempty"` + Error *jsonrpcError `json:"error,omitempty"` +} + +type jsonrpcError struct { + Code int `json:"code"` + Message string `json:"message"` +} + +// initResult is the subset of the initialize response we care about. +type initResult struct { + ServerInfo struct { + Name string `json:"name"` + Version string `json:"version"` + } `json:"serverInfo"` +} + +// toolCallResult is the subset of tools/call response we care about. +type toolCallResult struct { + Content []Content `json:"content"` + IsError bool `json:"isError"` +} + +// NewClient launches the binary at binPath, performs MCP initialization, and +// returns a ready-to-use Client. It blocks until initialization succeeds or ctx +// is cancelled. +func NewClient(ctx context.Context, binPath string) (*Client, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + + // The startup context should bound initialization, not the subprocess lifetime. + // Pool replacement creates clients with short-lived bootstrap contexts. + cmd := exec.Command(binPath) + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, fmt.Errorf("mcp: stdin pipe: %w", err) + } + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("mcp: stdout pipe: %w", err) + } + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("mcp: start binary %q: %w", binPath, err) + } + + c := &Client{ + cmd: cmd, + stdin: stdin, + reader: bufio.NewScanner(stdout), + } + // Increase scanner buffer for large responses (e.g. 
index_repository results) + c.reader.Buffer(make([]byte, 4*1024*1024), 4*1024*1024) + + if err := c.initialize(ctx); err != nil { + _ = cmd.Process.Kill() + _ = cmd.Wait() + return nil, fmt.Errorf("mcp: initialize: %w", err) + } + + return c, nil +} + +// ServerInfo returns the server name and version reported during initialization. +func (c *Client) ServerInfo() ServerInfo { + return c.info +} + +// Call sends an arbitrary MCP request and returns the raw result payload. +// It is safe to call from multiple goroutines — requests are serialized. +func (c *Client) Call(ctx context.Context, method string, params interface{}) (json.RawMessage, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + return c.roundtrip(ctx, method, params) +} + +// CallTool sends a tools/call request and returns the parsed result. +// It is safe to call from multiple goroutines — requests are serialized. +func (c *Client) CallTool(ctx context.Context, name string, params map[string]interface{}) (*ToolResult, error) { + toolParams := map[string]interface{}{ + "name": name, + } + if params != nil { + toolParams["arguments"] = params + } + + raw, err := c.Call(ctx, "tools/call", toolParams) + if err != nil { + return nil, err + } + + var result toolCallResult + if err := json.Unmarshal(raw, &result); err != nil { + return nil, fmt.Errorf("mcp: parse tools/call result: %w", err) + } + return &ToolResult{Content: result.Content, IsError: result.IsError}, nil +} + +// Close terminates the subprocess. Safe to call multiple times. 
+func (c *Client) Close() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.closed {
+		return
+	}
+	c.closed = true
+	// Closing stdin signals EOF to the subprocess; Kill covers servers that
+	// do not exit on EOF; Wait reaps the process so no zombie is left.
+	// NOTE(review): Close contends on c.mu, so it blocks until any in-flight
+	// roundtrip releases the lock — confirm this is acceptable at shutdown.
+	_ = c.stdin.Close()
+	if c.cmd.Process != nil {
+		_ = c.cmd.Process.Kill()
+	}
+	_ = c.cmd.Wait()
+}
+
+// ── Internal ───────────────────────────────────────────────────
+
+// initialize performs the MCP handshake: an "initialize" request whose
+// response carries the server's name/version, followed by the
+// "notifications/initialized" notification.
+func (c *Client) initialize(ctx context.Context) error {
+	initParams := map[string]interface{}{
+		"protocolVersion": "2025-11-25",
+		"capabilities":    map[string]interface{}{},
+		"clientInfo":      map[string]interface{}{"name": "ghl-fleet", "version": "1.0.0"},
+	}
+	raw, err := c.roundtrip(ctx, "initialize", initParams)
+	if err != nil {
+		return err
+	}
+
+	var result initResult
+	if err := json.Unmarshal(raw, &result); err != nil {
+		return fmt.Errorf("parse initialize result: %w", err)
+	}
+	c.info = ServerInfo{
+		Name:    result.ServerInfo.Name,
+		Version: result.ServerInfo.Version,
+	}
+
+	// Send initialized notification (no response expected).
+	// Best-effort: the error is deliberately ignored — a failed notification
+	// write does not invalidate the already-completed handshake.
+	_ = c.send(jsonrpcRequest{
+		JSONRPC: "2.0",
+		Method:  "notifications/initialized",
+	})
+
+	return nil
+}
+
+// roundtrip sends a request and reads the matching response.
+// Requests are serialized via the mutex so only one is in-flight at a time.
+func (c *Client) roundtrip(ctx context.Context, method string, params interface{}) (json.RawMessage, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// IDs start at 1 (nextID.Add on a zero value), so a decoded response
+	// whose ID is 0 can only mean the "id" field was absent — i.e. a
+	// server-initiated notification. See the skip check below.
+	id := c.nextID.Add(1)
+	req := jsonrpcRequest{
+		JSONRPC: "2.0",
+		ID:      id,
+		Method:  method,
+		Params:  params,
+	}
+
+	if err := c.send(req); err != nil {
+		return nil, fmt.Errorf("mcp: send %q: %w", method, err)
+	}
+
+	// Read lines until we get a response with our ID
+	for {
+		// Check context before blocking read.
+		// NOTE(review): cancellation is only observed between lines — once
+		// Scan blocks on a silent subprocess it cannot be interrupted here.
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		default:
+		}
+
+		if !c.reader.Scan() {
+			if err := c.reader.Err(); err != nil {
+				return nil, fmt.Errorf("mcp: read: %w", err)
+			}
+			return nil, fmt.Errorf("mcp: subprocess closed stdout unexpectedly")
+		}
+
+		line := c.reader.Text()
+		if line == "" {
+			continue
+		}
+
+		var resp jsonrpcResponse
+		if err := json.Unmarshal([]byte(line), &resp); err != nil {
+			// Not valid JSON-RPC — might be a progress notification, skip
+			continue
+		}
+
+		// Skip notifications (no ID)
+		if resp.ID == 0 && resp.JSONRPC == "2.0" {
+			continue
+		}
+
+		if resp.ID != id {
+			// Response for a different request (shouldn't happen with serialization)
+			continue
+		}
+
+		if resp.Error != nil {
+			return nil, fmt.Errorf("mcp: %q error %d: %s", method, resp.Error.Code, resp.Error.Message)
+		}
+
+		return resp.Result, nil
+	}
+}
+
+// send marshals req and writes it to the subprocess's stdin as a single
+// newline-terminated JSON line (the framing the line-based reader expects).
+func (c *Client) send(req jsonrpcRequest) error {
+	b, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+	b = append(b, '\n')
+	_, err = c.stdin.Write(b)
+	return err
+}
diff --git a/ghl/internal/mcp/client_test.go b/ghl/internal/mcp/client_test.go
new file mode 100644
index 00000000..ac261389
--- /dev/null
+++ b/ghl/internal/mcp/client_test.go
@@ -0,0 +1,252 @@
+package mcp_test
+
+import (
+	"context"
+	"encoding/json"
+	"os"
+	"os/exec"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/mcp"
+)
+
+// echoServer is a tiny Go program used as a fake codebase-memory-mcp binary.
+// It reads a JSON-RPC request from stdin and echoes a fixed response to stdout. +const echoServerSrc = ` +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "os" +) + +func main() { + scanner := bufio.NewScanner(os.Stdin) + for scanner.Scan() { + line := scanner.Text() + if line == "" { continue } + var req map[string]interface{} + if err := json.Unmarshal([]byte(line), &req); err != nil { continue } + + id := req["id"] + method, _ := req["method"].(string) + + switch method { + case "initialize": + resp := map[string]interface{}{ + "jsonrpc": "2.0", "id": id, + "result": map[string]interface{}{ + "protocolVersion": "2024-11-05", + "capabilities": map[string]interface{}{"tools": map[string]interface{}{}}, + "serverInfo": map[string]interface{}{"name": "codebase-memory-mcp", "version": "0.5.5"}, + }, + } + b, _ := json.Marshal(resp) + fmt.Println(string(b)) + case "tools/call": + params, _ := req["params"].(map[string]interface{}) + toolName, _ := params["name"].(string) + resp := map[string]interface{}{ + "jsonrpc": "2.0", "id": id, + "result": map[string]interface{}{ + "content": []interface{}{ + map[string]interface{}{"type": "text", "text": "ok:" + toolName}, + }, + "isError": false, + }, + } + b, _ := json.Marshal(resp) + fmt.Println(string(b)) + default: + resp := map[string]interface{}{ + "jsonrpc": "2.0", "id": id, + "error": map[string]interface{}{"code": -32601, "message": "method not found"}, + } + b, _ := json.Marshal(resp) + fmt.Println(string(b)) + } + } +} +` + +// buildEchoServer compiles the echo server and returns its path. 
+func buildEchoServer(t *testing.T) string { + t.Helper() + dir := t.TempDir() + + // Write source + srcPath := dir + "/main.go" + if err := os.WriteFile(srcPath, []byte(echoServerSrc), 0600); err != nil { + t.Fatalf("write echo server src: %v", err) + } + + // Init module + cmd := exec.Command("go", "mod", "init", "echoserver") + cmd.Dir = dir + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("go mod init: %v\n%s", err, out) + } + + // Build + binPath := dir + "/echoserver" + cmd = exec.Command("go", "build", "-o", binPath, ".") + cmd.Dir = dir + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("go build echo server: %v\n%s", err, out) + } + + return binPath +} + +func TestClient_Initialize(t *testing.T) { + bin := buildEchoServer(t) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + c, err := mcp.NewClient(ctx, bin) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + defer c.Close() + + info := c.ServerInfo() + if info.Name != "codebase-memory-mcp" { + t.Errorf("ServerInfo.Name: want codebase-memory-mcp, got %q", info.Name) + } + if info.Version != "0.5.5" { + t.Errorf("ServerInfo.Version: want 0.5.5, got %q", info.Version) + } +} + +func TestClient_CallTool_Success(t *testing.T) { + bin := buildEchoServer(t) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + c, err := mcp.NewClient(ctx, bin) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + defer c.Close() + + result, err := c.CallTool(ctx, "list_projects", nil) + if err != nil { + t.Fatalf("CallTool: %v", err) + } + if len(result.Content) == 0 { + t.Fatal("CallTool: expected content, got empty") + } + text := result.Content[0].Text + if !strings.HasPrefix(text, "ok:") { + t.Errorf("CallTool: unexpected response %q", text) + } +} + +func TestClient_CallTool_IndexRepository(t *testing.T) { + bin := buildEchoServer(t) + ctx, cancel := context.WithTimeout(context.Background(), 
10*time.Second) + defer cancel() + + c, err := mcp.NewClient(ctx, bin) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + defer c.Close() + + params := map[string]interface{}{ + "repo_path": "/tmp/test-repo", + "mode": "full", + } + result, err := c.CallTool(ctx, "index_repository", params) + if err != nil { + t.Fatalf("CallTool index_repository: %v", err) + } + if result.IsError { + t.Errorf("CallTool: unexpected error result") + } +} + +func TestClient_CallTool_Timeout(t *testing.T) { + bin := buildEchoServer(t) + // Very short timeout — should cause context deadline exceeded + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) + defer cancel() + + // Give enough time to start but the tool call will use the expired ctx + startCtx, startCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer startCancel() + + c, err := mcp.NewClient(startCtx, bin) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + defer c.Close() + + // Cancel before calling + cancel() + _, err = c.CallTool(ctx, "list_projects", nil) + if err == nil { + t.Error("CallTool: expected error from cancelled context, got nil") + } +} + +func TestClient_SerializeParams(t *testing.T) { + // Ensure params are correctly serialized to JSON + params := map[string]interface{}{ + "repo_path": "/app/fleet-cache/membership-backend", + "mode": "moderate", + } + b, err := json.Marshal(params) + if err != nil { + t.Fatalf("marshal params: %v", err) + } + var roundtrip map[string]interface{} + if err := json.Unmarshal(b, &roundtrip); err != nil { + t.Fatalf("unmarshal: %v", err) + } + if roundtrip["mode"] != "moderate" { + t.Errorf("mode: want moderate, got %v", roundtrip["mode"]) + } +} + +func TestClient_Close_Idempotent(t *testing.T) { + bin := buildEchoServer(t) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + c, err := mcp.NewClient(ctx, bin) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + c.Close() + 
c.Close() // should not panic +} + +func TestClient_RemainsUsableAfterInitContextCancel(t *testing.T) { + bin := buildEchoServer(t) + startCtx, cancel := context.WithCancel(context.Background()) + + c, err := mcp.NewClient(startCtx, bin) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + defer c.Close() + + cancel() + time.Sleep(100 * time.Millisecond) + + callCtx, callCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer callCancel() + + result, err := c.CallTool(callCtx, "list_projects", nil) + if err != nil { + t.Fatalf("CallTool after init context cancel: %v", err) + } + if len(result.Content) == 0 { + t.Fatal("CallTool after init context cancel: expected content, got empty") + } +} diff --git a/ghl/internal/webhook/handler.go b/ghl/internal/webhook/handler.go new file mode 100644 index 00000000..fa45c524 --- /dev/null +++ b/ghl/internal/webhook/handler.go @@ -0,0 +1,115 @@ +// Package webhook handles incoming GitHub push events and triggers repo re-indexing. +package webhook + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "io" + "net/http" + "strings" +) + +// Config configures the webhook handler. +type Config struct { + // Secret is the HMAC-SHA256 key configured on the GitHub webhook. + // If nil, signature validation is skipped (development mode only). + Secret []byte + + // OnPush is called asynchronously when a valid push to a default branch is received. + // The argument is the repository slug (repository.name from the payload). + OnPush func(repoSlug string) +} + +// Handler is an http.Handler that processes GitHub webhook events. +type Handler struct { + cfg Config +} + +// NewHandler creates a new webhook Handler with the given configuration. +func NewHandler(cfg Config) *Handler { + return &Handler{cfg: cfg} +} + +// pushPayload is the subset of a GitHub push event we care about. 
+type pushPayload struct { + Ref string `json:"ref"` + After string `json:"after"` + Repository struct { + Name string `json:"name"` + FullName string `json:"full_name"` + CloneURL string `json:"clone_url"` + } `json:"repository"` +} + +// ServeHTTP implements http.Handler. +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) // 1 MB cap + if err != nil { + http.Error(w, "failed to read body", http.StatusBadRequest) + return + } + + // Validate HMAC-SHA256 signature if a secret is configured + if len(h.cfg.Secret) > 0 { + sig := r.Header.Get("X-Hub-Signature-256") + if sig == "" { + http.Error(w, "missing X-Hub-Signature-256", http.StatusUnauthorized) + return + } + if !validateSignature(h.cfg.Secret, body, sig) { + http.Error(w, "invalid signature", http.StatusUnauthorized) + return + } + } + + // Only process push events + event := r.Header.Get("X-GitHub-Event") + if event != "push" { + w.WriteHeader(http.StatusOK) + return + } + + // Parse payload + var payload pushPayload + if err := json.Unmarshal(body, &payload); err != nil { + http.Error(w, "invalid JSON payload", http.StatusBadRequest) + return + } + + // Only handle pushes to default branches (master or main) + ref := payload.Ref + if !strings.HasSuffix(ref, "/master") && !strings.HasSuffix(ref, "/main") { + w.WriteHeader(http.StatusOK) + return + } + + repoSlug := payload.Repository.Name + if repoSlug == "" { + http.Error(w, "missing repository.name", http.StatusBadRequest) + return + } + + // Fire-and-forget — respond 202 immediately + if h.cfg.OnPush != nil { + go h.cfg.OnPush(repoSlug) + } + + w.WriteHeader(http.StatusAccepted) +} + +// validateSignature checks the X-Hub-Signature-256 header using a constant-time comparison. 
+func validateSignature(secret, body []byte, signature string) bool { + if !strings.HasPrefix(signature, "sha256=") { + return false + } + got, err := hex.DecodeString(strings.TrimPrefix(signature, "sha256=")) + if err != nil { + return false + } + mac := hmac.New(sha256.New, secret) + mac.Write(body) + expected := mac.Sum(nil) + return hmac.Equal(got, expected) +} diff --git a/ghl/internal/webhook/handler_test.go b/ghl/internal/webhook/handler_test.go new file mode 100644 index 00000000..9345f8ac --- /dev/null +++ b/ghl/internal/webhook/handler_test.go @@ -0,0 +1,254 @@ +package webhook_test + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/GoHighLevel/codebase-memory-mcp/ghl/internal/webhook" +) + +// ── Helpers ──────────────────────────────────────────────────── + +func sign(secret, body []byte) string { + mac := hmac.New(sha256.New, secret) + mac.Write(body) + return "sha256=" + hex.EncodeToString(mac.Sum(nil)) +} + +func pushPayload(repoName, ref, afterSHA string) []byte { + b, _ := json.Marshal(map[string]interface{}{ + "ref": ref, + "after": afterSHA, + "repository": map[string]interface{}{ + "name": repoName, + "full_name": "GoHighLevel/" + repoName, + "clone_url": "https://github.com/GoHighLevel/" + repoName + ".git", + }, + }) + return b +} + +func makeRequest(t *testing.T, body []byte, secret []byte, event string) *http.Request { + t.Helper() + req := httptest.NewRequest(http.MethodPost, "/webhooks/github", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("X-GitHub-Event", event) + if secret != nil { + req.Header.Set("X-Hub-Signature-256", sign(secret, body)) + } + return req +} + +// ── Tests ────────────────────────────────────────────────────── + +func TestHandler_ValidPush_Accepted(t *testing.T) { + secret := []byte("test-secret") + triggered := make(chan string, 1) + + h := 
webhook.NewHandler(webhook.Config{ + Secret: secret, + OnPush: func(repoSlug string) { + triggered <- repoSlug + }, + }) + + body := pushPayload("membership-backend", "refs/heads/master", "abc123") + req := makeRequest(t, body, secret, "push") + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusAccepted { + t.Errorf("status: want 202, got %d", rr.Code) + } + + select { + case slug := <-triggered: + if slug != "membership-backend" { + t.Errorf("OnPush slug: want membership-backend, got %q", slug) + } + case <-time.After(2 * time.Second): + t.Error("OnPush: not called within timeout") + } +} + +func TestHandler_InvalidSignature_Rejected(t *testing.T) { + h := webhook.NewHandler(webhook.Config{ + Secret: []byte("real-secret"), + OnPush: func(_ string) { /* should not be called */ }, + }) + + body := pushPayload("membership-backend", "refs/heads/master", "abc123") + // Sign with wrong secret + req := makeRequest(t, body, []byte("wrong-secret"), "push") + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusUnauthorized { + t.Errorf("status: want 401, got %d", rr.Code) + } +} + +func TestHandler_MissingSignature_Rejected(t *testing.T) { + h := webhook.NewHandler(webhook.Config{ + Secret: []byte("real-secret"), + OnPush: func(_ string) {}, + }) + + body := pushPayload("membership-backend", "refs/heads/master", "abc123") + req := makeRequest(t, body, nil /* no signature */, "push") + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusUnauthorized { + t.Errorf("status: want 401, got %d", rr.Code) + } +} + +func TestHandler_NonPushEvent_Ignored(t *testing.T) { + secret := []byte("test-secret") + called := false + + h := webhook.NewHandler(webhook.Config{ + Secret: secret, + OnPush: func(_ string) { called = true }, + }) + + body := pushPayload("membership-backend", "refs/heads/master", "abc123") + req := makeRequest(t, body, secret, "pull_request") + rr := httptest.NewRecorder() + 
h.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Errorf("status: want 200, got %d", rr.Code) + } + if called { + t.Error("OnPush: should not be called for non-push events") + } +} + +func TestHandler_NonDefaultBranch_Ignored(t *testing.T) { + secret := []byte("test-secret") + called := false + + h := webhook.NewHandler(webhook.Config{ + Secret: secret, + OnPush: func(_ string) { called = true }, + }) + + // Feature branch push — should be ignored + body := pushPayload("membership-backend", "refs/heads/feat/new-feature", "abc123") + req := makeRequest(t, body, secret, "push") + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Errorf("status: want 200 for non-default branch, got %d", rr.Code) + } + if called { + t.Error("OnPush: should not be called for non-default branch pushes") + } +} + +func TestHandler_MainBranch_Accepted(t *testing.T) { + secret := []byte("test-secret") + triggered := make(chan string, 1) + + h := webhook.NewHandler(webhook.Config{ + Secret: secret, + OnPush: func(slug string) { triggered <- slug }, + }) + + // "main" branch (not "master") — both should be accepted + body := pushPayload("ghl-revex-frontend", "refs/heads/main", "def456") + req := makeRequest(t, body, secret, "push") + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusAccepted { + t.Errorf("status: want 202, got %d", rr.Code) + } + select { + case slug := <-triggered: + if slug != "ghl-revex-frontend" { + t.Errorf("OnPush slug: want ghl-revex-frontend, got %q", slug) + } + case <-time.After(2 * time.Second): + t.Error("OnPush: not called for main branch within timeout") + } +} + +func TestHandler_NoSecret_AllowsAnyRequest(t *testing.T) { + // When no secret is configured (dev mode), skip signature validation + triggered := make(chan string, 1) + + h := webhook.NewHandler(webhook.Config{ + Secret: nil, // no secret + OnPush: func(slug string) { triggered <- slug }, + }) + + body := 
pushPayload("platform-backend", "refs/heads/master", "xyz789") + req := httptest.NewRequest(http.MethodPost, "/webhooks/github", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("X-GitHub-Event", "push") + // No signature header + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusAccepted { + t.Errorf("status: want 202 with no secret, got %d", rr.Code) + } +} + +func TestHandler_InvalidJSON_BadRequest(t *testing.T) { + secret := []byte("test-secret") + badBody := []byte("not json {{{") + + h := webhook.NewHandler(webhook.Config{ + Secret: secret, + OnPush: func(_ string) {}, + }) + + req := makeRequest(t, badBody, secret, "push") + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Errorf("status: want 400 for invalid JSON, got %d", rr.Code) + } +} + +func TestHandler_TimingSafeComparison(t *testing.T) { + // Verify we're not vulnerable to timing attacks by confirming the implementation + // uses hmac.Equal (or equivalent) rather than string comparison. + // This is a behavioral test: both requests have valid-looking signatures but one is wrong. 
+ secret := []byte("test-secret") + body := pushPayload("membership-backend", "refs/heads/master", "abc123") + + // Craft a signature that has the right prefix but wrong digest + wrongSig := fmt.Sprintf("sha256=%s", "0000000000000000000000000000000000000000000000000000000000000000") + + h := webhook.NewHandler(webhook.Config{ + Secret: secret, + OnPush: func(_ string) {}, + }) + + req := httptest.NewRequest(http.MethodPost, "/webhooks/github", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("X-GitHub-Event", "push") + req.Header.Set("X-Hub-Signature-256", wrongSig) + + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusUnauthorized { + t.Errorf("wrong signature should return 401, got %d", rr.Code) + } +} diff --git a/src/mcp/mcp.c b/src/mcp/mcp.c index 3372826b..5e5b007a 100644 --- a/src/mcp/mcp.c +++ b/src/mcp/mcp.c @@ -750,6 +750,8 @@ static cbm_store_t *resolve_store(cbm_mcp_server_t *srv, const char *project) { return srv->store; } +static bool is_project_db_file(const char *name, size_t len); + /* Scan cache dir for .db files, writing comma-separated quoted names into out. * Returns the number of projects found. 
*/ static int collect_db_project_names(const char *dir_path, char *out, size_t out_sz) { @@ -763,10 +765,7 @@ static int collect_db_project_names(const char *dir_path, char *out, size_t out_ while ((entry = cbm_readdir(d)) != NULL) { const char *n = entry->name; size_t len = strlen(n); - if (len < MCP_MIN_DB_NAME || strcmp(n + len - MCP_DB_EXT, ".db") != 0) { - continue; - } - if (strncmp(n, "tmp-", SLEN("tmp-")) == 0 || strncmp(n, "_", SLEN("_")) == 0) { + if (!is_project_db_file(n, len)) { continue; } if (count > 0 && offset < (int)out_sz - MCP_SEPARATOR) { @@ -825,8 +824,7 @@ static bool is_project_db_file(const char *name, size_t len) { if (len < MCP_MIN_DB_NAME || strcmp(name + len - MCP_DB_EXT, ".db") != 0) { return false; } - if (strncmp(name, "tmp-", SLEN("tmp-")) == 0 || strncmp(name, "_", SLEN("_")) == 0 || - strncmp(name, ":memory:", SLEN(":memory:")) == 0) { + if (strncmp(name, "_", SLEN("_")) == 0 || strncmp(name, ":memory:", SLEN(":memory:")) == 0) { return false; } return true; @@ -846,23 +844,29 @@ static void build_project_json_entry(yyjson_mut_doc *doc, yyjson_mut_val *arr, c int nodes = 0; int edges = 0; char root_path_buf[CBM_SZ_1K] = ""; + char indexed_name_buf[CBM_SZ_1K]; + snprintf(indexed_name_buf, sizeof(indexed_name_buf), "%s", project_name); if (pstore) { - nodes = cbm_store_count_nodes(pstore, project_name); - edges = cbm_store_count_edges(pstore, project_name); - cbm_project_t proj = {0}; - if (cbm_store_get_project(pstore, project_name, &proj) == CBM_STORE_OK) { - if (proj.root_path) { - snprintf(root_path_buf, sizeof(root_path_buf), "%s", proj.root_path); + cbm_project_t *projects = NULL; + int project_count = 0; + if (cbm_store_list_projects(pstore, &projects, &project_count) == CBM_STORE_OK && + project_count > 0) { + const cbm_project_t *proj = &projects[0]; + if (proj->name && proj->name[0] != '\0') { + snprintf(indexed_name_buf, sizeof(indexed_name_buf), "%s", proj->name); } - free((void *)proj.name); - free((void 
*)proj.indexed_at); - free((void *)proj.root_path); + if (proj->root_path && proj->root_path[0] != '\0') { + snprintf(root_path_buf, sizeof(root_path_buf), "%s", proj->root_path); + } + cbm_store_free_projects(projects, project_count); } + nodes = cbm_store_count_nodes(pstore, indexed_name_buf); + edges = cbm_store_count_edges(pstore, indexed_name_buf); cbm_store_close(pstore); } yyjson_mut_val *p = yyjson_mut_obj(doc); - yyjson_mut_obj_add_strcpy(doc, p, "name", project_name); + yyjson_mut_obj_add_strcpy(doc, p, "name", indexed_name_buf); yyjson_mut_obj_add_strcpy(doc, p, "root_path", root_path_buf); yyjson_mut_obj_add_int(doc, p, "nodes", nodes); yyjson_mut_obj_add_int(doc, p, "edges", edges); @@ -2043,11 +2047,25 @@ static char *handle_index_repository(cbm_mcp_server_t *srv, const char *args) { "explore the codebase with get_architecture(aspects=['all']), then use " "manage_adr(mode='store') to persist architectural insights across sessions."); } + + /* Flush WAL pages into the main database before the fleet layer + * snapshots the project artifact. */ + (void)cbm_store_checkpoint(store); } } char *json = yy_doc_to_str(doc); yyjson_mut_doc_free(doc); + + /* Release the indexed store so follow-up requests reopen from the fresh + * checkpointed database file instead of a long-lived write connection. */ + if (srv->owns_store && srv->store) { + cbm_store_close(srv->store); + srv->store = NULL; + } + free(srv->current_project); + srv->current_project = NULL; + free(project_name); free(repo_path); @@ -2147,15 +2165,34 @@ static yyjson_doc *enrich_node_properties(yyjson_mut_doc *doc, yyjson_mut_val *o /* Resolve an absolute path from root_path + file_path, verify containment, * and read source lines. Sets *out_abs_path (caller frees). Returns source * string (caller frees) or NULL if path is invalid/unreadable. 
*/ +static bool cbm_path_is_absolute(const char *path) { + if (!path || !path[0]) { + return false; + } +#ifdef _WIN32 + return path[0] == '/' || path[0] == '\\' || + ((path[0] >= 'A' && path[0] <= 'Z') || (path[0] >= 'a' && path[0] <= 'z')) && + path[1] == ':'; +#else + return path[0] == '/'; +#endif +} + static char *resolve_snippet_source(const char *root_path, const char *file_path, int start, int end, char **out_abs_path) { *out_abs_path = NULL; if (!root_path || !file_path) { return NULL; } - size_t apsz = strlen(root_path) + strlen(file_path) + MCP_SEPARATOR; + size_t apsz = cbm_path_is_absolute(file_path) + ? strlen(file_path) + SKIP_ONE + : strlen(root_path) + strlen(file_path) + MCP_SEPARATOR; char *abs_path = malloc(apsz); - snprintf(abs_path, apsz, "%s/%s", root_path, file_path); + if (cbm_path_is_absolute(file_path)) { + snprintf(abs_path, apsz, "%s", file_path); + } else { + snprintf(abs_path, apsz, "%s/%s", root_path, file_path); + } char real_root[CBM_SZ_4K]; char real_file[CBM_SZ_4K]; diff --git a/tests/test_mcp.c b/tests/test_mcp.c index 72729f11..a7ab7c7d 100644 --- a/tests/test_mcp.c +++ b/tests/test_mcp.c @@ -332,6 +332,8 @@ static cbm_mcp_server_t *setup_mcp_with_data(void) { return srv; } +static char *extract_text_content(const char *mcp_result); + TEST(tool_list_projects_empty) { cbm_mcp_server_t *srv = setup_mcp_with_data(); @@ -348,6 +350,131 @@ TEST(tool_list_projects_empty) { PASS(); } +TEST(tool_list_projects_uses_indexed_project_metadata) { + char tmp_dir[256]; + snprintf(tmp_dir, sizeof(tmp_dir), "/tmp/cbm_projects_test_XXXXXX"); + ASSERT_NOT_NULL(cbm_mkdtemp(tmp_dir)); + + const char *old_cache_dir = getenv("CBM_CACHE_DIR"); + char old_cache_dir_buf[512] = ""; + if (old_cache_dir) { + snprintf(old_cache_dir_buf, sizeof(old_cache_dir_buf), "%s", old_cache_dir); + } + cbm_setenv("CBM_CACHE_DIR", tmp_dir, 1); + + cbm_store_t *store = cbm_store_open("artifact-platform-backend"); + ASSERT_NOT_NULL(store); + 
ASSERT_EQ(cbm_store_upsert_project(store, "platform-backend", "/workspace/platform-backend"), 0); + + cbm_node_t node_a = {0}; + node_a.project = "platform-backend"; + node_a.label = "Function"; + node_a.name = "HandleRequest"; + node_a.qualified_name = "platform-backend.HandleRequest"; + node_a.file_path = "main.go"; + node_a.start_line = 3; + node_a.end_line = 5; + int64_t node_a_id = cbm_store_upsert_node(store, &node_a); + + cbm_node_t node_b = {0}; + node_b.project = "platform-backend"; + node_b.label = "Function"; + node_b.name = "ProcessOrder"; + node_b.qualified_name = "platform-backend.ProcessOrder"; + node_b.file_path = "main.go"; + node_b.start_line = 7; + node_b.end_line = 9; + int64_t node_b_id = cbm_store_upsert_node(store, &node_b); + + cbm_edge_t edge = {0}; + edge.project = "platform-backend"; + edge.source_id = node_a_id; + edge.target_id = node_b_id; + edge.type = "CALLS"; + ASSERT_GT(cbm_store_insert_edge(store, &edge), 0); + cbm_store_close(store); + + cbm_mcp_server_t *srv = setup_mcp_with_data(); + ASSERT_NOT_NULL(srv); + + char *raw = cbm_mcp_handle_tool(srv, "list_projects", "{}"); + char *resp = extract_text_content(raw); + ASSERT_NOT_NULL(resp); + ASSERT_NOT_NULL(strstr(resp, "\"name\":\"platform-backend\"")); + ASSERT_NOT_NULL(strstr(resp, "\"root_path\":\"/workspace/platform-backend\"")); + ASSERT_NOT_NULL(strstr(resp, "\"nodes\":2")); + ASSERT_NOT_NULL(strstr(resp, "\"edges\":1")); + free(resp); + free(raw); + + cbm_mcp_server_free(srv); + + char db_path[512]; + snprintf(db_path, sizeof(db_path), "%s/artifact-platform-backend.db", tmp_dir); + unlink(db_path); + snprintf(db_path, sizeof(db_path), "%s/artifact-platform-backend.db-wal", tmp_dir); + unlink(db_path); + snprintf(db_path, sizeof(db_path), "%s/artifact-platform-backend.db-shm", tmp_dir); + unlink(db_path); + rmdir(tmp_dir); + + if (old_cache_dir) { + cbm_setenv("CBM_CACHE_DIR", old_cache_dir_buf, 1); + } else { + cbm_unsetenv("CBM_CACHE_DIR"); + } + PASS(); +} + 
+TEST(tool_list_projects_includes_tmp_prefixed_runtime_dbs) { + char tmp_dir[256]; + snprintf(tmp_dir, sizeof(tmp_dir), "/tmp/cbm_projects_tmp_runtime_XXXXXX"); + ASSERT_NOT_NULL(cbm_mkdtemp(tmp_dir)); + + const char *old_cache_dir = getenv("CBM_CACHE_DIR"); + char old_cache_dir_buf[512] = ""; + if (old_cache_dir) { + snprintf(old_cache_dir_buf, sizeof(old_cache_dir_buf), "%s", old_cache_dir); + } + cbm_setenv("CBM_CACHE_DIR", tmp_dir, 1); + + cbm_store_t *store = cbm_store_open("tmp-fleet-cache-platform-backend"); + ASSERT_NOT_NULL(store); + ASSERT_EQ(cbm_store_upsert_project(store, "tmp-fleet-cache-platform-backend", + "/tmp/fleet-cache/platform-backend"), + 0); + cbm_store_close(store); + + cbm_mcp_server_t *srv = setup_mcp_with_data(); + ASSERT_NOT_NULL(srv); + + char *raw = cbm_mcp_handle_tool(srv, "list_projects", "{}"); + char *resp = extract_text_content(raw); + ASSERT_NOT_NULL(resp); + ASSERT_NOT_NULL(strstr(resp, "\"name\":\"tmp-fleet-cache-platform-backend\"")); + ASSERT_NOT_NULL(strstr(resp, "\"root_path\":\"/tmp/fleet-cache/platform-backend\"")); + free(resp); + free(raw); + + cbm_mcp_server_free(srv); + + char db_path[512]; + snprintf(db_path, sizeof(db_path), "%s/tmp-fleet-cache-platform-backend.db", tmp_dir); + unlink(db_path); + snprintf(db_path, sizeof(db_path), "%s/tmp-fleet-cache-platform-backend.db-wal", tmp_dir); + unlink(db_path); + snprintf(db_path, sizeof(db_path), "%s/tmp-fleet-cache-platform-backend.db-shm", tmp_dir); + unlink(db_path); + rmdir(tmp_dir); + + if (old_cache_dir) { + cbm_setenv("CBM_CACHE_DIR", old_cache_dir_buf, 1); + } else { + cbm_unsetenv("CBM_CACHE_DIR"); + } + PASS(); +} + TEST(tool_get_graph_schema_empty) { cbm_mcp_server_t *srv = setup_mcp_with_data(); @@ -1060,6 +1187,42 @@ TEST(snippet_unique_short_name) { PASS(); } +TEST(snippet_absolute_file_path_returns_source) { + char tmp[256]; + cbm_mcp_server_t *srv = setup_snippet_server(tmp, sizeof(tmp)); + ASSERT_NOT_NULL(srv); + + char abs_path[512]; + snprintf(abs_path, 
sizeof(abs_path), "%s/project/main.go", tmp); + + cbm_store_t *st = cbm_mcp_server_store(srv); + ASSERT_NOT_NULL(st); + + cbm_node_t abs_node = {0}; + abs_node.project = "test-project"; + abs_node.label = "Function"; + abs_node.name = "HandleAbsolute"; + abs_node.qualified_name = "test-project.cmd.server.main.HandleAbsolute"; + abs_node.file_path = abs_path; + abs_node.start_line = 3; + abs_node.end_line = 5; + abs_node.properties_json = "{\"signature\":\"func HandleAbsolute() error\"}"; + ASSERT_GT(cbm_store_upsert_node(st, &abs_node), 0); + + char *resp = + call_snippet(srv, "{\"qualified_name\":\"test-project.cmd.server.main.HandleAbsolute\"," + "\"project\":\"test-project\"}"); + ASSERT_NOT_NULL(resp); + ASSERT_NOT_NULL(strstr(resp, "\"name\":\"HandleAbsolute\"")); + ASSERT_NOT_NULL(strstr(resp, "\"source\"")); + ASSERT_NULL(strstr(resp, "source not available")); + free(resp); + + cbm_mcp_server_free(srv); + cleanup_snippet_dir(tmp); + PASS(); +} + /* ── TestSnippet_NameTier ─────────────────────────────────────── */ TEST(snippet_name_tier) { @@ -1692,6 +1855,8 @@ SUITE(mcp) { /* Tool handlers */ RUN_TEST(tool_list_projects_empty); + RUN_TEST(tool_list_projects_uses_indexed_project_metadata); + RUN_TEST(tool_list_projects_includes_tmp_prefixed_runtime_dbs); RUN_TEST(tool_get_graph_schema_empty); RUN_TEST(tool_unknown_tool); RUN_TEST(tool_search_graph_basic); @@ -1745,6 +1910,7 @@ SUITE(mcp) { RUN_TEST(snippet_exact_qn); RUN_TEST(snippet_qn_suffix); RUN_TEST(snippet_unique_short_name); + RUN_TEST(snippet_absolute_file_path_returns_source); RUN_TEST(snippet_name_tier); RUN_TEST(snippet_ambiguous_short_name); RUN_TEST(snippet_not_found);