diff --git a/.claude/agents/protocol-domain.md b/.claude/agents/protocol-domain.md index 4dcafb3e5..ee9dfc300 100644 --- a/.claude/agents/protocol-domain.md +++ b/.claude/agents/protocol-domain.md @@ -614,7 +614,7 @@ EVM chains with Etherscan-compatible explorer configs (safe for seed workflows): - "11155111" -- Sepolia Testnet (Etherscan Sepolia) Non-EVM / Blockscout chains with explorer configs (NOT safe for protocol seed workflows -- different API format): -- "42420" -- Tempo (Blockscout) +- "4217" -- Tempo (Blockscout) - "42429" -- Tempo Testnet (Blockscout) - "101" -- Solana (Solscan) - "103" -- Solana Devnet (Solscan) diff --git a/.claude/commands/test-protocol.md b/.claude/commands/test-protocol.md index 144e64bdd..5651a0ea5 100644 --- a/.claude/commands/test-protocol.md +++ b/.claude/commands/test-protocol.md @@ -36,7 +36,7 @@ Read the protocol definition file: protocols/$ARGUMENTS.ts ``` -If it does not exist, check alternate names (e.g., `aave-v3.ts` for `aave`, `compound-v3.ts` for `compound`, `uniswap-v3.ts` for `uniswap`, `yearn-v3.ts` for `yearn`). +If it does not exist, check alternate names (e.g., `compound-v3.ts` for `compound`, `uniswap-v3.ts` for `uniswap`, `yearn-v3.ts` for `yearn`). Aave versions use explicit slugs: `aave-v3.ts` has slug `aave-v3`, `aave-v4.ts` has slug `aave-v4`. Extract: - Protocol name, slug, and description @@ -132,7 +132,7 @@ Save tested workflow configurations to `scripts/seed/workflows/$ARGUMENTS/` for After write tests, create and execute withdrawal workflows to recover deposited funds: - `vault-withdraw` / `vault-redeem` for ERC-4626 vaults -- Protocol-specific withdraw actions (e.g., `aave/withdraw`, `compound/withdraw`) +- Protocol-specific withdraw actions (e.g., `aave-v3/withdraw`, `compound/withdraw`) - Run withdrawals **sequentially** (same nonce contention concern) Verify final balances match expectations. 
diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml index 18255a73c..557792b30 100644 --- a/.github/workflows/pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -94,6 +94,23 @@ jobs: - name: Run check run: pnpm check + - name: Forbid legacy Tempo chain ID 42420 + # Canonical Tempo mainnet chain ID is 4217 (see lib/rpc/types.ts). + # 42420 is a retired legacy ID; re-introducing it silently routes to + # the wrong RPC entry or mismatches the chains table. See KEEP-261. + # Uses git grep on the checked-out tree -- no user input is interpolated. + run: | + MATCHES=$(git grep -nE '\b42420\b' -- \ + ':!*.md' \ + ':!drizzle/meta/' \ + ':!.github/workflows/pr-checks.yml' \ + || true) + if [ -n "$MATCHES" ]; then + echo "::error::Legacy Tempo chain ID 42420 found. Use 4217 instead (KEEP-261)." + echo "$MATCHES" + exit 1 + fi + typecheck: runs-on: ubuntu-latest steps: @@ -143,7 +160,7 @@ jobs: uses: docker/setup-buildx-action@v4 - name: Configure AWS credentials - if: steps.changes.outputs.relevant == 'true' + if: steps.changes.outputs.relevant == 'true' && github.actor != 'dependabot[bot]' uses: aws-actions/configure-aws-credentials@v6 with: aws-access-key-id: ${{ secrets.TO_AWS_ACCESS_KEY_ID }} @@ -151,7 +168,7 @@ jobs: aws-region: ${{ vars.TO_REGION }} - name: Login to AWS ECR - if: steps.changes.outputs.relevant == 'true' + if: steps.changes.outputs.relevant == 'true' && github.actor != 'dependabot[bot]' id: login-ecr uses: aws-actions/amazon-ecr-login@v2 diff --git a/app/api/billing/billing-details/route.ts b/app/api/billing/billing-details/route.ts new file mode 100644 index 000000000..5341cc2c0 --- /dev/null +++ b/app/api/billing/billing-details/route.ts @@ -0,0 +1,44 @@ +import { NextResponse } from "next/server"; +import { isBillingEnabled } from "@/lib/billing/feature-flag"; +import { getOrgSubscription } from "@/lib/billing/plans-server"; +import { getBillingProvider } from "@/lib/billing/providers"; +import { requireOrgOwner } from 
"@/lib/billing/require-org-owner"; +import { ErrorCategory, logSystemError } from "@/lib/logging"; + +export async function GET(): Promise { + if (!isBillingEnabled()) { + return NextResponse.json({ error: "Not found" }, { status: 404 }); + } + + try { + const authResult = await requireOrgOwner(); + if ("error" in authResult) { + return authResult.error; + } + const { orgId: activeOrgId } = authResult; + + const sub = await getOrgSubscription(activeOrgId); + if (!sub?.providerCustomerId) { + return NextResponse.json({ + paymentMethod: null, + billingEmail: null, + }); + } + + const provider = getBillingProvider(); + const details = await provider.getBillingDetails(sub.providerCustomerId); + + return NextResponse.json(details); + } catch (error) { + logSystemError( + ErrorCategory.EXTERNAL_SERVICE, + "[Billing] Billing details error", + error, + { endpoint: "/api/billing/billing-details", operation: "get" } + ); + return NextResponse.json( + { error: "Failed to load billing details" }, + { status: 500 } + ); + } +} diff --git a/app/api/mcp/workflows/[slug]/call/route.ts b/app/api/mcp/workflows/[slug]/call/route.ts index 4363ae6e4..ed921a00e 100644 --- a/app/api/mcp/workflows/[slug]/call/route.ts +++ b/app/api/mcp/workflows/[slug]/call/route.ts @@ -4,7 +4,7 @@ import { start } from "workflow/api"; import { checkConcurrencyLimit } from "@/app/api/execute/_lib/concurrency-limit"; import { enforceExecutionLimit } from "@/lib/billing/execution-guard"; import { db } from "@/lib/db"; -import { workflowExecutions, workflows } from "@/lib/db/schema"; +import { tags, workflowExecutions, workflows } from "@/lib/db/schema"; import { ErrorCategory, logSystemError } from "@/lib/logging"; import { checkIpRateLimit, getClientIp } from "@/lib/mcp/rate-limit"; import { hashMppCredential } from "@/lib/mpp/server"; @@ -15,6 +15,7 @@ import { } from "@/lib/payments/router"; import { executeWorkflow } from "@/lib/workflow-executor.workflow"; import type { WorkflowEdge, WorkflowNode } 
from "@/lib/workflow-store"; +import { buildCallCompletionResponse } from "@/lib/x402/execution-wait"; import { hashPaymentSignature, recordPayment, @@ -146,8 +147,9 @@ function startExecutionInBackground( } /** - * Free-path helper: prepares the execution and starts it. Used by the - * non-paid call path where there is no payment to record between the two. + * Free-path helper: prepares the execution, starts it, and awaits completion + * up to the read-wait timeout. Returns the mapped output inline on success or + * falls back to `{executionId, status: "running"}` on timeout. */ async function createAndStartExecution( workflow: CallRouteWorkflow, @@ -158,16 +160,18 @@ async function createAndStartExecution( return prepared.error; } startExecutionInBackground(workflow, body, prepared.executionId); - return NextResponse.json( - { executionId: prepared.executionId, status: "running" }, - { headers: corsHeaders } + const responseBody = await buildCallCompletionResponse( + prepared.executionId, + workflow.outputMapping ); + return NextResponse.json(responseBody, { headers: corsHeaders }); } async function lookupWorkflow(slug: string): Promise { const rows = await db - .select(CALL_ROUTE_COLUMNS) + .select({ ...CALL_ROUTE_COLUMNS, tagName: tags.name }) .from(workflows) + .leftJoin(tags, eq(workflows.tagId, tags.id)) .where(and(eq(workflows.listedSlug, slug), eq(workflows.isListed, true))) .limit(1); return rows[0] ?? 
null; @@ -333,10 +337,11 @@ async function handlePaidWorkflow( startExecutionInBackground(workflow, body, executionId); - return NextResponse.json( - { executionId, status: "running" }, - { headers: corsHeaders } + const responseBody = await buildCallCompletionResponse( + executionId, + workflow.outputMapping ); + return NextResponse.json(responseBody, { headers: corsHeaders }); }; } ); diff --git a/app/layout.tsx b/app/layout.tsx index cf565f35e..c5793e869 100644 --- a/app/layout.tsx +++ b/app/layout.tsx @@ -4,6 +4,7 @@ import { Analytics } from "@vercel/analytics/react"; import { SpeedInsights } from "@vercel/speed-insights/next"; import { Provider } from "jotai"; import { type ReactNode, Suspense } from "react"; +import { AppBanner } from "@/components/app-banner"; import { AuthProvider } from "@/components/auth/provider"; import { KeeperHubExtensionLoader } from "@/components/extension-loader"; import { GitHubStarsLoader } from "@/components/github-stars-loader"; @@ -76,6 +77,7 @@ const RootLayout = ({ children }: RootLayoutProps) => ( }> + {children} diff --git a/app/workflows/[workflowId]/layout.tsx b/app/workflows/[workflowId]/layout.tsx index 7cfbf2ee0..f2c1ea103 100644 --- a/app/workflows/[workflowId]/layout.tsx +++ b/app/workflows/[workflowId]/layout.tsx @@ -74,5 +74,13 @@ export async function generateMetadata({ } export default function WorkflowLayout({ children }: WorkflowLayoutProps) { - return children; + // Opt out of in-browser auto-translation (Google Translate, Edge, etc.) on + // the editor. Translators replace text nodes with wrappers, which + // breaks React's reconciler and throws NotFoundError on insertBefore / + // removeChild. Marketing / hub pages remain translatable. + return ( +
+ {children} +
+ ); } diff --git a/app/workflows/[workflowId]/page.tsx b/app/workflows/[workflowId]/page.tsx index 6bcdd66f2..0dafc4942 100644 --- a/app/workflows/[workflowId]/page.tsx +++ b/app/workflows/[workflowId]/page.tsx @@ -880,7 +880,7 @@ const WorkflowEditor = ({ params }: WorkflowPageProps) => { {/* Right panel overlay (desktop only) - only show if trigger exists */} {!isMobile && hasTriggerNode && (
target/targetHandle). setNodes(partialData.nodes || []); - setEdges(validEdges); + setEdges(dedupeEdges(validEdges)); if (partialData.name) { setCurrentWorkflowName(partialData.name); } @@ -153,11 +155,14 @@ export function AIPrompt({ workflowId, onWorkflowCreated }: AIPromptProps) { console.log("[AI Prompt] Nodes:", workflowData.nodes?.length || 0); console.log("[AI Prompt] Edges:", workflowData.edges?.length || 0); - // Use edges from workflow data with animated type - const finalEdges = (workflowData.edges || []).map((edge) => ({ - ...edge, - type: "animated", - })); + // Use edges from workflow data with animated type; dedupe before + // persisting so AI hallucinations don't leak duplicates into the DB. + const finalEdges = dedupeEdges( + (workflowData.edges || []).map((edge) => ({ + ...edge, + type: "animated", + })) + ); // Validate: check for blank/incomplete nodes console.log("[AI Prompt] Validating nodes:", workflowData.nodes); diff --git a/components/analytics/analytics-page.tsx b/components/analytics/analytics-page.tsx index f67759dfb..f94d55b4b 100644 --- a/components/analytics/analytics-page.tsx +++ b/components/analytics/analytics-page.tsx @@ -106,7 +106,7 @@ export function AnalyticsPage(): ReactNode { return (
-
+
@@ -118,7 +118,7 @@ export function AnalyticsPage(): ReactNode { return (
-
+
diff --git a/components/app-banner.tsx b/components/app-banner.tsx new file mode 100644 index 000000000..b9b1b200c --- /dev/null +++ b/components/app-banner.tsx @@ -0,0 +1,81 @@ +"use client"; + +import { Info, X } from "lucide-react"; +import Link from "next/link"; +import { useEffect, useState } from "react"; + +const STORAGE_KEY = "kh-billing-announce-v1"; + +export function AppBanner(): React.ReactElement | null { + const [mounted, setMounted] = useState(false); + const [dismissed, setDismissed] = useState(true); + + useEffect(() => { + setMounted(true); + try { + const stored = window.localStorage.getItem(STORAGE_KEY); + setDismissed(stored === "1"); + } catch { + setDismissed(false); + } + }, []); + + useEffect(() => { + if (!mounted) { + return; + } + if (dismissed) { + document.documentElement.style.removeProperty("--app-banner-height"); + } else { + document.documentElement.style.setProperty("--app-banner-height", "36px"); + } + return (): void => { + document.documentElement.style.removeProperty("--app-banner-height"); + }; + }, [mounted, dismissed]); + + function handleDismiss(): void { + try { + window.localStorage.setItem(STORAGE_KEY, "1"); + } catch { + // localStorage unavailable; dismissal only lasts this session + } + setDismissed(true); + } + + if (!mounted || dismissed) { + return null; + } + + return ( +
+

+

+ +
+ ); +} diff --git a/components/billing/billing-details.tsx b/components/billing/billing-details.tsx new file mode 100644 index 000000000..25a7a3b0b --- /dev/null +++ b/components/billing/billing-details.tsx @@ -0,0 +1,155 @@ +"use client"; + +import { Loader2, Pencil } from "lucide-react"; +import { useCallback, useEffect, useState } from "react"; +import { toast } from "sonner"; +import { Button } from "@/components/ui/button"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { BILLING_API } from "@/lib/billing/constants"; +import { useOrganization } from "@/lib/hooks/use-organization"; + +type PaymentMethod = { + brand: string; + last4: string; + expMonth: number; + expYear: number; +}; + +type BillingDetailsResponse = { + paymentMethod: PaymentMethod | null; + billingEmail: string | null; +}; + +function formatBrand(brand: string): string { + const map: Record = { + visa: "Visa", + mastercard: "Mastercard", + amex: "American Express", + discover: "Discover", + jcb: "JCB", + diners: "Diners Club", + unionpay: "UnionPay", + }; + return map[brand] ?? 
brand.charAt(0).toUpperCase() + brand.slice(1); +} + +export function BillingDetails(): React.ReactElement { + const { organization } = useOrganization(); + const orgId = organization?.id; + const [data, setData] = useState(null); + const [loading, setLoading] = useState(true); + const [portalLoading, setPortalLoading] = useState(false); + + const fetchDetails = useCallback(async (): Promise => { + setLoading(true); + try { + const response = await fetch(BILLING_API.BILLING_DETAILS); + if (response.ok) { + const json = (await response.json()) as BillingDetailsResponse; + setData(json); + } else { + setData({ paymentMethod: null, billingEmail: null }); + } + } catch { + setData({ paymentMethod: null, billingEmail: null }); + } finally { + setLoading(false); + } + }, []); + + // biome-ignore lint/correctness/useExhaustiveDependencies: orgId drives re-fetch on org switch + useEffect(() => { + fetchDetails().catch(() => undefined); + }, [fetchDetails, orgId]); + + async function openPortal(): Promise { + setPortalLoading(true); + try { + const response = await fetch(BILLING_API.PORTAL, { method: "POST" }); + const json = (await response.json()) as { url?: string; error?: string }; + if (response.ok && json.url) { + window.location.href = json.url; + return; + } + toast.error(json.error ?? "Could not open billing portal"); + } catch { + toast.error("Could not open billing portal"); + } finally { + setPortalLoading(false); + } + } + + const paymentMethod = data?.paymentMethod ?? null; + const billingEmail = data?.billingEmail ?? null; + const hasPaymentMethod = paymentMethod !== null; + + return ( + + + + Billing Details + {hasPaymentMethod && ( + + )} + + + + {loading && ( +
+ + Loading... +
+ )} + + {!(loading || hasPaymentMethod) && ( +

+ No card on file. Subscribe to a paid plan to add a payment method. +

+ )} + + {!loading && hasPaymentMethod && ( +
+

+ + {formatBrand(paymentMethod.brand)} ending in + {" "} + + •••• {paymentMethod.last4} + +

+

+ Expires {String(paymentMethod.expMonth).padStart(2, "0")}/ + {String(paymentMethod.expYear).slice(-2)} +

+
+ )} + + {!loading && ( +
+

+ Invoice Email:{" "} + {billingEmail ? ( + {billingEmail} + ) : ( + + Not on file + + )} +

+
+ )} +
+
+ ); +} diff --git a/components/billing/billing-history.tsx b/components/billing/billing-history.tsx index 885b81372..e8707a6a6 100644 --- a/components/billing/billing-history.tsx +++ b/components/billing/billing-history.tsx @@ -216,7 +216,7 @@ export function BillingHistory(): React.ReactElement { {invoice.invoiceUrl && ( +
+
+
+ {isAuthRequired ? ( + + ) : ( + + )} +
+
+

+ {isAuthRequired + ? "Sign in to view billing" + : "Organization required"} +

+

+ {isAuthRequired + ? "Sign in to your account to manage your subscription and view billing history." + : "Create or join an organization to manage billing."} +

+
+ {!isAuthRequired && ( + + )} +
+
+
+ ); +} + export function BillingPage(): React.ReactElement { const searchParams = useSearchParams(); + const { data: session, isPending: sessionPending } = useSession(); const { organization } = useOrganization(); const orgId = organization?.id; const [currentPlan, setCurrentPlan] = useState("free"); @@ -39,13 +90,23 @@ export function BillingPage(): React.ReactElement { >(undefined); const [refreshKey, setRefreshKey] = useState(0); const [planLoaded, setPlanLoaded] = useState(false); + const isAnonymous = !session?.user || session.user.isAnonymous; useEffect(() => { const checkout = searchParams.get("checkout"); if (checkout === "success") { toast.success("Subscription activated successfully!"); window.history.replaceState({}, "", window.location.pathname); - } else if (checkout === "canceled") { + // Heuristic delay: Stripe needs a moment after Checkout to attach the + // payment method where getBillingDetails can read it back via the API + // cascade. 2s works in practice but is a race, not a guarantee. The + // robust fix is to persist payment method details in our DB on the + // checkout.session.completed webhook and read from DB instead of + // hitting Stripe here. + const timer = setTimeout(() => setRefreshKey((k) => k + 1), 2000); + return () => clearTimeout(timer); + } + if (checkout === "canceled") { toast.info("Checkout was canceled."); window.history.replaceState({}, "", window.location.pathname); } @@ -88,6 +149,20 @@ export function BillingPage(): React.ReactElement { setRefreshKey((k) => k + 1); } + if (sessionPending) { + return ( +
+ ); + } + + if (isAnonymous) { + return ; + } + // data-page-state tracks subscription plan fetch only. // BillingStatus and BillingHistory have independent async loads. return ( @@ -97,7 +172,7 @@ export function BillingPage(): React.ReactElement { data-testid="billing-page" >
- diff --git a/components/billing/billing-status.tsx b/components/billing/billing-status.tsx index f0fce614d..f9b8e6cbd 100644 --- a/components/billing/billing-status.tsx +++ b/components/billing/billing-status.tsx @@ -7,7 +7,7 @@ import { Button } from "@/components/ui/button"; import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; import { Skeleton } from "@/components/ui/skeleton"; import { BILLING_ALERTS, BILLING_API } from "@/lib/billing/constants"; -import { PLANS, type PlanName, type TierKey } from "@/lib/billing/plans"; +import { PLANS, type PlanName } from "@/lib/billing/plans"; type OverageCharge = { periodStart: string; @@ -371,13 +371,13 @@ function ExecutionUsageBar({ const overageRate = PLANS[plan].overage.ratePerThousand; function resolveBarColor(): string { - if (isOverLimit) { - return hasOverage ? "bg-muted-foreground" : "bg-destructive"; + if (isOverLimit && !hasOverage) { + return "bg-destructive"; } - if (isNearLimit) { + if (isNearLimit && !isOverLimit) { return "bg-yellow-500"; } - return "bg-keeperhub-green"; + return "bg-keeperhub-green-dark"; } const barColor = resolveBarColor(); @@ -393,7 +393,7 @@ function ExecutionUsageBar({
{!(isUnlimited || isOverLimit) && ( -
+
-
-
+
+
-
+
)} @@ -456,7 +456,7 @@ function GasCreditsBar({ if (isNearLimit) { return "bg-yellow-500"; } - return "bg-keeperhub-green"; + return "bg-keeperhub-green-dark"; } const barColor = resolveBarColor(); @@ -469,7 +469,7 @@ function GasCreditsBar({ {(gasCredits.totalCents / 100).toFixed(2)}
-
+
t.key === tier) : null; const statusVariant = STATUS_VARIANT[sub?.status ?? "active"] ?? "outline"; const renewalMessage = getRenewalMessage( @@ -607,14 +605,14 @@ function BillingStatusContent({ )} -
+
{planDef.name} - {activeTier && ( - - {activeTier.executions.toLocaleString()} executions - - )} {sub?.status ?? "active"} + {renewalMessage && ( +

+ {renewalMessage.text} +

+ )}
{usage && ( @@ -628,12 +626,6 @@ function BillingStatusContent({ {gasCredits && } - - {renewalMessage && ( -

- {renewalMessage.text} -

- )} ); } diff --git a/components/billing/confirm-plan-change-dialog.tsx b/components/billing/confirm-plan-change-dialog.tsx index 93f900131..ac238b5d8 100644 --- a/components/billing/confirm-plan-change-dialog.tsx +++ b/components/billing/confirm-plan-change-dialog.tsx @@ -567,7 +567,7 @@ export function ConfirmPlanChangeDialog({ )} {" "} - -- changes take effect immediately with prorated billing. + Changes take effect immediately with prorated billing.

diff --git a/components/billing/pricing-table/index.tsx b/components/billing/pricing-table/index.tsx index 455bcea40..8561f5d48 100644 --- a/components/billing/pricing-table/index.tsx +++ b/components/billing/pricing-table/index.tsx @@ -1,12 +1,158 @@ "use client"; +import { ChevronDown } from "lucide-react"; import { useState } from "react"; +import { Badge } from "@/components/ui/badge"; import type { BillingInterval } from "@/lib/billing/plans"; import { PLANS } from "@/lib/billing/plans"; import { cn } from "@/lib/utils"; import { PlanCard } from "./plan-card"; import type { PricingTableProps } from "./types"; +const COMPARISON_ROWS = [ + { + label: "Workflows", + free: "Unlimited", + pro: "Unlimited", + business: "Unlimited", + enterprise: "Unlimited", + }, + { + label: "Chains", + free: "All EVM", + pro: "All EVM", + business: "All EVM", + enterprise: "Custom", + }, + { + label: "Triggers", + free: "Standard", + pro: "Advanced", + business: "Advanced + Custom", + enterprise: "Custom", + }, + { + label: "API", + free: "Rate-limited", + pro: "Full", + business: "Full", + enterprise: "Full", + }, + { + label: "Logs", + free: "7 days", + pro: "30 days", + business: "90 days", + enterprise: "Custom", + }, + { + label: "Support", + free: "Community", + pro: "Email", + business: "Dedicated", + enterprise: "Dedicated (1h)", + }, + { + label: "SLA", + free: "\u2014", + pro: "\u2014", + business: "99.9%", + enterprise: "99.999%", + }, + { + label: "Builder", + free: "Visual + AI", + pro: "Visual + AI", + business: "Visual + AI", + enterprise: "Visual + AI", + }, + { + label: "MCP Server", + free: "Included", + pro: "Included", + business: "Included", + enterprise: "Included", + }, + { + label: "Ops team", + free: "\u2014", + pro: "\u2014", + business: "\u2014", + enterprise: "Dedicated", + }, +] as const; + +function ComparisonTable(): React.ReactElement { + const [isOpen, setIsOpen] = useState(false); + + return ( +
+ + + {isOpen && ( +
+ + + + + + + + + + + + {COMPARISON_ROWS.map((row, i) => ( + + + + + + + + ))} + +
+ Feature + + Free + + Pro + + Business + + Enterprise +
+ {row.label} + + {row.free} + {row.pro}{row.business} + {row.enterprise} +
+
+ )} +
+ ); +} + export function PricingTable({ currentPlan = "free", currentTier, @@ -18,14 +164,13 @@ export function PricingTable({ return (
- {/* Interval toggle */} -
-
+
+
+ + Save 20% +
- {/* Plan cards */} -
+
- {/* Overage callout */} -
-

- When users reach their execution limit -

-
-
-

Pay per execution (default)

-

- On paid tiers, overages billed at end of cycle -

-
-
-

Bump executions

-

- Select a higher tier from the dropdown -

-
-
-

Upgrade their plan

-

- Move to a higher plan for more features -

-
-
-

- On paid tiers, overages are billed at end of cycle. Unpaid overage - invoices may result in reduced execution limits. Free tier: hard cap, - must upgrade. -

-
+ + +

+ Paid tiers bill overage at the end of the cycle. Free tier caps at its + limit with no overage. +

); } diff --git a/components/billing/pricing-table/plan-card-parts.tsx b/components/billing/pricing-table/plan-card-parts.tsx index 77454303b..52c9ac944 100644 --- a/components/billing/pricing-table/plan-card-parts.tsx +++ b/components/billing/pricing-table/plan-card-parts.tsx @@ -1,152 +1,180 @@ +"use client"; + +import { ChevronDown } from "lucide-react"; +import { useEffect, useRef, useState } from "react"; import { Badge } from "@/components/ui/badge"; import { Button } from "@/components/ui/button"; import { CardFooter } from "@/components/ui/card"; -import { SUPPORT_LABELS } from "@/lib/billing/constants"; import type { BillingInterval, PLANS, PlanName } from "@/lib/billing/plans"; import { cn } from "@/lib/utils"; import type { PlanTierItem } from "./types"; -import { formatPrice, getButtonLabel, getExecutionsDisplay } from "./utils"; - -export function FeatureRow({ - label, - value, - highlight = false, -}: { - label: string; - value: string; - highlight?: boolean; -}): React.ReactElement { - return ( -
- {label} - - {value} - -
- ); -} +import { formatPrice, getButtonLabel, getTierPrice } from "./utils"; export function PlanCardBadge({ isActive, - isPopular, }: { isActive: boolean; - isPopular: boolean; }): React.ReactElement | null { - if (isActive) { - return ( -
- - ACTIVE - -
- ); - } - if (!isPopular) { + if (!isActive) { return null; } return ( -
- - POPULAR +
+ + CURRENT
); } -export function PlanCardFeatures({ - plan, - planName, - activeTier, - gasCreditCentsCap, +export function PlanHeader({ + name, + price, + isEnterprise, }: { - plan: (typeof PLANS)[PlanName]; - planName: PlanName; - activeTier: PlanTierItem | undefined; - gasCreditCentsCap?: number; + name: string; + price: number | null; + isEnterprise: boolean; }): React.ReactElement { - const isEnterprise = planName === "enterprise"; - const executionsDisplay = getExecutionsDisplay(planName, activeTier); - - const capCents = gasCreditCentsCap ?? plan.features.gasCreditsCents; - const gasCredits = isEnterprise - ? `$${(capCents / 100).toFixed(0)}+/mo` - : `$${(capCents / 100).toFixed(0)}/mo`; - - const logRetention = - plan.features.logRetentionDays >= 365 - ? "1 year" - : `${plan.features.logRetentionDays} days`; - return ( -
- {executionsDisplay && ( - - )} - - -
+
+

{name}

+
+ {isEnterprise || price === null ? ( + + Custom + + ) : ( + <> + + {formatPrice(price)} + + /mo + + )} +
+
+ ); +} - - - - - - - {plan.features.sla && ( - - )} +export function HeroMetrics({ + executions, + gas, +}: { + executions: string; + gas: string; +}): React.ReactElement { + return ( +
+
+

+ Executions /mo +

+

{executions}

+
+
+

+ Gas credits /mo +

+

{gas}

+
); } -export function PriceDisplay({ - price, - annualTotal, +export function TierSelect({ + options, + value, + onChange, interval, }: { - price: number | null; - annualTotal: number | null; + options: PlanTierItem[]; + value: string; + onChange: (key: string) => void; interval: BillingInterval; }): React.ReactElement { - if (price === null) { - return ( -
- - {interval === "yearly" ? "$1,999+" : "$2,499+"} - - /mo -
- ); - } + const [isOpen, setIsOpen] = useState(false); + const containerRef = useRef(null); + + useEffect(() => { + if (!isOpen) { + return; + } + function handleClickOutside(event: MouseEvent): void { + if ( + containerRef.current && + !containerRef.current.contains(event.target as Node) + ) { + setIsOpen(false); + } + } + document.addEventListener("mousedown", handleClickOutside); + return (): void => + document.removeEventListener("mousedown", handleClickOutside); + }, [isOpen]); + + const selected = options.find((opt) => opt.key === value) ?? options[0]; + return ( -
- - {formatPrice(price)} - - /mo - {annualTotal !== null && ( -

- {formatPrice(annualTotal)}/year billed annually -

+
+ + + {isOpen && ( +
+ {options.map((opt) => { + const isSelected = opt.key === value; + return ( + + ); + })} +
)}
); @@ -157,7 +185,6 @@ export function PlanCardFooter({ plan, isCurrent, loading, - isPopular, currentPlan, hasSubscription, onSubscribe, @@ -166,7 +193,6 @@ export function PlanCardFooter({ plan: (typeof PLANS)[PlanName]; isCurrent: boolean; loading: boolean; - isPopular: boolean; currentPlan?: PlanName; hasSubscription: boolean; onSubscribe: () => void; @@ -174,33 +200,30 @@ export function PlanCardFooter({ const isFree = planName === "free"; const isEnterprise = planName === "enterprise"; + let overageLabel: string | null = null; + if (plan.overage.enabled) { + overageLabel = `$${plan.overage.ratePerThousand}/1K additional executions`; + } else if (isFree) { + overageLabel = "No overage. Hard cap at limit."; + } else if (isEnterprise) { + overageLabel = "Custom overage terms"; + } + return ( - -
- {isFree && ( - No overage - )} - {plan.overage.enabled && ( - - ${plan.overage.ratePerThousand}/1K additional executions - - )} - {isEnterprise && ( - Custom pricing - )} -
+ + {overageLabel && ( + + {overageLabel} + + )} ); } diff --git a/components/billing/pricing-table/plan-card.tsx b/components/billing/pricing-table/plan-card.tsx index 6df2d22a7..50b138030 100644 --- a/components/billing/pricing-table/plan-card.tsx +++ b/components/billing/pricing-table/plan-card.tsx @@ -2,15 +2,7 @@ import { useState } from "react"; import { toast } from "sonner"; -import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from "@/components/ui/select"; -import { CONTACT_EMAIL } from "@/lib/billing/constants"; +import { Card, CardContent } from "@/components/ui/card"; import type { BillingInterval, PLANS, @@ -20,17 +12,16 @@ import type { import { cn } from "@/lib/utils"; import { ConfirmPlanChangeDialog } from "../confirm-plan-change-dialog"; import { + HeroMetrics, PlanCardBadge, - PlanCardFeatures, PlanCardFooter, - PriceDisplay, + PlanHeader, + TierSelect, } from "./plan-card-parts"; import type { GasCreditCapsMap } from "./types"; import { cancelSubscription, computeDisplayPrice, - formatPrice, - getTierPrice, isCurrentPlan, resolveExecutions, startCheckout, @@ -44,7 +35,6 @@ export function PlanCard({ currentTier, currentInterval, gasCreditCaps, - isPopular = false, onPlanUpdated, }: { plan: (typeof PLANS)[PlanName]; @@ -54,7 +44,6 @@ export function PlanCard({ currentTier?: TierKey | null; currentInterval?: BillingInterval | null; gasCreditCaps?: GasCreditCapsMap; - isPopular?: boolean; onPlanUpdated?: () => Promise; }): React.ReactElement { const [selectedTier, setSelectedTier] = useState( @@ -64,9 +53,10 @@ export function PlanCard({ ); const [loading, setLoading] = useState(false); const [confirmOpen, setConfirmOpen] = useState(false); - const [selectOpen, setSelectOpen] = useState(false); const hasSubscription = currentPlan !== undefined && currentPlan !== "free"; + const isEnterprise = planName === "enterprise"; + const isFree = 
planName === "free"; const isCurrent = isCurrentPlan( planName, @@ -80,15 +70,28 @@ export function PlanCard({ const activeTier = plan.tiers.find((t) => t.key === selectedTier); const price = computeDisplayPrice(planName, activeTier, interval); - const annualTotal = - activeTier && interval === "yearly" - ? activeTier.monthlyPriceAnnual * 12 - : null; + const capCents = gasCreditCaps?.[planName] ?? plan.features.gasCreditsCents; + const gasDisplay = isEnterprise + ? "Custom" + : `$${(capCents / 100).toFixed(0)}`; + + const executionsDisplay = (() => { + if (isEnterprise) { + return "Custom"; + } + if (isFree) { + return plan.features.maxExecutionsPerMonth.toLocaleString(); + } + if (activeTier) { + return activeTier.executions.toLocaleString(); + } + return "-"; + })(); async function executeCheckout(): Promise { setLoading(true); try { - if (planName === "free") { + if (isFree) { const result = await cancelSubscription(); if (!result.success) { return; @@ -122,35 +125,30 @@ export function PlanCard({ } function handleSubscribe(): void { - if (planName === "enterprise") { + if (isEnterprise) { window.open( - `mailto:${CONTACT_EMAIL}?subject=Enterprise%20Plan`, + "mailto:human@keeperhub.com?subject=Enterprise%20Plan", "_blank", "noopener" ); return; } - if (isCurrent) { return; } - - if (planName === "free" && hasSubscription) { + if (isFree && hasSubscription) { setConfirmOpen(true); return; } - - if (planName === "free") { + if (isFree) { return; } - if (hasSubscription) { setConfirmOpen(true); return; } - executeCheckout().catch(() => { - // error handled inside executeCheckout + // handled inside executeCheckout }); } @@ -180,63 +178,35 @@ export function PlanCard({ /> - - - - {plan.name} -

{plan.description}

-
+ - - + + + {plan.tiers.length > 0 && ( - + setSelectedTier(key as TierKey)} + options={plan.tiers} + value={selectedTier ?? plan.tiers[0].key} + /> )} - -
-

{label}

+
+

{label}

+ {helpHref ? ( + + + + ) : null} +

{value}

{subtext ? (

{subtext}

@@ -88,18 +107,30 @@ export function EarningsKpiCards(): ReactNode { totalCreatorEarnings, totalInvocations, creatorSharePercent, + perChain, } = data; + // Revenue arrives on Base (x402/USDC) or Tempo (MPP/USDC.e) depending on + // which protocol the calling agent used. Showing the split inline prevents + // creators from treating a zero on one chain as a bug. + const revenueChainSplit = `Base ${perChain.base.grossRevenue} -- Tempo ${perChain.tempo.grossRevenue}`; + const invocationChainSplit = `Base ${perChain.base.invocationCount.toLocaleString()} -- Tempo ${perChain.tempo.invocationCount.toLocaleString()}`; + return (
} iconClassName="bg-green-500/10 text-green-600 dark:text-green-400" label="Total Revenue" + subtext={revenueChainSplit} value={totalGrossRevenue} /> } iconClassName="bg-keeperhub-green/10 text-keeperhub-green-dark" label="Earnings" @@ -110,6 +141,7 @@ export function EarningsKpiCards(): ReactNode { icon={} iconClassName="bg-blue-500/10 text-blue-600 dark:text-blue-400" label="Total Invocations" + subtext={invocationChainSplit} value={totalInvocations.toLocaleString()} />
diff --git a/components/earnings/earnings-page.tsx b/components/earnings/earnings-page.tsx index 29b0f52ef..0dfd08e92 100644 --- a/components/earnings/earnings-page.tsx +++ b/components/earnings/earnings-page.tsx @@ -122,7 +122,7 @@ export function EarningsPage(): ReactNode { return (
-
+
diff --git a/components/flyout-panel.tsx b/components/flyout-panel.tsx index 30afebfab..aa28ba4e4 100644 --- a/components/flyout-panel.tsx +++ b/components/flyout-panel.tsx @@ -46,7 +46,7 @@ export function FlyoutPanel({ + ); + + if (showLabels) { + return reportButton; + } + + return ( + + {reportButton} + Report an issue + + ); + })()}
{/* Resize handle */} diff --git a/components/overlays/projects-and-tags-overlay.tsx b/components/overlays/projects-and-tags-overlay.tsx new file mode 100644 index 000000000..29f51ef3a --- /dev/null +++ b/components/overlays/projects-and-tags-overlay.tsx @@ -0,0 +1,260 @@ +"use client"; + +import { FolderOpen, Plus, Tag as TagIcon, Trash2 } from "lucide-react"; +import { useCallback, useEffect, useState } from "react"; +import { toast } from "sonner"; +import { ConfirmOverlay } from "@/components/overlays/confirm-overlay"; +import { Overlay } from "@/components/overlays/overlay"; +import { useOverlay } from "@/components/overlays/overlay-provider"; +import { ProjectFormDialog } from "@/components/projects/project-form-dialog"; +import { TagFormDialog } from "@/components/tags/tag-form-dialog"; +import { Button } from "@/components/ui/button"; +import { Spinner } from "@/components/ui/spinner"; +import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; +import { api, type Project, type Tag as TagType } from "@/lib/api-client"; + +type ProjectsAndTagsOverlayProps = { + overlayId: string; + initialTab?: "projects" | "tags"; +}; + +export function ProjectsAndTagsOverlay({ + overlayId, + initialTab = "projects", +}: ProjectsAndTagsOverlayProps): React.ReactElement { + const { open: openOverlay } = useOverlay(); + const [projects, setProjects] = useState([]); + const [tags, setTags] = useState([]); + const [loadingProjects, setLoadingProjects] = useState(true); + const [loadingTags, setLoadingTags] = useState(true); + const [showProjectDialog, setShowProjectDialog] = useState(false); + const [showTagDialog, setShowTagDialog] = useState(false); + + const loadProjects = useCallback(async (): Promise => { + try { + const result = await api.project.getAll(); + setProjects(result); + } catch { + toast.error("Failed to load projects"); + } finally { + setLoadingProjects(false); + } + }, []); + + const loadTags = useCallback(async (): Promise => { + try { 
+ const result = await api.tag.getAll(); + setTags(result); + } catch { + toast.error("Failed to load tags"); + } finally { + setLoadingTags(false); + } + }, []); + + useEffect(() => { + loadProjects().catch(() => undefined); + loadTags().catch(() => undefined); + }, [loadProjects, loadTags]); + + const handleDeleteProject = (project: Project): void => { + openOverlay(ConfirmOverlay, { + title: "Delete Project", + message: `Are you sure you want to delete "${project.name}"? This cannot be undone.`, + confirmLabel: "Delete", + confirmVariant: "destructive" as const, + destructive: true, + onConfirm: async () => { + try { + await api.project.delete(project.id); + setProjects((prev) => prev.filter((p) => p.id !== project.id)); + toast.success(`Project "${project.name}" deleted`); + } catch { + toast.error("Failed to delete project"); + } + }, + }); + }; + + const handleDeleteTag = (tag: TagType): void => { + openOverlay(ConfirmOverlay, { + title: "Delete Tag", + message: `Are you sure you want to delete "${tag.name}"? This cannot be undone.`, + confirmLabel: "Delete", + confirmVariant: "destructive" as const, + destructive: true, + onConfirm: async () => { + try { + await api.tag.delete(tag.id); + setTags((prev) => prev.filter((t) => t.id !== tag.id)); + toast.success(`Tag "${tag.name}" deleted`); + } catch { + toast.error("Failed to delete tag"); + } + }, + }); + }; + + const handleProjectCreated = (project: Project): void => { + setProjects((prev) => [...prev, project]); + }; + + const handleTagCreated = (tag: TagType): void => { + setTags((prev) => [...prev, tag]); + }; + + return ( + <> + + + + Projects + Tags + + + +
+ +
+ + {loadingProjects && ( +
+ +
+ )} + {!loadingProjects && projects.length === 0 && ( +
+ +

No projects yet

+

+ Create a project to organize your workflows. +

+
+ )} + {!loadingProjects && projects.length > 0 && ( +
+ {projects.map((project) => ( +
+
+ +
+

{project.name}

+ {project.description && ( +

+ {project.description} +

+ )} +
+
+
+ + {project.workflowCount}{" "} + {project.workflowCount === 1 + ? "workflow" + : "workflows"} + + {project.workflowCount === 0 && ( + + )} +
+
+ ))} +
+ )} +
+ + +
+ +
+ + {loadingTags && ( +
+ +
+ )} + {!loadingTags && tags.length === 0 && ( +
+ +

No tags yet

+

+ Create a tag to categorize your workflows. +

+
+ )} + {!loadingTags && tags.length > 0 && ( +
+ {tags.map((tag) => ( +
+
+ +

{tag.name}

+
+
+ + {tag.workflowCount}{" "} + {tag.workflowCount === 1 ? "workflow" : "workflows"} + + {tag.workflowCount === 0 && ( + + )} +
+
+ ))} +
+ )} +
+
+
+ + + + ); +} diff --git a/components/workflow/config/chain-select-field.tsx b/components/workflow/config/chain-select-field.tsx index cf3c46d43..b09dadf49 100644 --- a/components/workflow/config/chain-select-field.tsx +++ b/components/workflow/config/chain-select-field.tsx @@ -49,6 +49,11 @@ type ChainSelectFieldProps = { * keys (used to set usePrivateMempool alongside network). */ onUpdateConfig?: (key: string, value: unknown) => void; + /** + * Restrict to specific chain IDs (e.g., ["1", "8453"]). + * Used by protocol actions to show only chains where the contract is deployed. + */ + allowedChainIds?: string[]; }; /** @@ -75,6 +80,7 @@ export function ChainSelectField({ chainTypeFilter, showPrivateVariants, onUpdateConfig, + allowedChainIds, }: ChainSelectFieldProps) { const [chains, setChains] = useState([]); const [isLoading, setIsLoading] = useState(true); @@ -93,11 +99,17 @@ export function ChainSelectField({ const data = (await response.json()) as Chain[]; - // Filter by chain type if specified - const filteredChains = chainTypeFilter + let filteredChains = chainTypeFilter ? data.filter((chain) => chain.chainType === chainTypeFilter) : data; + if (allowedChainIds && allowedChainIds.length > 0) { + const allowed = new Set(allowedChainIds); + filteredChains = filteredChains.filter((chain) => + allowed.has(String(chain.chainId)) + ); + } + setChains(filteredChains); } catch (err) { setError(err instanceof Error ? err.message : "Failed to load chains"); @@ -107,7 +119,7 @@ export function ChainSelectField({ } fetchChains(); - }, [chainTypeFilter]); + }, [chainTypeFilter, allowedChainIds]); if (isLoading) { return ( diff --git a/components/workflow/node-config-panel.tsx b/components/workflow/node-config-panel.tsx index 2e9126b03..9cbfc8d88 100644 --- a/components/workflow/node-config-panel.tsx +++ b/components/workflow/node-config-panel.tsx @@ -1084,7 +1084,13 @@ export const PanelInner = () => { !selectedNode.data.config?.actionType && isOwner ) && ( -
+ // key forces this subtree to remount when the selected node + // changes, resetting local useState in leaf field components so + // the previous node's inputs don't leak into the new node's panel. +
{selectedNode.data.type === "trigger" && ( { )} )} - + {selectedNode.data.type !== "trigger" && ( + + )}
)} diff --git a/components/workflow/workflow-canvas.tsx b/components/workflow/workflow-canvas.tsx index 44ed25682..2cab563f2 100644 --- a/components/workflow/workflow-canvas.tsx +++ b/components/workflow/workflow-canvas.tsx @@ -2,6 +2,7 @@ import { ConnectionMode, + type FinalConnectionState, MiniMap, type Node, type NodeMouseHandler, @@ -48,6 +49,7 @@ import { type WorkflowNode, type WorkflowNodeType, } from "@/lib/workflow-store"; +import { hasDuplicateEdge } from "@/lib/workflow/edge-helpers"; import { Edge } from "../ai-elements/edge"; import { Panel } from "../ai-elements/panel"; import { ActionNode } from "./nodes/action-node"; @@ -454,9 +456,25 @@ export function WorkflowCanvas() { return false; } + // Reject a duplicate of an existing edge (same source/target and handles). + // Different handles between the same node pair are still allowed. + if ( + hasDuplicateEdge(edges, { + source: connection.source, + target: connection.target, + sourceHandle, + targetHandle: + "targetHandle" in connection + ? 
(connection.targetHandle as string | null | undefined) + : undefined, + }) + ) { + return false; + } + return true; }, - [nodes] + [edges, nodes] ); const onConnect: OnConnect = useCallback( @@ -493,6 +511,17 @@ export function WorkflowCanvas() { } } + if ( + hasDuplicateEdge(currentEdges, { + source: connection.source, + target: connection.target, + sourceHandle, + targetHandle: connection.targetHandle, + }) + ) { + return currentEdges; + } + const newEdge = { id: nanoid(), ...connection, @@ -562,30 +591,16 @@ export function WorkflowCanvas() { ); const onConnectEnd = useCallback( - (event: MouseEvent | TouchEvent) => { + (event: MouseEvent | TouchEvent, connectionState: FinalConnectionState) => { if (!connectingNodeId.current) { return; } - // Get client position first - const { clientX, clientY } = getClientPosition(event); - - // For touch events, use elementFromPoint to get the actual element at the touch position - // For mouse events, use event.target as before - const target = - "changedTouches" in event - ? document.elementFromPoint(clientX, clientY) - : (event.target as Element); - - if (!target) { - connectingNodeId.current = null; - return; - } - - const isNode = target.closest(".react-flow__node"); - const isHandle = target.closest(".react-flow__handle"); - - if (!(isNode || isHandle)) { + // isValid === null: pointer never entered a handle's connection radius (pane drop). + // true: valid connection -- onConnect already created the edge. + // false: over a handle that rejected the drop -- do nothing. + if (connectionState.isValid === null) { + const { clientX, clientY } = getClientPosition(event); const { adjustedX, adjustedY } = calculateMenuPosition( event, clientX, @@ -755,7 +770,10 @@ export function WorkflowCanvas() { onNodeContextMenu={isGenerating ? undefined : onNodeContextMenu} onNodesChange={isGenerating ? undefined : onNodesChange} onPaneClick={onPaneClick} - onPaneContextMenu={isGenerating ? 
undefined : onPaneContextMenu} + onPaneContextMenu={ + // Add Step is the pane menu's only action; gate by route since the canvas is shared with / + isGenerating || !isWorkflowRoute ? undefined : onPaneContextMenu + } onSelectionChange={isGenerating ? undefined : onSelectionChange} > {isWorkflowRoute && ( diff --git a/components/workflow/workflow-context-menu.tsx b/components/workflow/workflow-context-menu.tsx index e12b4129d..ede7dd64b 100644 --- a/components/workflow/workflow-context-menu.tsx +++ b/components/workflow/workflow-context-menu.tsx @@ -144,12 +144,6 @@ export function WorkflowContextMenu({ return null; } - // Check if the node is a trigger (can't be deleted) - const isTriggerNode = Boolean( - menuState.nodeId && - nodes.find((n) => n.id === menuState.nodeId)?.data.type === "trigger" - ); - const getNodeLabel = () => { if (!menuState.nodeId) { return "Step"; @@ -169,7 +163,6 @@ export function WorkflowContextMenu({ > {menuState.type === "node" && ( } label={`Delete ${getNodeLabel()}`} onClick={handleDeleteNode} @@ -239,6 +232,10 @@ export function useContextMenuHandlers( const onNodeContextMenu = useCallback( (event: React.MouseEvent, node: Node) => { event.preventDefault(); + const data = node.data as WorkflowNode["data"] | undefined; + if (data?.type === "trigger") { + return; + } setMenuState({ type: "node", position: { x: event.clientX, y: event.clientY }, diff --git a/components/workflow/workflow-toolbar.tsx b/components/workflow/workflow-toolbar.tsx index 3835e369f..498a543a0 100644 --- a/components/workflow/workflow-toolbar.tsx +++ b/components/workflow/workflow-toolbar.tsx @@ -1803,7 +1803,7 @@ export const WorkflowToolbar = ({ // If persistent mode, use fixed positioning const containerClassName = persistent - ? "pointer-events-auto fixed top-0 right-0 left-0 z-50 flex items-center justify-between border-b bg-background px-4 py-3" + ? 
"pointer-events-auto fixed top-[var(--app-banner-height,0px)] right-0 left-0 z-50 flex items-center justify-between border-b bg-background px-4 py-3" : ""; const leftSectionClassName = persistent diff --git a/components/workflows/user-menu.tsx b/components/workflows/user-menu.tsx index 921e8f1af..dc0a2a610 100644 --- a/components/workflows/user-menu.tsx +++ b/components/workflows/user-menu.tsx @@ -1,31 +1,27 @@ "use client"; import { - Bookmark, - FolderOpen, - Github, + CreditCard, + FolderTree, Key, LogOut, Plug, Settings, - Tag, Users, Wallet, } from "lucide-react"; +import { useRouter } from "next/navigation"; import { useState } from "react"; import { AuthDialog, isSingleProviderSignInInitiated, } from "@/components/auth/dialog"; import { ManageOrgsModal } from "@/components/organization/manage-orgs-modal"; -import { AddressBookOverlay } from "@/components/overlays/address-book-overlay"; import { ApiKeysOverlay } from "@/components/overlays/api-keys-overlay"; -import { FeedbackOverlay } from "@/components/overlays/feedback-overlay"; import { IntegrationsOverlay } from "@/components/overlays/integrations-overlay"; import { useOverlay } from "@/components/overlays/overlay-provider"; -import { ProjectsOverlay } from "@/components/overlays/projects-overlay"; +import { ProjectsAndTagsOverlay } from "@/components/overlays/projects-and-tags-overlay"; import { SettingsOverlay } from "@/components/overlays/settings-overlay"; -import { TagsOverlay } from "@/components/overlays/tags-overlay"; import { WalletOverlay } from "@/components/overlays/wallet-overlay"; import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar"; import { Button } from "@/components/ui/button"; @@ -38,13 +34,17 @@ import { DropdownMenuTrigger, } from "@/components/ui/dropdown-menu"; import { signOut, useSession } from "@/lib/auth-client"; -import { useOrganization } from "@/lib/hooks/use-organization"; +import { isBillingEnabled } from "@/lib/billing/feature-flag"; +import { 
useActiveMember, useOrganization } from "@/lib/hooks/use-organization"; -export const UserMenu = () => { +export const UserMenu = (): React.ReactElement => { const { data: session, isPending } = useSession(); const { open: openOverlay } = useOverlay(); const [orgModalOpen, setOrgModalOpen] = useState(false); const { organization } = useOrganization(); + const { isOwner } = useActiveMember(); + const router = useRouter(); + const showBilling = isOwner && isBillingEnabled(); const handleLogout = async () => { await signOut(); @@ -144,9 +144,9 @@ export const UserMenu = () => {
- openOverlay(FeedbackOverlay)}> - - Report an issue + openOverlay(WalletOverlay)}> + + Wallet openOverlay(SettingsOverlay)}> @@ -160,21 +160,15 @@ export const UserMenu = () => { API Keys - openOverlay(WalletOverlay)}> - - Wallet - - openOverlay(AddressBookOverlay)}> - - Address Book - - openOverlay(ProjectsOverlay)}> - - Projects - - openOverlay(TagsOverlay)}> - - Tags + {showBilling && ( + router.push("/billing")}> + + Billing + + )} + openOverlay(ProjectsAndTagsOverlay)}> + + Projects and Tags diff --git a/docker-bake.hcl b/docker-bake.hcl index bda2b0a96..e3d571344 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -50,13 +50,13 @@ target "app" { NEXT_PUBLIC_BILLING_ENABLED = NEXT_PUBLIC_BILLING_ENABLED NEXT_PUBLIC_SENTRY_DSN = NEXT_PUBLIC_SENTRY_DSN } - tags = compact([ + tags = ECR_REGISTRY != "" ? compact([ "${ECR_REGISTRY}/${ECR_REPO}:app-${IMAGE_TAG}", "${ECR_REGISTRY}/${ECR_REPO}:app-latest", ENVIRONMENT_TAG != "" ? "${ECR_REGISTRY}/${ECR_REPO}:${ENVIRONMENT_TAG}" : "", - ]) - cache-from = ["type=registry,ref=${ECR_REGISTRY}/${ECR_REPO}:cache-app"] - cache-to = ["type=registry,ref=${ECR_REGISTRY}/${ECR_REPO}:cache-app,mode=max"] + ]) : [] + cache-from = ECR_REGISTRY != "" ? ["type=registry,ref=${ECR_REGISTRY}/${ECR_REPO}:cache-app"] : [] + cache-to = ECR_REGISTRY != "" ? 
["type=registry,ref=${ECR_REGISTRY}/${ECR_REPO}:cache-app,mode=max"] : [] } target "sentry-upload" { diff --git a/docs-site/pnpm-lock.yaml b/docs-site/pnpm-lock.yaml index 27cf45b54..7cc71ed25 100644 --- a/docs-site/pnpm-lock.yaml +++ b/docs-site/pnpm-lock.yaml @@ -981,8 +981,8 @@ packages: devlop@1.1.0: resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==} - dompurify@3.3.3: - resolution: {integrity: sha512-Oj6pzI2+RqBfFG+qOaOLbFXLQ90ARpcGG6UePL82bJLtdsa6CYJD7nmiU8MW9nQNOtCHV3lZ/Bzq1X0QYbBZCA==} + dompurify@3.4.0: + resolution: {integrity: sha512-nolgK9JcaUXMSmW+j1yaSvaEaoXYHwWyGJlkoCTghc97KgGDDSnpoU/PlEnw63Ah+TGKFOyY+X5LnxaWbCSfXg==} entities@6.0.1: resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==} @@ -2895,7 +2895,7 @@ snapshots: dependencies: dequal: 2.0.3 - dompurify@3.3.3: + dompurify@3.4.0: optionalDependencies: '@types/trusted-types': 2.0.7 @@ -3446,7 +3446,7 @@ snapshots: d3-sankey: 0.12.3 dagre-d3-es: 7.0.13 dayjs: 1.11.19 - dompurify: 3.3.3 + dompurify: 3.4.0 katex: 0.16.27 khroma: 2.1.0 lodash-es: 4.17.22 diff --git a/docs/ai-tools/_meta.ts b/docs/ai-tools/_meta.ts index f6b1674b0..f40fe0b70 100644 --- a/docs/ai-tools/_meta.ts +++ b/docs/ai-tools/_meta.ts @@ -2,4 +2,5 @@ export default { overview: "Overview", "claude-code-plugin": "Claude Code Plugin", "mcp-server": "MCP Server", + "agent-wallets": "x402 Wallets for Agents", }; diff --git a/docs/ai-tools/agent-wallets.md b/docs/ai-tools/agent-wallets.md new file mode 100644 index 000000000..4fede3799 --- /dev/null +++ b/docs/ai-tools/agent-wallets.md @@ -0,0 +1,61 @@ +--- +title: "x402 Wallets for AI Agents" +description: "Install an x402 wallet in your AI agent so it can pay for KeeperHub workflows (or any x402 service)." 
+--- + +# x402 Wallets for AI Agents + +KeeperHub paid workflows settle via the [x402 payment protocol](https://docs.cdp.coinbase.com/x402): each call carries a USDC payment, and the server returns the result only after the payment is verified. To call a paid workflow, your agent needs an x402 wallet. + +This page lists current x402 wallet options. KeeperHub does not run any of them -- both are third-party tools in the wider x402 ecosystem. Each works with KeeperHub and with every other x402-compliant service. + +## agentcash + +`agentcash` is a CLI + skill bundle from [agentcash.dev](https://agentcash.dev). It maintains a local USDC wallet and signs x402 payments on the agent's behalf. + +```bash +npx agentcash add https://app.keeperhub.com +``` + +This walks KeeperHub's `/openapi.json`, generates a `keeperhub` skill file, and symlinks it into every detected agent skill directory. After install, agents can call `search_workflows` and `call_workflow` as first-class tools; payment is routed through the agentcash wallet automatically. + +Supported agents (17 at time of writing): Claude Code, Cursor, Cline, Windsurf, Continue, Roo Code, Kilo Code, Goose, Trae, Junie, Crush, Kiro CLI, Qwen Code, OpenHands, Gemini CLI, Codex, GitHub Copilot. + +> **Testing only. Do not custody real funds.** +> agentcash stores the wallet key as an **unencrypted plaintext file** at `~/.agentcash/wallet.json`. There is no passphrase, no keychain integration, and no seed-phrase backup -- if the file is deleted, lost, or read by any process running as your user, the funds are gone or stolen. This is appropriate for development and automation experiments with small balances (e.g. a few dollars of USDC to pay for test calls), but it is **not** a production wallet. +> +> KeeperHub does not operate agentcash and is not responsible for funds stored in an agentcash wallet. Use it at your own risk and do not top it up beyond what you are willing to lose. 
+ +## Coinbase agentic wallet skills + +Coinbase publishes a bundle of 9 general-purpose x402 skills that work with any x402-compliant service, including KeeperHub: + +```bash +npx skills add coinbase/agentic-wallet-skills +``` + +This installs skills including `authenticate-wallet`, `fund`, `pay-for-service`, `search-for-service`, `send-usdc`, `trade`, `query-onchain-data`, and `x402`. The wallet is managed through Coinbase Developer Platform; payment flows route through the CDP infrastructure. + +Full documentation and security risk ratings: https://skills.sh/coinbase/agentic-wallet-skills + +## Which wallet should I use? + +Both wallets can call any x402-compliant service, KeeperHub included, so the choice depends on your agent's existing setup and custody preferences, not on anything KeeperHub-specific. + +- **Pick agentcash** for a quick-start install of KeeperHub (or any x402 origin) as a first-class skill. Keep in mind agentcash keys are plaintext on disk -- it is a testing wallet, not a production one. +- **Pick Coinbase agentic wallet skills** if you already run a CDP wallet, want managed key infrastructure, or prefer the broader Coinbase x402 ecosystem. + +Nothing stops you from installing both -- they do not conflict. + +## What KeeperHub exposes to the agent + +Regardless of which wallet you install, the agent calls KeeperHub through two meta-tools (described in its OpenAPI at `/openapi.json`): + +- `search_workflows` -- find workflows by category, tag, or free text. Returns slug, description, inputSchema, and price for each match. +- `call_workflow` -- execute a listed workflow by slug. For read workflows the call executes and returns the result; for write workflows it returns unsigned calldata `{to, data, value}` for the caller to submit. + +This meta-tool pattern keeps the agent's tool list small no matter how many workflows are listed -- the agent discovers available workflows at runtime instead of registering one tool per workflow. 
+ +## Paying for calls + +Paid workflows settle in USDC on Base (via x402) or USDC.e on Tempo (via MPP). Most workflows cost under `$0.05` per call. See [Paid Workflows](/workflows/paid-workflows) for the creator-side view of the same settlement. diff --git a/docs/ai-tools/index.md b/docs/ai-tools/index.md index d3181b8da..f918be5d3 100644 --- a/docs/ai-tools/index.md +++ b/docs/ai-tools/index.md @@ -10,3 +10,4 @@ AI-powered tools that help you build, configure, and manage blockchain automatio - [Overview](/ai-tools/overview) -- How AI tools integrate with KeeperHub - [Claude Code Plugin](/ai-tools/claude-code-plugin) -- Use Claude Code for workflow development - [MCP Server](/ai-tools/mcp-server) -- KeeperHub MCP server for AI-assisted automation +- [x402 Wallets for Agents](/ai-tools/agent-wallets) -- Install agentcash or Coinbase wallet skills so your agent can pay for KeeperHub workflows diff --git a/docs/api/workflows.md b/docs/api/workflows.md index 988bdf386..72d2cf877 100644 --- a/docs/api/workflows.md +++ b/docs/api/workflows.md @@ -265,7 +265,7 @@ Returns distinct categories and protocols from all public workflows. 
Useful for ```json { "categories": ["defi", "nft"], - "protocols": ["uniswap", "aave"] + "protocols": ["uniswap", "aave-v3"] } ``` diff --git a/docs/workflows/_meta.ts b/docs/workflows/_meta.ts index a454e18c1..c8b2950c5 100644 --- a/docs/workflows/_meta.ts +++ b/docs/workflows/_meta.ts @@ -1,5 +1,6 @@ export default { introduction: "Introduction", creating: "Creating Workflows", + "paid-workflows": "Paid Workflows", examples: "Examples", }; diff --git a/docs/workflows/index.md b/docs/workflows/index.md index 61bbb7768..5e7190577 100644 --- a/docs/workflows/index.md +++ b/docs/workflows/index.md @@ -9,4 +9,5 @@ Workflows are the core of KeeperHub -- visual automations that connect triggers, - [Introduction](/workflows/introduction) -- What workflows are and how they work - [Creating Workflows](/workflows/creating) -- Step-by-step guide to building your first workflow +- [Paid Workflows](/workflows/paid-workflows) -- List workflows for AI agents and earn per-call revenue on Base or Tempo - [Examples](/workflows/examples) -- Real-world workflow examples and templates diff --git a/docs/workflows/paid-workflows.md b/docs/workflows/paid-workflows.md new file mode 100644 index 000000000..2515fed46 --- /dev/null +++ b/docs/workflows/paid-workflows.md @@ -0,0 +1,59 @@ +--- +title: "Paid Workflows" +description: "List workflows for AI agents to call on demand and earn revenue on Base or Tempo." +--- + +# Paid Workflows + +When you list a workflow as paid, AI agents can discover and call it via KeeperHub's MCP endpoint. Each call settles on-chain in USDC, with the creator wallet as the recipient. Revenue arrives on either Base or Tempo depending on which protocol the calling agent uses. 
+ +## How payment works + +Agents can pay using one of two protocols, and both are always offered on every paid workflow call: + +| Protocol | Chain | Token | Used by | +|---|---|---|---| +| x402 | Base (chain ID 8453) | USDC (`0x8335...02913`) | Agentcash wallets with a Base balance, Coinbase CDP-backed agents | +| MPP | Tempo (chain ID 4217) | USDC.e (`0x20c0...8b50`) | Agentcash wallets with a Tempo balance, MPP-native clients | + +The calling agent chooses which protocol to use based on what its wallet holds. A workflow creator does not pick one — both chains are live on every listed workflow, and you receive funds on whichever chain the caller paid from. + +## Receiving revenue on two chains + +After a caller pays, the USDC (or USDC.e) lands directly in your organization's creator wallet. Because the two chains settle in different tokens, you will see two balances in your wallet overlay: + +- **Base USDC** — accumulates from x402 calls +- **Tempo USDC.e** — accumulates from MPP calls + +Both are fully redeemable stablecoins pegged to USD. The split is purely a function of which agents called your workflow. There is nothing to configure, and no balance is "incorrect" if one chain has zero activity. + +### Why Tempo? + +Tempo has faster finality and predictable gas costs, which matters for high-throughput agent traffic. Base has the broader ecosystem and more wallet support today. KeeperHub supports both because different agents run on different wallets — forcing a single chain would exclude either the Coinbase Agent Kit ecosystem (Base) or the MPP-native wallet ecosystem (Tempo). + +### Consolidating your balance + +If you prefer a single-chain balance, you can bridge between Base and Tempo through the relevant chain bridges. KeeperHub does not auto-bridge — creator funds stay in the wallet you registered. See [Wallet Management](/wallet-management) for details on accessing the wallet. + +## Listing a workflow + +1. Open a workflow you want to list +2. 
Click the **List** button in the workflow toolbar +3. Set a per-call price in USDC and (optionally) a category and tags +4. Save — the workflow is now callable by agents via `https://app.keeperhub.com/api/mcp/workflows/<slug>/call` + +Listed workflows are discoverable by x402scan, mppscan, and agentcash through their OpenAPI / `PAYMENT-REQUIRED` probes. No registration form is required for these scanners. + +## Pricing guidance + +Most listed workflows price between `$0.001` and `$0.10` per call. Agents pay per-request, so a price that sounds negligible in isolation adds up at scale. Consider: + +- The workflow's runtime cost (gas, RPC, external API calls) +- How long the execution takes +- Whether the output is a one-shot answer or part of a chained agent session + +You can update the price at any time on existing listed workflows. Prior calls settle at the price active when the call was made. + +## Dogfood reference + +The `mcp-test` workflow listed at `https://app.keeperhub.com/api/mcp/workflows/mcp-test/call` is the reference implementation. It is priced at `$0.01` per call, accepts both x402 and MPP payments, and its `/openapi.json` entry is what the scanners ingest. diff --git a/drizzle/0051_rename_aave_slug_to_v3.sql b/drizzle/0051_rename_aave_slug_to_v3.sql new file mode 100644 index 000000000..8d7130b1b --- /dev/null +++ b/drizzle/0051_rename_aave_slug_to_v3.sql @@ -0,0 +1,79 @@ +-- Rename Aave V3 protocol slug from "aave" to "aave-v3" so it coexists +-- cleanly with the new "aave-v4" slug. Data-only migration: no schema change.
+-- +-- Slug-bearing fields audited in the schema: +-- * workflows.featured_protocol (text) +-- * workflows.nodes[].data.config.actionType (jsonb, "aave/*") +-- * workflows.nodes[].data.config._protocolMeta.protocolSlug +-- (stringified JSON inside jsonb) +-- * workflows.nodes[].data._eventProtocolSlug (jsonb, on trigger nodes) +-- * integrations.type (text, stores IntegrationType) +-- +-- Not touched: workflows.nodes[].data._eventProtocolIconPath. Both "aave" +-- and "aave-v3" resolve to the same icon file (protocols/aave-v3.ts still +-- declares icon: "/protocols/aave.png"), so the icon path is stable across +-- the rename. The 0025 safe-wallet precedent had to update its icon path +-- because the safe icon file itself was being renamed; that's not the case +-- here. +-- +-- Historical tables (workflow_executions, workflow_execution_logs, +-- direct_executions) are intentionally NOT touched: they record past runs +-- with their slug-of-the-day, rewriting them would falsify history. +-- +-- Strategy: text-level REPLACE on JSONB::text for actionType / protocolSlug +-- (the stringified _protocolMeta can't be reached by jsonb_set because its +-- value is itself a string, not nested jsonb). For _eventProtocolSlug, use +-- jsonb_set via jsonb_agg (the 0025 precedent): a native jsonb key on each +-- node's data object. +-- +-- LIKE-escape note: LIKE treats backslash as its own escape character by +-- default. To match a literal backslash-quote sequence in the text form we +-- must double the backslashes in the LIKE pattern (so '\\"' after string +-- literal parsing becomes '\\"' which LIKE processes as \ + ") -- mirroring +-- the pattern used in 0048_rename_weth_to_wrapped.sql. REPLACE has no such +-- escape processing, so the REPLACE patterns keep single backslashes. 
+ +UPDATE workflows +SET featured_protocol = 'aave-v3' +WHERE featured_protocol = 'aave'; +--> statement-breakpoint + +UPDATE workflows +SET nodes = REPLACE( + REPLACE( + nodes::text, + '"actionType": "aave/', + '"actionType": "aave-v3/' + ), + '\"protocolSlug\":\"aave\"', + '\"protocolSlug\":\"aave-v3\"' +)::jsonb +WHERE + nodes::text LIKE '%"actionType": "aave/%' + OR nodes::text LIKE '%\\"protocolSlug\\":\\"aave\\"%'; +--> statement-breakpoint + +-- Event triggers (node.data._eventProtocolSlug). +-- Structured jsonb_set via jsonb_agg, mirroring the 0025 precedent. +UPDATE workflows +SET nodes = ( + SELECT jsonb_agg( + CASE + WHEN node->'data'->>'_eventProtocolSlug' = 'aave' + THEN jsonb_set(node, '{data,_eventProtocolSlug}', '"aave-v3"') + ELSE node + END + ) + FROM jsonb_array_elements(nodes) AS node +) +WHERE + nodes::text LIKE '%"_eventProtocolSlug":"aave"%' + OR nodes::text LIKE '%"_eventProtocolSlug": "aave"%'; +--> statement-breakpoint + +-- Defensive: integrations.type is $type which no longer +-- admits "aave". Protocol plugins set requiresCredentials: false, so no +-- rows are expected, but renaming is idempotent and cheap. 
+UPDATE integrations +SET type = 'aave-v3' +WHERE type = 'aave'; diff --git a/drizzle/meta/0048_snapshot.json b/drizzle/meta/0051_snapshot.json similarity index 99% rename from drizzle/meta/0048_snapshot.json rename to drizzle/meta/0051_snapshot.json index 6122d0f0d..bf9cff656 100644 --- a/drizzle/meta/0048_snapshot.json +++ b/drizzle/meta/0051_snapshot.json @@ -1,6 +1,6 @@ { - "id": "ef803367-2686-4d5a-9ecb-fa6f7ba3cfb2", - "prevId": "a9b21ef3-5400-418a-a731-71b47718eb14", + "id": "d0f93421-e64d-4982-9b9f-61c75c83e53e", + "prevId": "4d96b65e-dc8d-4755-856d-f8cf15412118", "version": "7", "dialect": "postgresql", "tables": { @@ -2559,6 +2559,13 @@ "primaryKey": false, "notNull": false }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, "created_at": { "name": "created_at", "type": "timestamp", @@ -2567,7 +2574,24 @@ "default": "now()" } }, - "indexes": {}, + "indexes": { + "para_wallets_org_active_unique": { + "name": "para_wallets_org_active_unique", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "where": "\"para_wallets\".\"is_active\" = true", + "concurrently": false, + "method": "btree", + "with": {} + } + }, "foreignKeys": { "para_wallets_user_id_users_id_fk": { "name": "para_wallets_user_id_users_id_fk", @@ -2597,15 +2621,7 @@ } }, "compositePrimaryKeys": {}, - "uniqueConstraints": { - "para_wallets_organization_id_unique": { - "name": "para_wallets_organization_id_unique", - "nullsNotDistinct": false, - "columns": [ - "organization_id" - ] - } - }, + "uniqueConstraints": {}, "policies": {}, "checkConstraints": {}, "isRLSEnabled": false diff --git a/drizzle/meta/_journal.json b/drizzle/meta/_journal.json index 515b8fc03..e52c8a47e 100644 --- a/drizzle/meta/_journal.json +++ b/drizzle/meta/_journal.json @@ -358,6 +358,13 @@ "when": 1776441892552, "tag": "0050_curly_gorilla_man", 
       "breakpoints": true
+    },
+    {
+      "idx": 51,
+      "version": "7",
+      "when": 1776750254755,
+      "tag": "0051_rename_aave_slug_to_v3",
+      "breakpoints": true
     }
   ]
 }
\ No newline at end of file
diff --git a/keeperhub-events/event-tracker/lib/utils/chains.ts b/keeperhub-events/event-tracker/lib/utils/chains.ts
index 4ce461337..e19e41bee 100644
--- a/keeperhub-events/event-tracker/lib/utils/chains.ts
+++ b/keeperhub-events/event-tracker/lib/utils/chains.ts
@@ -5,7 +5,7 @@ export const AVAILABLE_CHAINS = {
   SOLANA_MAINNET: "101",
   SOLANA_DEVNET: "103",
   BASE_MAINNET: "8453",
-  TEMPO_MAINNET: "42420",
+  TEMPO_MAINNET: "4217",
   TEMPO_TESTNET: "42429",
   BASE_SEPOLIA: "84532",
 } as const;
diff --git a/lib/billing/constants.ts b/lib/billing/constants.ts
index 0b08cf347..c0a36ce7c 100644
--- a/lib/billing/constants.ts
+++ b/lib/billing/constants.ts
@@ -6,6 +6,7 @@ export const BILLING_API = {
   CHECKOUT: "/api/billing/checkout",
   CANCEL: "/api/billing/cancel",
   USAGE_SUGGESTION: "/api/billing/usage-suggestion",
+  BILLING_DETAILS: "/api/billing/billing-details",
 } as const;
 
 export const BILLING_ALERTS = {
diff --git a/lib/billing/provider.ts b/lib/billing/provider.ts
index f5afaf541..8b6327f6f 100644
--- a/lib/billing/provider.ts
+++ b/lib/billing/provider.ts
@@ -93,6 +93,16 @@ export type ProrationPreview = {
   }[];
 };
 
+export type BillingDetails = {
+  paymentMethod: {
+    brand: string;
+    last4: string;
+    expMonth: number;
+    expYear: number;
+  } | null;
+  billingEmail: string | null;
+};
+
 export interface BillingProvider {
   readonly name: string;
 
@@ -105,6 +115,8 @@
     returnUrl: string
   ): Promise<{ url: string }>;
 
+  getBillingDetails(customerId: string): Promise<BillingDetails>;
+
   verifyWebhook(body: string, signature: string): Promise<BillingWebhookEvent>;
 
   getSubscriptionDetails(subscriptionId: string): Promise;
diff --git a/lib/billing/providers/stripe.ts b/lib/billing/providers/stripe.ts
index a469a27e3..ce9fef734 100644
--- a/lib/billing/providers/stripe.ts
+++ b/lib/billing/providers/stripe.ts
@@ 
-1,6 +1,7 @@
 import type Stripe from "stripe";
 import { stripe } from "@/lib/stripe";
 import type {
+  BillingDetails,
   BillingProvider,
   BillingWebhookEvent,
   CreateCheckoutParams,
@@ -263,6 +264,77 @@ export class StripeBillingProvider implements BillingProvider {
     return { url: session.url };
   }
 
+  async getBillingDetails(customerId: string): Promise<BillingDetails> {
+    const s = getStripe();
+    const customer = await s.customers.retrieve(customerId, {
+      expand: ["invoice_settings.default_payment_method"],
+    });
+
+    if (customer.deleted) {
+      return { paymentMethod: null, billingEmail: null };
+    }
+
+    const defaultPaymentMethod =
+      customer.invoice_settings?.default_payment_method;
+    let card: Stripe.PaymentMethod.Card | null =
+      defaultPaymentMethod &&
+      typeof defaultPaymentMethod === "object" &&
+      defaultPaymentMethod.type === "card"
+        ? (defaultPaymentMethod.card ?? null)
+        : null;
+
+    // Stripe Checkout stores the default payment method on the subscription,
+    // not on the customer. Prefer the active subscription's default PM, since
+    // that's the card actually being charged. Fall back to any status only if
+    // there is no active sub, to preserve data for past_due / canceled users
+    // viewing billing history.
+    if (!card) {
+      let subs = await s.subscriptions.list({
+        customer: customerId,
+        status: "active",
+        limit: 1,
+        expand: ["data.default_payment_method"],
+      });
+      if (subs.data.length === 0) {
+        subs = await s.subscriptions.list({
+          customer: customerId,
+          status: "all",
+          limit: 1,
+          expand: ["data.default_payment_method"],
+        });
+      }
+      const subDefault = subs.data[0]?.default_payment_method;
+      if (
+        subDefault &&
+        typeof subDefault === "object" &&
+        subDefault.type === "card"
+      ) {
+        card = subDefault.card ?? null;
+      }
+    }
+
+    if (!card) {
+      const methods = await s.paymentMethods.list({
+        customer: customerId,
+        type: "card",
+        limit: 1,
+      });
+      card = methods.data[0]?.card ?? null;
+    }
+
+    return {
+      paymentMethod: card
+        ? 
{ + brand: card.brand, + last4: card.last4, + expMonth: card.exp_month, + expYear: card.exp_year, + } + : null, + billingEmail: customer.email ?? null, + }; + } + // biome-ignore lint/suspicious/useAwait: must be async to satisfy BillingProvider interface contract async verifyWebhook( body: string, diff --git a/lib/earnings/queries.ts b/lib/earnings/queries.ts index 15d54e8e4..8e2f75e62 100644 --- a/lib/earnings/queries.ts +++ b/lib/earnings/queries.ts @@ -5,6 +5,7 @@ import { db } from "@/lib/db"; import { workflows } from "@/lib/db/schema"; import { workflowPayments } from "@/lib/db/schema-payments"; import type { + ChainEarnings, EarningsSummary, SettlementStatus, WorkflowEarningsRow, @@ -138,6 +139,10 @@ export async function getEarningsSummary( totalInvocations: 0, platformFeePercent, creatorSharePercent, + perChain: { + base: { grossRevenue: formatUsdc(0), invocationCount: 0 }, + tempo: { grossRevenue: formatUsdc(0), invocationCount: 0 }, + }, workflows: [], total: 0, page, @@ -181,6 +186,20 @@ export async function getEarningsSummary( const totalGross = Number(orgTotals?.grossRevenue ?? "0"); const totalInvocations = orgTotals?.invocationCount ?? 0; + // Per-chain breakdown so creators see Base (x402/USDC) vs Tempo (MPP/USDC.e) + // split instead of just an aggregate. See docs/workflows/paid-workflows.md. 
+ const perChainRows = await db + .select({ + chain: workflowPayments.chain, + grossRevenue: sum(workflowPayments.amountUsdc), + invocationCount: count(workflowPayments.id), + }) + .from(workflowPayments) + .where(inArray(workflowPayments.workflowId, orgWorkflowIds)) + .groupBy(workflowPayments.chain); + + const perChain = buildPerChainEarnings(perChainRows); + // Per-workflow revenue for the current page only const revenueRows = await db .select({ @@ -259,6 +278,7 @@ export async function getEarningsSummary( totalInvocations, platformFeePercent, creatorSharePercent, + perChain, workflows: paginatedRows, total, page, @@ -266,3 +286,43 @@ export async function getEarningsSummary( hasListedWorkflows: true, }; } + +type PerChainRow = { + chain: string; + grossRevenue: string | null; + invocationCount: number; +}; + +/** + * Reshapes the chain-grouped SQL result into the fixed { base, tempo } shape + * expected by the UI. Missing chains default to zero so the UI never has to + * null-check. + */ +export function buildPerChainEarnings(rows: PerChainRow[]): { + base: ChainEarnings; + tempo: ChainEarnings; +} { + const base: ChainEarnings = { + grossRevenue: formatUsdc(0), + invocationCount: 0, + }; + const tempo: ChainEarnings = { + grossRevenue: formatUsdc(0), + invocationCount: 0, + }; + for (const row of rows) { + const gross = Number(row.grossRevenue ?? 
"0"); + const entry: ChainEarnings = { + grossRevenue: formatUsdc(gross), + invocationCount: row.invocationCount, + }; + if (row.chain === "base") { + base.grossRevenue = entry.grossRevenue; + base.invocationCount = entry.invocationCount; + } else if (row.chain === "tempo") { + tempo.grossRevenue = entry.grossRevenue; + tempo.invocationCount = entry.invocationCount; + } + } + return { base, tempo }; +} diff --git a/lib/earnings/types.ts b/lib/earnings/types.ts index 32012d8b5..5779d5010 100644 --- a/lib/earnings/types.ts +++ b/lib/earnings/types.ts @@ -12,6 +12,16 @@ export type WorkflowEarningsRow = { settlementStatus: SettlementStatus; }; +/** + * Revenue split for a single settlement chain. `grossRevenue` is USDC on Base + * (x402) or USDC.e on Tempo (MPP) -- both pegged to USD, so summed totals are + * meaningful even across chains for display purposes. + */ +export type ChainEarnings = { + grossRevenue: string; + invocationCount: number; +}; + export type EarningsSummary = { totalGrossRevenue: string; totalCreatorEarnings: string; @@ -19,6 +29,10 @@ export type EarningsSummary = { totalInvocations: number; platformFeePercent: number; creatorSharePercent: number; + perChain: { + base: ChainEarnings; + tempo: ChainEarnings; + }; workflows: WorkflowEarningsRow[]; total: number; page: number; diff --git a/lib/extensions.tsx b/lib/extensions.tsx index abbf91c41..00a4c87db 100644 --- a/lib/extensions.tsx +++ b/lib/extensions.tsx @@ -88,6 +88,7 @@ registerFieldRenderer(
| null; + category?: string | null; + tagName?: string | null; }; type PaymentRequiredV2 = { @@ -81,6 +83,8 @@ function buildPaymentRequired(params: Dual402Params): PaymentRequiredV2 { workflowName, resourceUrl, inputSchema, + category, + tagName, } = params; const amountSmallestUnit = String( Math.round(Number(price) * 10 ** USDC_DECIMALS) @@ -106,18 +110,24 @@ function buildPaymentRequired(params: Dual402Params): PaymentRequiredV2 { ], }; + // CDP Bazaar discovery: `discoverable: true` opts the resource into the + // marketplace index. The schema subtree feeds agentcash / x402scan probers. + const bazaar: Record = { discoverable: true }; + if (category) { + bazaar.category = category; + } + if (tagName) { + bazaar.tags = [tagName]; + } if (inputSchema) { - payload.extensions = { - bazaar: { - schema: { - properties: { - input: { properties: { body: inputSchema } }, - output: { properties: { example: WORKFLOW_OUTPUT_EXAMPLE } }, - }, - }, + bazaar.schema = { + properties: { + input: { properties: { body: inputSchema } }, + output: { properties: { example: WORKFLOW_OUTPUT_EXAMPLE } }, }, }; } + payload.extensions = { bazaar }; return payload; } @@ -362,14 +372,24 @@ export function gatePayment( return handleMpp(request, workflow, creatorWalletAddress, createHandler); } - // No payment header -- return dual 402 challenge + // No payment header -- return dual 402 challenge. + // Resource URL must use the public hostname (not request.url, which can be + // the internal pod bind `0.0.0.0:3000` inside K8s) or the CDP Bazaar + // crawler and any other caller will fail to resolve the endpoint. + const publicHost = + process.env.NEXT_PUBLIC_APP_URL ?? "https://app.keeperhub.com"; + const resourceUrl = workflow.listedSlug + ? `${publicHost}/api/mcp/workflows/${workflow.listedSlug}/call` + : request.url; return Promise.resolve( buildDual402Response({ price: workflow.priceUsdcPerCall ?? 
"0", creatorWalletAddress, workflowName: workflow.name, - resourceUrl: request.url, + resourceUrl, inputSchema: workflow.inputSchema, + category: workflow.category, + tagName: workflow.tagName, }) as NextResponse ); } diff --git a/lib/protocol-registry.ts b/lib/protocol-registry.ts index bc9908562..3e5b180ce 100644 --- a/lib/protocol-registry.ts +++ b/lib/protocol-registry.ts @@ -282,6 +282,7 @@ function buildConfigFieldsFromAction( action: ProtocolAction ): ActionConfigField[] { const contract = def.contracts[action.contract]; + const allowedChainIds = Object.keys(contract.addresses); const fields: ActionConfigField[] = [ { key: "network", @@ -291,6 +292,7 @@ function buildConfigFieldsFromAction( // KEEP-137: write actions show private mempool variants (e.g., Flashbots) ...(action.type === "write" ? { showPrivateVariants: true } : {}), required: true, + ...(allowedChainIds.length > 0 ? { allowedChainIds } : {}), }, ]; @@ -367,7 +369,11 @@ function buildOutputFieldsFromAction( ): Array<{ field: string; description: string }> { const outputs: Array<{ field: string; description: string }> = []; - if (action.outputs) { + // KEEP-296: only reads surface action.outputs as UI template suggestions. + // Write actions still have ABI-derived outputs at the model layer, but + // writeContractCore returns result: undefined, so surfacing them would + // create template suggestions that resolve to undefined at runtime. 
+ if (action.type === "read" && action.outputs) { for (const output of action.outputs) { outputs.push({ field: output.name, description: output.label }); } diff --git a/lib/rpc/rpc-config.ts b/lib/rpc/rpc-config.ts index 126cabb38..7157604ed 100644 --- a/lib/rpc/rpc-config.ts +++ b/lib/rpc/rpc-config.ts @@ -105,19 +105,13 @@ export const CHAIN_CONFIG: Record = { fallbackEnvKey: "CHAIN_TEMPO_TESTNET_FALLBACK_RPC", publicDefault: PUBLIC_RPCS.TEMPO_TESTNET, }, - // Tempo Mainnet (4217 is the canonical chain ID; 42420 kept for backwards compatibility) + // Tempo Mainnet 4217: { jsonKey: "tempo-mainnet", envKey: "CHAIN_TEMPO_MAINNET_PRIMARY_RPC", fallbackEnvKey: "CHAIN_TEMPO_MAINNET_FALLBACK_RPC", publicDefault: PUBLIC_RPCS.TEMPO_MAINNET, }, - 42420: { - jsonKey: "tempo-mainnet", - envKey: "CHAIN_TEMPO_MAINNET_PRIMARY_RPC", - fallbackEnvKey: "CHAIN_TEMPO_MAINNET_FALLBACK_RPC", - publicDefault: PUBLIC_RPCS.TEMPO_MAINNET, - }, // BNB Chain (BSC) Mainnet 56: { jsonKey: "bsc-mainnet", diff --git a/lib/types/integration.ts b/lib/types/integration.ts index 5d558c13c..039fd1904 100755 --- a/lib/types/integration.ts +++ b/lib/types/integration.ts @@ -9,12 +9,13 @@ * 2. Add a system integration to SYSTEM_INTEGRATION_TYPES in discover-plugins.ts * 3. 
Run: pnpm discover-plugins * - * Generated types: aave, aerodrome, ai-gateway, ajna, chainlink, chronicle, clerk, code, compound, cowswap, curve, database, discord, ethena, lido, linear, math, morpho, pendle, protocol, resend, rocket-pool, safe, sendgrid, sky, slack, spark, telegram, uniswap, v0, web3, webflow, webhook, wrapped, yearn + * Generated types: aave-v3, aave-v4, aerodrome, ai-gateway, ajna, chainlink, chronicle, clerk, code, compound, cowswap, curve, database, discord, ethena, lido, linear, math, morpho, pendle, protocol, resend, rocket-pool, safe, sendgrid, sky, slack, spark, telegram, uniswap, v0, web3, webflow, webhook, wrapped, yearn */ // Integration type union - plugins + system integrations export type IntegrationType = - | "aave" + | "aave-v3" + | "aave-v4" | "aerodrome" | "ai-gateway" | "ajna" diff --git a/lib/wallet/fetch-balances.ts b/lib/wallet/fetch-balances.ts index 46fac8206..8c9fbf342 100644 --- a/lib/wallet/fetch-balances.ts +++ b/lib/wallet/fetch-balances.ts @@ -3,6 +3,11 @@ */ import { ErrorCategory, logUserError } from "@/lib/logging"; +import { + encodeBalanceOfCallData, + hexWeiToBigInt, + rpcCallWithFailover, +} from "./rpc"; import type { ChainBalance, ChainData, @@ -87,6 +92,16 @@ function buildExplorerAddressUrl( return `${chain.explorerUrl}${path.replace("{address}", address)}`; } +/** + * Collect the ordered list of RPC URLs to attempt for a chain: primary + * first, fallback second when configured. + */ +function getChainRpcUrls(chain: ChainData): string[] { + return chain.defaultFallbackRpc + ? 
[chain.defaultPrimaryRpc, chain.defaultFallbackRpc] + : [chain.defaultPrimaryRpc]; +} + /** * Fetch native token balance for a single chain */ @@ -95,23 +110,14 @@ export async function fetchNativeBalance( chain: ChainData ): Promise { try { - const response = await fetch(chain.defaultPrimaryRpc, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jsonrpc: "2.0", - method: "eth_getBalance", - params: [address, "latest"], - id: 1, - }), + const resultHex = await rpcCallWithFailover(getChainRpcUrls(chain), { + jsonrpc: "2.0", + method: "eth_getBalance", + params: [address, "latest"], + id: 1, }); - const result = await response.json(); - if (result.error) { - throw new Error(result.error.message); - } - - const balanceWei = BigInt(result.result); + const balanceWei = hexWeiToBigInt(resultHex); return { chainId: chain.chainId, @@ -154,49 +160,17 @@ export async function fetchTokenBalance( chain: ChainData ): Promise { try { - // ERC20 balanceOf function signature - const balanceOfSelector = "0x70a08231"; - - // Encode the balanceOf call data - const addressWithoutPrefix = address.startsWith("0x") - ? 
address.slice(2) - : address; - const paddedAddress = addressWithoutPrefix.toLowerCase().padStart(64, "0"); - const callData = `${balanceOfSelector}${paddedAddress}`; - - const response = await fetch(chain.defaultPrimaryRpc, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jsonrpc: "2.0", - method: "eth_call", - params: [{ to: token.tokenAddress, data: callData }, "latest"], - id: 1, - }), + const resultHex = await rpcCallWithFailover(getChainRpcUrls(chain), { + jsonrpc: "2.0", + method: "eth_call", + params: [ + { to: token.tokenAddress, data: encodeBalanceOfCallData(address) }, + "latest", + ], + id: 1, }); - if (!response.ok) { - throw new Error(`HTTP ${response.status}: ${response.statusText}`); - } - - const result = await response.json(); - if (result.error) { - throw new Error(result.error.message || "RPC error"); - } - - if (!result.result || result.result === "0x") { - return { - tokenId: token.id, - chainId: token.chainId, - tokenAddress: token.tokenAddress, - symbol: token.symbol, - name: token.name, - balance: "0.000000", - loading: false, - }; - } - - const balanceWei = BigInt(result.result); + const balanceWei = hexWeiToBigInt(resultHex); return { tokenId: token.id, @@ -272,123 +246,57 @@ export function fetchAllTokenBalances( /** * Fetch balance for a single supported token with retry logic */ -export function fetchSupportedTokenBalance( +export async function fetchSupportedTokenBalance( address: string, token: SupportedToken, - chain: ChainData, - retries = 3 + chain: ChainData ): Promise { - const makeRequest = async ( - attempt: number - // biome-ignore lint/complexity/noExcessiveCognitiveComplexity: Retry logic with exponential backoff requires this complexity - ): Promise => { - try { - // ERC20 balanceOf function signature - const balanceOfSelector = "0x70a08231"; - - // Encode the balanceOf call data - const addressWithoutPrefix = address.startsWith("0x") - ? 
address.slice(2) - : address; - const paddedAddress = addressWithoutPrefix - .toLowerCase() - .padStart(64, "0"); - const callData = `${balanceOfSelector}${paddedAddress}`; - - const response = await fetch(chain.defaultPrimaryRpc, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jsonrpc: "2.0", - method: "eth_call", - params: [{ to: token.tokenAddress, data: callData }, "latest"], - id: 1, - }), - }); - - // Handle rate limiting with retry - if (response.status === 429 && attempt < retries) { - const backoffMs = Math.min(1000 * 2 ** attempt, 5000); - await new Promise((resolve) => setTimeout(resolve, backoffMs)); - return makeRequest(attempt + 1); - } - - if (!response.ok) { - throw new Error(`HTTP ${response.status}: ${response.statusText}`); - } - - const result = await response.json(); - if (result.error) { - throw new Error(result.error.message || "RPC error"); - } - - const tokenExplorerUrl = buildExplorerAddressUrl( - chain, - token.tokenAddress - ); - - if (!result.result || result.result === "0x") { - return { - chainId: token.chainId, - tokenAddress: token.tokenAddress, - symbol: token.symbol, - name: token.name, - logoUrl: token.logoUrl, - balance: "0.000000", - loading: false, - explorerUrl: tokenExplorerUrl, - }; - } + try { + const resultHex = await rpcCallWithFailover(getChainRpcUrls(chain), { + jsonrpc: "2.0", + method: "eth_call", + params: [ + { to: token.tokenAddress, data: encodeBalanceOfCallData(address) }, + "latest", + ], + id: 1, + }); - const balanceWei = BigInt(result.result); + const balanceWei = hexWeiToBigInt(resultHex); - return { - chainId: token.chainId, - tokenAddress: token.tokenAddress, - symbol: token.symbol, - name: token.name, - logoUrl: token.logoUrl, - balance: formatWeiToBalance(balanceWei, token.decimals), - loading: false, - explorerUrl: tokenExplorerUrl, - }; - } catch (error) { - // Retry on network errors - if ( - attempt < retries && - error instanceof Error && - 
!error.message.includes("HTTP 4") - ) { - const backoffMs = Math.min(500 * 2 ** attempt, 3000); - await new Promise((resolve) => setTimeout(resolve, backoffMs)); - return makeRequest(attempt + 1); + return { + chainId: token.chainId, + tokenAddress: token.tokenAddress, + symbol: token.symbol, + name: token.name, + logoUrl: token.logoUrl, + balance: formatWeiToBalance(balanceWei, token.decimals), + loading: false, + explorerUrl: buildExplorerAddressUrl(chain, token.tokenAddress), + }; + } catch (error) { + logUserError( + ErrorCategory.NETWORK_RPC, + `Failed to fetch balance for ${token.symbol}:`, + error, + { + chain_id: token.chainId.toString(), + token_symbol: token.symbol, + token_address: token.tokenAddress, } - - logUserError( - ErrorCategory.NETWORK_RPC, - `Failed to fetch balance for ${token.symbol}:`, - error, - { - chain_id: token.chainId.toString(), - token_symbol: token.symbol, - token_address: token.tokenAddress, - } - ); - return { - chainId: token.chainId, - tokenAddress: token.tokenAddress, - symbol: token.symbol, - name: token.name, - logoUrl: token.logoUrl, - balance: "0", - loading: false, - error: error instanceof Error ? error.message : "Failed to fetch", - explorerUrl: buildExplorerAddressUrl(chain, token.tokenAddress), - }; - } - }; - - return makeRequest(0); + ); + return { + chainId: token.chainId, + tokenAddress: token.tokenAddress, + symbol: token.symbol, + name: token.name, + logoUrl: token.logoUrl, + balance: "0", + loading: false, + error: error instanceof Error ? error.message : "Failed to fetch", + explorerUrl: buildExplorerAddressUrl(chain, token.tokenAddress), + }; + } } /** diff --git a/lib/wallet/rpc.ts b/lib/wallet/rpc.ts new file mode 100644 index 000000000..42957ab57 --- /dev/null +++ b/lib/wallet/rpc.ts @@ -0,0 +1,235 @@ +/** + * Shared JSON-RPC client for wallet balance fetches. + * + * Consolidates retry/backoff semantics and payload encoding used by the + * native and ERC20 balance fetchers. 
Split into its own module so the + * retry logic can be unit-tested without pulling the balance-formatting + * machinery. + */ + +import { addBreadcrumb } from "@sentry/nextjs"; + +const BIGINT_ZERO = BigInt(0); +const EVM_ADDRESS_REGEX = /^(0x)?[0-9a-fA-F]{40}$/; +const ERC20_BALANCE_OF_SELECTOR = "0x70a08231"; +const ERC20_ADDRESS_PADDING = 64; + +export type RpcFailureKind = "standard" | "rate_limit"; + +export type JsonRpcPayload = { + jsonrpc: "2.0"; + method: string; + params: unknown[]; + id: number; +}; + +/** + * RPC retry configuration. + * + * Two exponential-backoff schedules with jitter, picked by failure type: + * + * - `STANDARD`: network errors, HTTP 5xx, and malformed responses (missing + * `result` field). Short backoff because these usually clear quickly. + * - `RATE_LIMIT`: HTTP 429. Longer backoff because the server is actively + * throttling us; retrying too soon just extends the throttle. + * + * Each delay = `min((BASE_MS * 2^attempt) + jitter, ABSOLUTE_MAX_BACKOFF_MS)` + * where `jitter = random() * base * JITTER_FACTOR`. + * + * `RETRIES_PER_URL_WITH_FAILOVER` applies when `rpcCallWithFailover` has a + * fallback URL available — retry fewer times per URL so we hand off to the + * fallback sooner when the primary is throttled or flaky. + */ +export const RPC_RETRY_CONFIG = { + MAX_RETRIES: 3, + RETRIES_PER_URL_WITH_FAILOVER: 1, + JITTER_FACTOR: 0.3, + ABSOLUTE_MAX_BACKOFF_MS: 5000, + STANDARD: { + BASE_MS: 500, + CAP_MS: 3000, + }, + RATE_LIMIT: { + BASE_MS: 1000, + CAP_MS: 5000, + }, +} as const; + +function delay(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +/** + * Compute the backoff delay for a given retry attempt and failure kind. + * Adds randomized jitter to avoid lockstep retries across concurrent callers. + * Guaranteed to return at most `RPC_RETRY_CONFIG.ABSOLUTE_MAX_BACKOFF_MS`. 
+ */ +export function getRpcBackoffMs( + attempt: number, + kind: RpcFailureKind +): number { + const schedule = + kind === "rate_limit" + ? RPC_RETRY_CONFIG.RATE_LIMIT + : RPC_RETRY_CONFIG.STANDARD; + const base = Math.min(schedule.BASE_MS * 2 ** attempt, schedule.CAP_MS); + const jitter = Math.random() * base * RPC_RETRY_CONFIG.JITTER_FACTOR; + return Math.min(base + jitter, RPC_RETRY_CONFIG.ABSOLUTE_MAX_BACKOFF_MS); +} + +/** + * Encode an ERC20 `balanceOf(address)` call payload. + * Validates the address is a well-formed 20-byte hex string to prevent + * silent mis-encoding from over-long input slipping past `padStart`. + */ +export function encodeBalanceOfCallData(address: string): string { + if (!EVM_ADDRESS_REGEX.test(address)) { + throw new Error(`Invalid EVM address: ${address}`); + } + const stripped = address.startsWith("0x") ? address.slice(2) : address; + const padded = stripped.toLowerCase().padStart(ERC20_ADDRESS_PADDING, "0"); + return `${ERC20_BALANCE_OF_SELECTOR}${padded}`; +} + +/** + * Parse a hex wei string into BigInt, treating empty `"0x"` as zero. + * `rpcCall` guarantees the input is a non-empty string. + */ +export function hexWeiToBigInt(hex: string): bigint { + return hex === "0x" ? BIGINT_ZERO : BigInt(hex); +} + +/** + * Execute a JSON-RPC POST with retry/backoff for transient failures. + * + * Retries: HTTP 429, HTTP 5xx, network errors, and missing `result` fields + * (malformed gateway responses — the root cause behind `BigInt(undefined)`). + * Does not retry HTTP 4xx (except 429) or RPC-reported errors — those are + * deterministic and would fail again. + * + * Each retry adds a Sentry breadcrumb so the retry history is attached to + * any error eventually captured on the same scope. + * + * Returns the raw `result` string (guaranteed non-empty). Callers interpret + * `"0x"` per their context via {@link hexWeiToBigInt}. 
+ */
+export async function rpcCall(
+  rpcUrl: string,
+  payload: JsonRpcPayload,
+  maxRetries: number = RPC_RETRY_CONFIG.MAX_RETRIES
+): Promise<string> {
+  let lastError: Error = new Error("RPC call failed");
+  let lastFailureKind: RpcFailureKind = "standard";
+
+  for (let attempt = 0; attempt <= maxRetries; attempt++) {
+    if (attempt > 0) {
+      const backoffMs = getRpcBackoffMs(attempt - 1, lastFailureKind);
+      addBreadcrumb({
+        category: "rpc.retry",
+        level: "info",
+        message: `Retrying RPC after ${lastFailureKind} failure: ${lastError.message}`,
+        data: {
+          url: rpcUrl,
+          method: payload.method,
+          attempt,
+          backoffMs: Math.round(backoffMs),
+          kind: lastFailureKind,
+        },
+      });
+      await delay(backoffMs);
+    }
+
+    let response: Response;
+    try {
+      response = await fetch(rpcUrl, {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify(payload),
+      });
+    } catch (error) {
+      lastError = error instanceof Error ? error : new Error(String(error));
+      lastFailureKind = "standard";
+      continue;
+    }
+
+    if (response.status === 429) {
+      lastError = new Error("HTTP 429: rate limited");
+      lastFailureKind = "rate_limit";
+      continue;
+    }
+
+    if (response.status >= 500) {
+      lastError = new Error(`HTTP ${response.status}: ${response.statusText}`);
+      lastFailureKind = "standard";
+      continue;
+    }
+
+    if (!response.ok) {
+      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+    }
+
+    const data = await response.json();
+    if (data.error) {
+      throw new Error(data.error.message || "RPC error");
+    }
+
+    if (data.result === undefined || data.result === null) {
+      lastError = new Error("RPC returned no result");
+      lastFailureKind = "standard";
+      continue;
+    }
+
+    return data.result;
+  }
+
+  throw lastError;
+}
+
+/**
+ * Execute a JSON-RPC call across a primary URL with optional fallbacks.
+ *
+ * When more than one URL is provided, each URL uses the reduced
+ * `RETRIES_PER_URL_WITH_FAILOVER` budget so a throttled primary hands off to
+ * the fallback quickly instead of burning the full retry schedule first.
+ *
+ * Throws the last error after all URLs are exhausted. A Sentry breadcrumb is
+ * emitted for every failover hop.
+ */
+export async function rpcCallWithFailover(
+  rpcUrls: ReadonlyArray<string>,
+  payload: JsonRpcPayload
+): Promise<string> {
+  if (rpcUrls.length === 0) {
+    throw new Error("rpcCallWithFailover requires at least one URL");
+  }
+
+  const maxRetries =
+    rpcUrls.length > 1
+      ? RPC_RETRY_CONFIG.RETRIES_PER_URL_WITH_FAILOVER
+      : RPC_RETRY_CONFIG.MAX_RETRIES;
+
+  let lastError: Error = new Error("RPC call failed");
+
+  for (const [i, url] of rpcUrls.entries()) {
+    try {
+      return await rpcCall(url, payload, maxRetries);
+    } catch (error) {
+      lastError = error instanceof Error ? error : new Error(String(error));
+      const nextUrl = rpcUrls[i + 1];
+      if (nextUrl) {
+        addBreadcrumb({
+          category: "rpc.failover",
+          level: "info",
+          message: `RPC primary failed, failing over: ${lastError.message}`,
+          data: {
+            method: payload.method,
+            failedUrl: url,
+            nextUrl,
+          },
+        });
+      }
+    }
+  }
+
+  throw lastError;
+}
diff --git a/lib/wallet/types.ts b/lib/wallet/types.ts
index ba82eec57..44fd03993 100644
--- a/lib/wallet/types.ts
+++ b/lib/wallet/types.ts
@@ -16,6 +16,7 @@ export type ChainData = {
   symbol: string;
   chainType: string;
   defaultPrimaryRpc: string;
+  defaultFallbackRpc: string | null;
   explorerUrl: string | null;
   explorerAddressPath: string | null;
   isTestnet: boolean;
diff --git a/lib/workflow/edge-helpers.ts b/lib/workflow/edge-helpers.ts
new file mode 100644
index 000000000..09f019c42
--- /dev/null
+++ b/lib/workflow/edge-helpers.ts
@@ -0,0 +1,46 @@
+import type { Edge as XYFlowEdge } from "@xyflow/react";
+
+/** Normalize handle IDs so null/undefined/"" compare equal.
 */
+export function normalizeHandle(handle: string | null | undefined): string {
+  return handle ?? "";
+}
+
+type EdgeLike = {
+  source: string;
+  target: string;
+  sourceHandle?: string | null;
+  targetHandle?: string | null;
+};
+
+/** True if an edge with the same source/sourceHandle -> target/targetHandle
+ * already exists. Allows multiple connections between the same pair when
+ * they use different handles (e.g. Condition true/false to the same target). */
+export function hasDuplicateEdge(
+  edges: readonly XYFlowEdge[],
+  candidate: EdgeLike
+): boolean {
+  const sh = normalizeHandle(candidate.sourceHandle);
+  const th = normalizeHandle(candidate.targetHandle);
+  return edges.some(
+    (e) =>
+      e.source === candidate.source &&
+      e.target === candidate.target &&
+      normalizeHandle(e.sourceHandle) === sh &&
+      normalizeHandle(e.targetHandle) === th
+  );
+}
+
+/** Return a new array with duplicate edges removed, preserving first occurrence.
+ * Duplicate is defined identically to {@link hasDuplicateEdge}. */
+export function dedupeEdges<E extends EdgeLike>(edges: readonly E[]): E[] {
+  const seen = new Set<string>();
+  const result: E[] = [];
+  for (const edge of edges) {
+    const key = `${edge.source}\u0000${normalizeHandle(edge.sourceHandle)}\u0000${edge.target}\u0000${normalizeHandle(edge.targetHandle)}`;
+    if (!seen.has(key)) {
+      seen.add(key);
+      result.push(edge);
+    }
+  }
+  return result;
+}
diff --git a/lib/x402/execution-wait.ts b/lib/x402/execution-wait.ts
new file mode 100644
index 000000000..73b17d3b3
--- /dev/null
+++ b/lib/x402/execution-wait.ts
@@ -0,0 +1,145 @@
+import "server-only";
+
+import { and, desc, eq } from "drizzle-orm";
+import { db } from "@/lib/db";
+import { workflowExecutionLogs, workflowExecutions } from "@/lib/db/schema";
+
+/**
+ * Default timeout for waiting on read-workflow completion before falling back
+ * to the async `{executionId, status: "running"}` response. Kept under typical
+ * HTTP/MCP client timeouts (~30s) so clients don't time out on us.
+ */ +export const DEFAULT_CALL_WAIT_TIMEOUT_MS = 25_000; +const DEFAULT_POLL_INTERVAL_MS = 250; + +type TerminalStatus = "success" | "error" | "cancelled"; + +type ExecutionResult = { + status: TerminalStatus; + output: unknown; + error: string | null; +}; + +/** + * Poll workflowExecutions.status until it reaches a terminal state (success, + * error, cancelled) or the timeout elapses. Returns null on timeout. + */ +export async function waitForExecutionCompletion( + executionId: string, + timeoutMs: number = DEFAULT_CALL_WAIT_TIMEOUT_MS, + pollIntervalMs: number = DEFAULT_POLL_INTERVAL_MS +): Promise<ExecutionResult | null> { + if (timeoutMs <= 0) { + return null; + } + const deadline = Date.now() + timeoutMs; + while (true) { + const row = await db.query.workflowExecutions.findFirst({ + where: eq(workflowExecutions.id, executionId), + columns: { status: true, output: true, error: true }, + }); + if (!row) { + return null; + } + if ( + row.status === "success" || + row.status === "error" || + row.status === "cancelled" + ) { + return { + status: row.status, + output: row.output, + error: row.error ?? null, + }; + } + if (Date.now() + pollIntervalMs >= deadline) { + return null; + } + await new Promise((resolve) => setTimeout(resolve, pollIntervalMs)); + } +} + +/** + * Resolve the payload returned inline to the caller for a completed read + * workflow. outputMapping shape is `{ nodeId?: string, fields?: string[] }`: + * - If nodeId is set, fetch that node's successful log output. + * - If fields is set, pick only those keys from the node output. + * - Otherwise, return the workflow-level output as-is. + */ +export async function applyOutputMapping( + executionId: string, + workflowOutput: unknown, + outputMapping: Record<string, unknown> | null | undefined +): Promise<unknown> { + if (!outputMapping || typeof outputMapping !== "object") { + return workflowOutput; + } + const mapping = outputMapping as { nodeId?: unknown; fields?: unknown }; + const nodeId = typeof mapping.nodeId === "string" ? 
mapping.nodeId : null; + if (!nodeId) { + return workflowOutput; + } + + const log = await db.query.workflowExecutionLogs.findFirst({ + where: and( + eq(workflowExecutionLogs.executionId, executionId), + eq(workflowExecutionLogs.nodeId, nodeId), + eq(workflowExecutionLogs.status, "success") + ), + orderBy: [desc(workflowExecutionLogs.completedAt)], + }); + const nodeOutput = log?.output ?? workflowOutput; + + if ( + Array.isArray(mapping.fields) && + mapping.fields.length > 0 && + nodeOutput && + typeof nodeOutput === "object" + ) { + const picked: Record<string, unknown> = {}; + for (const field of mapping.fields) { + if (typeof field === "string") { + picked[field] = (nodeOutput as Record<string, unknown>)[field]; + } + } + return picked; + } + return nodeOutput; +} + +export type CallCompletionResponse = + | { executionId: string; status: "success"; output: unknown } + | { executionId: string; status: "error"; error: string } + | { executionId: string; status: "running" }; + +/** + * Wait for the read-workflow execution to complete, then build the response + * payload. On timeout, returns `{status: "running"}` so clients can fall back + * to polling the existing status/logs endpoints. + */ +export async function buildCallCompletionResponse( + executionId: string, + outputMapping: Record<string, unknown> | null | undefined, + timeoutMs: number = DEFAULT_CALL_WAIT_TIMEOUT_MS +): Promise<CallCompletionResponse> { + const result = await waitForExecutionCompletion(executionId, timeoutMs); + if (!result) { + return { executionId, status: "running" }; + } + if (result.status === "success") { + const output = await applyOutputMapping( + executionId, + result.output, + outputMapping + ); + return { executionId, status: "success", output }; + } + if (result.status === "cancelled") { + return { executionId, status: "error", error: "Execution cancelled" }; + } + return { + executionId, + status: "error", + error: result.error ?? 
"Execution failed", + }; +} diff --git a/lib/x402/types.ts b/lib/x402/types.ts index 2734d7eb1..2aeb326a2 100644 --- a/lib/x402/types.ts +++ b/lib/x402/types.ts @@ -4,6 +4,7 @@ import { workflows } from "@/lib/db/schema"; * Exact columns the call route needs from the workflows table. * priceUsdcPerCall returns string | null from Drizzle (numeric column). * nodes and edges are needed for execution; userId for creating the execution record. + * category/tagId feed the x402 Bazaar extensions for marketplace discovery. */ export type CallRouteWorkflow = { id: string; @@ -19,6 +20,8 @@ export type CallRouteWorkflow = { nodes: unknown[]; edges: unknown[]; userId: string; + category: string | null; + tagName: string | null; }; /** @@ -40,4 +43,5 @@ export const CALL_ROUTE_COLUMNS = { nodes: workflows.nodes, edges: workflows.edges, userId: workflows.userId, + category: workflows.category, } as const; diff --git a/plugins/registry.ts b/plugins/registry.ts index e00cdb96e..bfa3493ab 100644 --- a/plugins/registry.ts +++ b/plugins/registry.ts @@ -51,6 +51,9 @@ export type ActionConfigFieldBase = { // For chain-select: filter by chain type (e.g., "evm" or "solana") chainTypeFilter?: string; + // For chain-select: restrict to specific chain IDs (e.g., ["1", "8453"]) + allowedChainIds?: string[]; + // Placeholder text placeholder?: string; diff --git a/protocols/aave-v3.ts b/protocols/aave-v3.ts index fa71f5f06..686555a5d 100644 --- a/protocols/aave-v3.ts +++ b/protocols/aave-v3.ts @@ -2,7 +2,7 @@ import { defineProtocol } from "@/lib/protocol-registry"; export default defineProtocol({ name: "Aave V3", - slug: "aave", + slug: "aave-v3", description: "Aave V3 lending and borrowing protocol -- supply, borrow, repay, and monitor account health", website: "https://aave.com", diff --git a/protocols/aave-v4.ts b/protocols/aave-v4.ts new file mode 100644 index 000000000..a62ca49c6 --- /dev/null +++ b/protocols/aave-v4.ts @@ -0,0 +1,196 @@ +import { defineAbiProtocol } from 
"@/lib/protocol-registry"; +import aaveV4Abi from "./abis/aave-v4.json"; + +// Aave V4 launched on Ethereum mainnet 2026-03-30 with a Hub-and-Spoke +// architecture. Users interact with Spokes (not Hubs) for supply/borrow. +// Each Spoke is tied to an ecosystem partner and has its own set of reserves +// identified by an opaque uint256 reserveId. Use `get-reserve-id` to resolve +// an asset into its reserveId before calling supply/withdraw/borrow/repay. +// +// This first cut exposes the Lido Spoke only - the most established of the +// six launch Spokes (Lido, EtherFi, Kelp, Ethena Correlated, Ethena +// Ecosystem, Lombard BTC). Additional Spokes can be added as contract +// entries sharing the same ABI. +// +// Integration tests are gated on the separate aave-v4-mainnet-onchain +// test file - no Sepolia V4 deployment exists at launch. + +export default defineAbiProtocol({ + name: "Aave V4", + slug: "aave-v4", + description: + "Aave V4 Hub-and-Spoke lending protocol - supply, borrow, repay and monitor positions via the Lido Spoke", + website: "https://aave.com", + icon: "/protocols/aave.png", + + contracts: { + lidoSpoke: { + label: "Aave V4 Lido Spoke", + abi: JSON.stringify(aaveV4Abi), + addresses: { + "1": "0xe1900480ac69f0B296841Cd01cC37546d92F35Cd", + }, + overrides: { + // Write actions (supply/withdraw/borrow/repay) omit output overrides + // pending KEEP-296. writeContractCore returns result: undefined, so + // UI template suggestions are gated in buildOutputFieldsFromAction; + // named overrides would be dead metadata until the write path decodes + // function returns. + supply: { + slug: "supply", + label: "Supply Asset", + description: + "Supply an asset to the Aave V4 Lido Spoke to earn interest. Amount is in the underlying asset's smallest unit (wei for 18-decimal tokens).", + inputs: { + reserveId: { + label: "Reserve ID", + helpTip: + "Opaque uint256 identifier for a reserve within this Spoke. 
Use the Get Reserve ID action to resolve from (hub, assetId).", + docUrl: "https://aave.com/docs/aave-v4/liquidity/spokes", + }, + amount: { label: "Amount (wei)" }, + onBehalfOf: { label: "On Behalf Of Address" }, + }, + }, + withdraw: { + slug: "withdraw", + label: "Withdraw Asset", + description: "Withdraw a supplied asset from the Aave V4 Lido Spoke", + inputs: { + reserveId: { + label: "Reserve ID", + docUrl: "https://aave.com/docs/aave-v4/liquidity/spokes", + }, + amount: { label: "Amount (wei)" }, + onBehalfOf: { label: "Recipient Address" }, + }, + }, + borrow: { + slug: "borrow", + label: "Borrow Asset", + description: + "Borrow an asset from the Aave V4 Lido Spoke against supplied collateral. V4 uses a single rate model (no stable/variable mode).", + inputs: { + reserveId: { + label: "Reserve ID", + docUrl: "https://aave.com/docs/aave-v4/positions/borrow", + }, + amount: { label: "Amount (wei)" }, + onBehalfOf: { label: "On Behalf Of Address" }, + }, + }, + repay: { + slug: "repay", + label: "Repay Debt", + description: "Repay a borrowed asset to the Aave V4 Lido Spoke", + inputs: { + reserveId: { + label: "Reserve ID", + docUrl: "https://aave.com/docs/aave-v4/positions/borrow", + }, + amount: { label: "Amount (wei)" }, + onBehalfOf: { label: "On Behalf Of Address" }, + }, + }, + setUsingAsCollateral: { + slug: "set-collateral", + label: "Set Asset as Collateral", + description: + "Enable or disable a supplied reserve as collateral in the Aave V4 Lido Spoke", + inputs: { + reserveId: { label: "Reserve ID" }, + usingAsCollateral: { + label: "Use as Collateral", + helpTip: + "Toggles the entire supplied balance of this reserve as collateral. 
There is no partial collateral in Aave V4.", + docUrl: "https://aave.com/docs/aave-v4/positions/supply", + }, + onBehalfOf: { label: "On Behalf Of Address" }, + }, + }, + getReserveId: { + slug: "get-reserve-id", + label: "Get Reserve ID", + description: + "Resolve an asset to its reserveId within this Spoke, given the Hub address and the Hub's assetId for that asset", + inputs: { + hub: { label: "Hub Address" }, + assetId: { + label: "Hub Asset ID", + helpTip: + "Asset identifier within the Hub. Use the Hub's getAssetId(underlying) to resolve from an ERC-20 address.", + docUrl: "https://aave.com/docs/aave-v4/liquidity/spokes", + }, + }, + outputs: { + result: { + name: "reserveId", + label: "Reserve ID", + }, + }, + }, + getUserSuppliedAssets: { + slug: "get-user-supplied-assets", + label: "Get User Supplied Assets", + description: + "Get the amount of underlying asset supplied by a user for a given reserve", + inputs: { + reserveId: { + label: "Reserve ID", + docUrl: "https://aave.com/docs/aave-v4/positions/supply", + }, + user: { label: "User Address" }, + }, + outputs: { + result: { + name: "suppliedAmount", + label: "Supplied Amount (underlying)", + }, + }, + }, + getUserDebt: { + slug: "get-user-debt", + label: "Get User Debt", + description: + "Get the debt of a user for a given reserve, split into drawn debt and premium debt. Total debt = drawn + premium.", + inputs: { + reserveId: { + label: "Reserve ID", + docUrl: "https://aave.com/docs/aave-v4/positions/borrow", + }, + user: { label: "User Address" }, + }, + outputs: { + result0: { + name: "drawnDebt", + label: "Drawn Debt (underlying)", + }, + result1: { + name: "premiumDebt", + label: "Premium Debt (underlying)", + }, + }, + }, + getUserAccountData: { + slug: "get-user-account-data", + label: "Get User Account Data", + description: + "Get overall account health including collateral value, debt, health factor, and risk premium. Returns a struct - access individual fields via dotted path (e.g. 
result.healthFactor).", + inputs: { + user: { + label: "User Address", + docUrl: "https://aave.com/docs/aave-v4/positions", + }, + }, + outputs: { + result: { + name: "accountData", + label: + "Account Data (struct: riskPremium, avgCollateralFactor, healthFactor, totalCollateralValue, totalDebtValueRay, activeCollateralCount, borrowCount)", + }, + }, + }, + }, + }, + }, +}); diff --git a/protocols/abis/aave-v4.json b/protocols/abis/aave-v4.json new file mode 100644 index 000000000..b0d816068 --- /dev/null +++ b/protocols/abis/aave-v4.json @@ -0,0 +1,123 @@ +[ + { + "type": "function", + "name": "supply", + "stateMutability": "nonpayable", + "inputs": [ + { "name": "reserveId", "type": "uint256" }, + { "name": "amount", "type": "uint256" }, + { "name": "onBehalfOf", "type": "address" } + ], + "outputs": [ + { "name": "", "type": "uint256" }, + { "name": "", "type": "uint256" } + ] + }, + { + "type": "function", + "name": "withdraw", + "stateMutability": "nonpayable", + "inputs": [ + { "name": "reserveId", "type": "uint256" }, + { "name": "amount", "type": "uint256" }, + { "name": "onBehalfOf", "type": "address" } + ], + "outputs": [ + { "name": "", "type": "uint256" }, + { "name": "", "type": "uint256" } + ] + }, + { + "type": "function", + "name": "borrow", + "stateMutability": "nonpayable", + "inputs": [ + { "name": "reserveId", "type": "uint256" }, + { "name": "amount", "type": "uint256" }, + { "name": "onBehalfOf", "type": "address" } + ], + "outputs": [ + { "name": "", "type": "uint256" }, + { "name": "", "type": "uint256" } + ] + }, + { + "type": "function", + "name": "repay", + "stateMutability": "nonpayable", + "inputs": [ + { "name": "reserveId", "type": "uint256" }, + { "name": "amount", "type": "uint256" }, + { "name": "onBehalfOf", "type": "address" } + ], + "outputs": [ + { "name": "", "type": "uint256" }, + { "name": "", "type": "uint256" } + ] + }, + { + "type": "function", + "name": "setUsingAsCollateral", + "stateMutability": "nonpayable", + 
"inputs": [ + { "name": "reserveId", "type": "uint256" }, + { "name": "usingAsCollateral", "type": "bool" }, + { "name": "onBehalfOf", "type": "address" } + ], + "outputs": [] + }, + { + "type": "function", + "name": "getReserveId", + "stateMutability": "view", + "inputs": [ + { "name": "hub", "type": "address" }, + { "name": "assetId", "type": "uint256" } + ], + "outputs": [{ "name": "", "type": "uint256" }] + }, + { + "type": "function", + "name": "getUserSuppliedAssets", + "stateMutability": "view", + "inputs": [ + { "name": "reserveId", "type": "uint256" }, + { "name": "user", "type": "address" } + ], + "outputs": [{ "name": "", "type": "uint256" }] + }, + { + "type": "function", + "name": "getUserDebt", + "stateMutability": "view", + "inputs": [ + { "name": "reserveId", "type": "uint256" }, + { "name": "user", "type": "address" } + ], + "outputs": [ + { "name": "", "type": "uint256" }, + { "name": "", "type": "uint256" } + ] + }, + { + "type": "function", + "name": "getUserAccountData", + "stateMutability": "view", + "inputs": [{ "name": "user", "type": "address" }], + "outputs": [ + { + "name": "", + "type": "tuple", + "components": [ + { "name": "riskPremium", "type": "uint256" }, + { "name": "avgCollateralFactor", "type": "uint256" }, + { "name": "healthFactor", "type": "uint256" }, + { "name": "totalCollateralValue", "type": "uint256" }, + { "name": "totalDebtValueRay", "type": "uint256" }, + { "name": "activeCollateralCount", "type": "uint256" }, + { "name": "borrowCount", "type": "uint256" } + ] + } + ] + } +] diff --git a/protocols/index.ts b/protocols/index.ts index a037e8506..26b4a0923 100644 --- a/protocols/index.ts +++ b/protocols/index.ts @@ -8,13 +8,14 @@ * This ensures the protocol registry is populated when the Next.js * server starts (via the plugin import chain). 
* - * Registered protocols: aave, aerodrome, ajna, chainlink, chronicle, compound, cowswap, curve, ethena, lido, morpho, pendle, rocket-pool, safe, sky, spark, uniswap, wrapped, yearn + * Registered protocols: aave-v3, aave-v4, aerodrome, ajna, chainlink, chronicle, compound, cowswap, curve, ethena, lido, morpho, pendle, rocket-pool, safe, sky, spark, uniswap, wrapped, yearn */ import { protocolToPlugin, registerProtocol } from "@/lib/protocol-registry"; import { registerIntegration } from "@/plugins/registry"; -import aaveDef from "./aave-v3"; +import aaveV3Def from "./aave-v3"; +import aaveV4Def from "./aave-v4"; import aerodromeDef from "./aerodrome"; import ajnaDef from "./ajna"; import chainlinkDef from "./chainlink"; @@ -34,8 +35,10 @@ import uniswapDef from "./uniswap-v3"; import wrappedDef from "./wrapped"; import yearnDef from "./yearn-v3"; -registerProtocol(aaveDef); -registerIntegration(protocolToPlugin(aaveDef)); +registerProtocol(aaveV3Def); +registerIntegration(protocolToPlugin(aaveV3Def)); +registerProtocol(aaveV4Def); +registerIntegration(protocolToPlugin(aaveV4Def)); registerProtocol(aerodromeDef); registerIntegration(protocolToPlugin(aerodromeDef)); registerProtocol(ajnaDef); diff --git a/scripts/README.md b/scripts/README.md index b7ed22a0e..8914702af 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -70,7 +70,7 @@ The API also supports protocol-level featuring (e.g. 
featured workflows on a pro ```json { "workflowId": "", - "featuredProtocol": "aave", + "featuredProtocol": "aave-v3", "featuredProtocolOrder": 1 } ``` diff --git a/scripts/pr-test/seed-pr-data.sql b/scripts/pr-test/seed-pr-data.sql index e3c4519b0..aa5a3d50e 100644 --- a/scripts/pr-test/seed-pr-data.sql +++ b/scripts/pr-test/seed-pr-data.sql @@ -148,7 +148,7 @@ BEGIN false, false, 0, - '[{"id":"trigger-1","type":"trigger","position":{"x":100,"y":200},"data":{"type":"trigger","label":"Manual Trigger","config":{"triggerType":"Manual"}}},{"id":"action-1","type":"action","position":{"x":400,"y":200},"data":{"type":"action","label":"Wrap ETH to Aave WETH","config":{"actionType":"web3/write-contract","network":"sepolia","contractAddress":"0xC558DBdd856501FCd9aaF1E62eae57A9F0629a3c","abi":"[{\"inputs\":[],\"name\":\"deposit\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"}]","abiFunction":"deposit","functionArgs":"[]","ethValue":"0.001"}}},{"id":"action-2","type":"action","position":{"x":700,"y":200},"data":{"type":"action","label":"Approve ERC20 Token","config":{"actionType":"web3/approve-token","network":"sepolia","tokenConfig":"{\"mode\":\"custom\",\"customToken\":{\"address\":\"0xC558DBdd856501FCd9aaF1E62eae57A9F0629a3c\",\"symbol\":\"WETH\"}}","spenderAddress":"0x6Ae43d3271ff6888e7Fc43Fd7321a503ff738951","amount":"max"}}},{"id":"action-3","type":"action","position":{"x":1000,"y":200},"data":{"type":"action","label":"Aave V3: Get User Account Data","config":{"actionType":"aave/get-user-account-data","network":"11155111","user":"0x4f1089424DCf25B1290631Df483a436B320e51A1","_protocolMeta":"{\"protocolSlug\":\"aave\",\"contractKey\":\"pool\",\"functionName\":\"getUserAccountData\",\"actionType\":\"read\"}"}}},{"id":"action-4","type":"action","position":{"x":1300,"y":200},"data":{"type":"action","label":"Aave V3: Supply 
Asset","config":{"actionType":"aave/supply","network":"11155111","asset":"0xC558DBdd856501FCd9aaF1E62eae57A9F0629a3c","amount":"1000000000000000","onBehalfOf":"0x4f1089424DCf25B1290631Df483a436B320e51A1","referralCode":"0","_protocolMeta":"{\"protocolSlug\":\"aave\",\"contractKey\":\"pool\",\"functionName\":\"supply\",\"actionType\":\"write\"}"}}}]'::jsonb, + '[{"id":"trigger-1","type":"trigger","position":{"x":100,"y":200},"data":{"type":"trigger","label":"Manual Trigger","config":{"triggerType":"Manual"}}},{"id":"action-1","type":"action","position":{"x":400,"y":200},"data":{"type":"action","label":"Wrap ETH to Aave WETH","config":{"actionType":"web3/write-contract","network":"sepolia","contractAddress":"0xC558DBdd856501FCd9aaF1E62eae57A9F0629a3c","abi":"[{\"inputs\":[],\"name\":\"deposit\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"}]","abiFunction":"deposit","functionArgs":"[]","ethValue":"0.001"}}},{"id":"action-2","type":"action","position":{"x":700,"y":200},"data":{"type":"action","label":"Approve ERC20 Token","config":{"actionType":"web3/approve-token","network":"sepolia","tokenConfig":"{\"mode\":\"custom\",\"customToken\":{\"address\":\"0xC558DBdd856501FCd9aaF1E62eae57A9F0629a3c\",\"symbol\":\"WETH\"}}","spenderAddress":"0x6Ae43d3271ff6888e7Fc43Fd7321a503ff738951","amount":"max"}}},{"id":"action-3","type":"action","position":{"x":1000,"y":200},"data":{"type":"action","label":"Aave V3: Get User Account Data","config":{"actionType":"aave-v3/get-user-account-data","network":"11155111","user":"0x4f1089424DCf25B1290631Df483a436B320e51A1","_protocolMeta":"{\"protocolSlug\":\"aave-v3\",\"contractKey\":\"pool\",\"functionName\":\"getUserAccountData\",\"actionType\":\"read\"}"}}},{"id":"action-4","type":"action","position":{"x":1300,"y":200},"data":{"type":"action","label":"Aave V3: Supply 
Asset","config":{"actionType":"aave-v3/supply","network":"11155111","asset":"0xC558DBdd856501FCd9aaF1E62eae57A9F0629a3c","amount":"1000000000000000","onBehalfOf":"0x4f1089424DCf25B1290631Df483a436B320e51A1","referralCode":"0","_protocolMeta":"{\"protocolSlug\":\"aave-v3\",\"contractKey\":\"pool\",\"functionName\":\"supply\",\"actionType\":\"write\"}"}}}]'::jsonb, '[{"id":"edge-trigger-1-action-1","source":"trigger-1","target":"action-1","type":"default"},{"id":"edge-action-1-action-2","source":"action-1","target":"action-2","type":"default"},{"id":"edge-action-2-action-3","source":"action-2","target":"action-3","type":"default"},{"id":"edge-action-3-action-4","source":"action-3","target":"action-4","type":"default"}]'::jsonb, 'private', true, diff --git a/scripts/seed/workflows/aave/mcp-test-supply-weth.json b/scripts/seed/workflows/aave-v3/mcp-test-supply-weth.json similarity index 96% rename from scripts/seed/workflows/aave/mcp-test-supply-weth.json rename to scripts/seed/workflows/aave-v3/mcp-test-supply-weth.json index 188d7a76a..dd45a30d5 100644 --- a/scripts/seed/workflows/aave/mcp-test-supply-weth.json +++ b/scripts/seed/workflows/aave-v3/mcp-test-supply-weth.json @@ -1,5 +1,5 @@ { - "protocol": "aave", + "protocol": "aave-v3", "network": "1", "networkName": "mainnet", "type": "write", @@ -42,7 +42,7 @@ "label": "Supply WETH to Aave", "type": "action", "config": { - "actionType": "aave/supply", + "actionType": "aave-v3/supply", "network": "1", "asset": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", "amount": "3000000000000000", diff --git a/scripts/seed/workflows/aave/mcp-test-withdraw-weth.json b/scripts/seed/workflows/aave-v3/mcp-test-withdraw-weth.json similarity index 93% rename from scripts/seed/workflows/aave/mcp-test-withdraw-weth.json rename to scripts/seed/workflows/aave-v3/mcp-test-withdraw-weth.json index 294a19607..4404ef4b9 100644 --- a/scripts/seed/workflows/aave/mcp-test-withdraw-weth.json +++ 
b/scripts/seed/workflows/aave-v3/mcp-test-withdraw-weth.json @@ -1,5 +1,5 @@ { - "protocol": "aave", + "protocol": "aave-v3", "network": "1", "networkName": "mainnet", "type": "write", @@ -25,7 +25,7 @@ "label": "Withdraw WETH from Aave", "type": "action", "config": { - "actionType": "aave/withdraw", + "actionType": "aave-v3/withdraw", "network": "1", "asset": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", "amount": "3000000000000000", diff --git a/scripts/seed/workflows/aave/read-actions.json b/scripts/seed/workflows/aave-v3/read-actions.json similarity index 92% rename from scripts/seed/workflows/aave/read-actions.json rename to scripts/seed/workflows/aave-v3/read-actions.json index 9e6767e5d..2d4561ef2 100644 --- a/scripts/seed/workflows/aave/read-actions.json +++ b/scripts/seed/workflows/aave-v3/read-actions.json @@ -1,5 +1,5 @@ { - "protocol": "aave", + "protocol": "aave-v3", "network": "1", "networkName": "mainnet", "type": "read", @@ -26,7 +26,7 @@ "description": "Get overall account health for a known Aave user", "type": "action", "config": { - "actionType": "aave/get-user-account-data", + "actionType": "aave-v3/get-user-account-data", "network": "1", "user": "0x87870Bca3F3fD6335C3F4ce8392D69350B4fA4E2" }, @@ -42,7 +42,7 @@ "description": "Get per-asset position data for DAI", "type": "action", "config": { - "actionType": "aave/get-user-reserve-data", + "actionType": "aave-v3/get-user-reserve-data", "network": "1", "asset": "0x6B175474E89094C44Da98b954EedeAC495271d0F", "user": "0x87870Bca3F3fD6335C3F4ce8392D69350B4fA4E2" diff --git a/scripts/seed/workflows/aave/write-actions.json b/scripts/seed/workflows/aave-v3/write-actions.json similarity index 94% rename from scripts/seed/workflows/aave/write-actions.json rename to scripts/seed/workflows/aave-v3/write-actions.json index 4f65ef13a..6eaa5c41e 100644 --- a/scripts/seed/workflows/aave/write-actions.json +++ b/scripts/seed/workflows/aave-v3/write-actions.json @@ -1,5 +1,5 @@ { - "protocol": "aave", + 
"protocol": "aave-v3", "network": "11155111", "networkName": "sepolia", "type": "write", @@ -26,7 +26,7 @@ "description": "Supply DAI to Aave V3 on Sepolia", "type": "action", "config": { - "actionType": "aave/supply", + "actionType": "aave-v3/supply", "network": "11155111", "asset": "0xFF34B3d4Aee8ddCd6F9AFFFB6Fe49bD371b8a357", "amount": "1000000000000000000", @@ -45,7 +45,7 @@ "description": "Withdraw DAI from Aave V3 on Sepolia", "type": "action", "config": { - "actionType": "aave/withdraw", + "actionType": "aave-v3/withdraw", "network": "11155111", "asset": "0xFF34B3d4Aee8ddCd6F9AFFFB6Fe49bD371b8a357", "amount": "1000000000000000000", @@ -63,7 +63,7 @@ "description": "Borrow DAI from Aave V3 on Sepolia", "type": "action", "config": { - "actionType": "aave/borrow", + "actionType": "aave-v3/borrow", "network": "11155111", "asset": "0xFF34B3d4Aee8ddCd6F9AFFFB6Fe49bD371b8a357", "amount": "1000000000000000000", @@ -83,7 +83,7 @@ "description": "Repay DAI to Aave V3 on Sepolia", "type": "action", "config": { - "actionType": "aave/repay", + "actionType": "aave-v3/repay", "network": "11155111", "asset": "0xFF34B3d4Aee8ddCd6F9AFFFB6Fe49bD371b8a357", "amount": "1000000000000000000", @@ -102,7 +102,7 @@ "description": "Enable DAI as collateral on Aave V3 Sepolia", "type": "action", "config": { - "actionType": "aave/set-collateral", + "actionType": "aave-v3/set-collateral", "network": "11155111", "asset": "0xFF34B3d4Aee8ddCd6F9AFFFB6Fe49bD371b8a357", "useAsCollateral": "true" diff --git a/tests/integration/protocol-aave-v4-onchain.test.ts b/tests/integration/protocol-aave-v4-onchain.test.ts new file mode 100644 index 000000000..46afe4c49 --- /dev/null +++ b/tests/integration/protocol-aave-v4-onchain.test.ts @@ -0,0 +1,200 @@ +/** + * Aave V4 On-Chain Integration Tests (Lido Spoke) + * + * Verifies that the ABI-driven Aave V4 protocol definition produces valid + * calldata that the deployed Lido Spoke contract accepts. Runs against a + * live Ethereum mainnet RPC endpoint. 
+ * + * Uses a separate env var (INTEGRATION_TEST_MAINNET_RPC_URL) because Aave V4 + * has no Sepolia deployment - the existing INTEGRATION_TEST_RPC_URL targets + * Sepolia and would produce address mismatches. + * + * Gated on INTEGRATION_TEST_MAINNET_RPC_URL - skipped in CI without it. + */ + +import { ethers } from "ethers"; +import { describe, expect, it } from "vitest"; +import { reshapeArgsForAbi } from "@/lib/abi-struct-args"; +import type { + ProtocolAction, + ProtocolContract, + ProtocolDefinition, +} from "@/lib/protocol-registry"; +import aaveV4Def from "@/protocols/aave-v4"; + +const RPC_URL = process.env.INTEGRATION_TEST_MAINNET_RPC_URL; +const CHAIN_ID = "1"; +const TEST_ADDRESS = "0x0000000000000000000000000000000000000001"; +const CORE_HUB = "0xCca852Bc40e560adC3b1Cc58CA5b55638ce826c9"; + +function buildCalldata( + protocol: ProtocolDefinition, + actionSlug: string, + sampleInputs: Record<string, string> +): { + to: string; + data: string; + action: ProtocolAction; + contract: ProtocolContract; +} { + const action = protocol.actions.find((a) => a.slug === actionSlug); + if (!action) { + throw new Error(`Action ${actionSlug} not found`); + } + + const contract = protocol.contracts[action.contract]; + if (!contract.abi) { + throw new Error(`Contract ${action.contract} has no ABI`); + } + + const contractAddress = contract.addresses[CHAIN_ID]; + if (!contractAddress) { + throw new Error(`Contract ${action.contract} not on chain ${CHAIN_ID}`); + } + + const rawArgs = action.inputs.map((inp) => { + const val = sampleInputs[inp.name] ?? inp.default ?? 
""; + return val; + }); + + const abi = JSON.parse(contract.abi); + const functionAbi = abi.find( + (f: { name: string; type: string }) => + f.type === "function" && f.name === action.function + ); + const args = reshapeArgsForAbi(rawArgs, functionAbi); + const iface = new ethers.Interface(abi); + const data = iface.encodeFunctionData(action.function, args); + + return { to: contractAddress, data, action, contract }; +} + +// Assertion model: +// - Read tests: let the RPC call fail loudly. A success path asserts the +// decoded return has the expected shape; anything else (network error, +// ABI mismatch, decode failure) surfaces as a real test failure instead +// of being swallowed. +// - Write tests: use provider.call (not estimateGas) against a zero-balance +// TEST_ADDRESS. The contract should either (a) revert with CALL_EXCEPTION +// on business logic, or (b) succeed and return "0x" for void functions. +// Both outcomes prove the deployed bytecode understood the calldata. +// What we reject: calldata-level ethers errors (INVALID_ARGUMENT, BAD_DATA, +// BUFFER_OVERRUN) which would indicate the ABI doesn't match the +// deployed contract. Observed: supply reverts (ERC20 transferFrom fails +// on zero allowance); setUsingAsCollateral silently succeeds on +// reserveId=0 because the Spoke no-ops on nonexistent reserves. 
+describe.skipIf(!RPC_URL)("Aave V4 Lido Spoke on-chain integration", () => { + const getProvider = (): ethers.JsonRpcProvider => + new ethers.JsonRpcProvider(RPC_URL); + + it("getReserveId: eth_call returns a decodable uint256", async () => { + const { to, data, contract } = buildCalldata(aaveV4Def, "get-reserve-id", { + hub: CORE_HUB, + assetId: "0", + }); + + const provider = getProvider(); + const result = await provider.call({ to, data }); + const abi = JSON.parse(contract.abi as string); + const iface = new ethers.Interface(abi); + const decoded = iface.decodeFunctionResult("getReserveId", result); + expect(decoded).toBeDefined(); + expect(typeof decoded[0]).toBe("bigint"); + }, 15_000); + + it("getUserSuppliedAssets: eth_call returns a decodable uint256", async () => { + const { to, data, contract } = buildCalldata( + aaveV4Def, + "get-user-supplied-assets", + { reserveId: "0", user: TEST_ADDRESS } + ); + + const provider = getProvider(); + const result = await provider.call({ to, data }); + const abi = JSON.parse(contract.abi as string); + const iface = new ethers.Interface(abi); + const decoded = iface.decodeFunctionResult( + "getUserSuppliedAssets", + result + ); + expect(decoded).toBeDefined(); + expect(typeof decoded[0]).toBe("bigint"); + }, 15_000); + + it("getUserDebt: eth_call returns two decodable uint256 values", async () => { + const { to, data, contract } = buildCalldata(aaveV4Def, "get-user-debt", { + reserveId: "0", + user: TEST_ADDRESS, + }); + + const provider = getProvider(); + const result = await provider.call({ to, data }); + const abi = JSON.parse(contract.abi as string); + const iface = new ethers.Interface(abi); + const decoded = iface.decodeFunctionResult("getUserDebt", result); + expect(decoded).toBeDefined(); + expect(decoded.length).toBeGreaterThanOrEqual(2); + expect(typeof decoded[0]).toBe("bigint"); + expect(typeof decoded[1]).toBe("bigint"); + }, 15_000); + + it("getUserAccountData: eth_call returns a decodable struct with 
named fields", async () => { + const { to, data, contract } = buildCalldata( + aaveV4Def, + "get-user-account-data", + { user: TEST_ADDRESS } + ); + + const provider = getProvider(); + const result = await provider.call({ to, data }); + const abi = JSON.parse(contract.abi as string); + const iface = new ethers.Interface(abi); + const decoded = iface.decodeFunctionResult("getUserAccountData", result); + expect(decoded).toBeDefined(); + const struct = decoded[0]; + expect(typeof struct.healthFactor).toBe("bigint"); + expect(typeof struct.totalCollateralValue).toBe("bigint"); + expect(typeof struct.riskPremium).toBe("bigint"); + expect(typeof struct.borrowCount).toBe("bigint"); + }, 15_000); + + it("supply: deployed bytecode accepts the calldata", async () => { + const { to, data } = buildCalldata(aaveV4Def, "supply", { + reserveId: "0", + amount: "1000000000000000000", + onBehalfOf: TEST_ADDRESS, + }); + + const provider = getProvider(); + await expectCallAcceptedByBytecode(provider, { to, data }); + }, 15_000); + + it("setUsingAsCollateral: deployed bytecode accepts the calldata", async () => { + const { to, data } = buildCalldata(aaveV4Def, "set-collateral", { + reserveId: "0", + usingAsCollateral: "true", + onBehalfOf: TEST_ADDRESS, + }); + + const provider = getProvider(); + await expectCallAcceptedByBytecode(provider, { to, data }); + }, 15_000); +}); + +/** + * Asserts the deployed bytecode accepted our calldata: either the call + * returned cleanly (void functions return "0x") or reverted at the contract + * level (CALL_EXCEPTION). Any other error class means the ABI doesn't match + * what's deployed. 
+ */ +async function expectCallAcceptedByBytecode( + provider: ethers.JsonRpcProvider, + tx: { to: string; data: string } +): Promise { + try { + const result = await provider.call({ ...tx, from: TEST_ADDRESS }); + expect(result).toMatch(/^0x/); + } catch (err: unknown) { + expect(err).toMatchObject({ code: "CALL_EXCEPTION" }); + } +} diff --git a/tests/unit/billing-handle-event.test.ts b/tests/unit/billing-handle-event.test.ts index 1d8af1e63..a0cce6037 100644 --- a/tests/unit/billing-handle-event.test.ts +++ b/tests/unit/billing-handle-event.test.ts @@ -66,6 +66,9 @@ function createMockProvider( createCustomer: vi.fn(), createCheckoutSession: vi.fn(), createPortalSession: vi.fn(), + getBillingDetails: vi + .fn() + .mockResolvedValue({ paymentMethod: null, billingEmail: null }), verifyWebhook: vi.fn(), getSubscriptionDetails: vi.fn().mockResolvedValue({ priceId: process.env.STRIPE_PRICE_PRO_25K_MONTHLY, diff --git a/tests/unit/billing-stripe-provider.test.ts b/tests/unit/billing-stripe-provider.test.ts index 0cba4bb07..cb2b72c69 100644 --- a/tests/unit/billing-stripe-provider.test.ts +++ b/tests/unit/billing-stripe-provider.test.ts @@ -2,14 +2,15 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; vi.mock("@/lib/stripe", () => ({ stripe: { - customers: { create: vi.fn() }, + customers: { create: vi.fn(), retrieve: vi.fn() }, checkout: { sessions: { create: vi.fn() } }, billingPortal: { sessions: { create: vi.fn() } }, webhooks: { constructEvent: vi.fn() }, invoiceItems: { create: vi.fn() }, invoices: { list: vi.fn(), createPreview: vi.fn() }, - subscriptions: { retrieve: vi.fn(), update: vi.fn() }, + subscriptions: { retrieve: vi.fn(), update: vi.fn(), list: vi.fn() }, prices: { retrieve: vi.fn() }, + paymentMethods: { list: vi.fn() }, }, })); @@ -776,6 +777,145 @@ describe("StripeBillingProvider", () => { expect(callArgs.metadata).toBeUndefined(); }); }); + + describe("getBillingDetails", () => { + const customerBase = { + id: "cus_1", + deleted: 
false, + email: "user@test.com", + invoice_settings: { default_payment_method: null }, + }; + + function makeCard(last4: string): Record { + return { + id: `pm_${last4}`, + type: "card", + card: { + brand: "visa", + last4, + exp_month: 12, + exp_year: 2030, + }, + }; + } + + type CustomerResponse = Awaited>; + type SubsListResponse = Awaited>; + type PMListResponse = Awaited>; + + it("tier 1: returns the customer's default payment method", async () => { + vi.mocked(s.customers.retrieve).mockResolvedValue({ + ...customerBase, + invoice_settings: { default_payment_method: makeCard("1111") }, + } as unknown as CustomerResponse); + + const result = await provider.getBillingDetails("cus_1"); + + expect(result).toEqual({ + paymentMethod: { + brand: "visa", + last4: "1111", + expMonth: 12, + expYear: 2030, + }, + billingEmail: "user@test.com", + }); + expect(s.subscriptions.list).not.toHaveBeenCalled(); + expect(s.paymentMethods.list).not.toHaveBeenCalled(); + }); + + it("tier 2: returns the active subscription's default PM when customer has none", async () => { + vi.mocked(s.customers.retrieve).mockResolvedValue( + customerBase as unknown as CustomerResponse + ); + vi.mocked(s.subscriptions.list).mockResolvedValueOnce({ + data: [{ default_payment_method: makeCard("2222") }], + } as unknown as SubsListResponse); + + const result = await provider.getBillingDetails("cus_1"); + + expect(result.paymentMethod?.last4).toBe("2222"); + expect(s.subscriptions.list).toHaveBeenCalledTimes(1); + expect(s.subscriptions.list).toHaveBeenCalledWith( + expect.objectContaining({ status: "active" }) + ); + expect(s.paymentMethods.list).not.toHaveBeenCalled(); + }); + + it("tier 2 fallback: queries status=all only when the active list is empty", async () => { + vi.mocked(s.customers.retrieve).mockResolvedValue( + customerBase as unknown as CustomerResponse + ); + vi.mocked(s.subscriptions.list) + .mockResolvedValueOnce({ data: [] } as unknown as SubsListResponse) + .mockResolvedValueOnce({ + 
data: [{ default_payment_method: makeCard("3333") }], + } as unknown as SubsListResponse); + + const result = await provider.getBillingDetails("cus_1"); + + expect(result.paymentMethod?.last4).toBe("3333"); + expect(s.subscriptions.list).toHaveBeenCalledTimes(2); + expect(vi.mocked(s.subscriptions.list).mock.calls[0][0]).toMatchObject({ + status: "active", + }); + expect(vi.mocked(s.subscriptions.list).mock.calls[1][0]).toMatchObject({ + status: "all", + }); + expect(s.paymentMethods.list).not.toHaveBeenCalled(); + }); + + it("tier 3: falls back to paymentMethods.list when no subscription has a PM", async () => { + vi.mocked(s.customers.retrieve).mockResolvedValue( + customerBase as unknown as CustomerResponse + ); + vi.mocked(s.subscriptions.list).mockResolvedValue({ + data: [], + } as unknown as SubsListResponse); + vi.mocked(s.paymentMethods.list).mockResolvedValue({ + data: [makeCard("4444")], + } as unknown as PMListResponse); + + const result = await provider.getBillingDetails("cus_1"); + + expect(result.paymentMethod?.last4).toBe("4444"); + expect(s.paymentMethods.list).toHaveBeenCalledWith( + expect.objectContaining({ type: "card", limit: 1 }) + ); + }); + + it("returns null paymentMethod when no card is found anywhere", async () => { + vi.mocked(s.customers.retrieve).mockResolvedValue( + customerBase as unknown as CustomerResponse + ); + vi.mocked(s.subscriptions.list).mockResolvedValue({ + data: [], + } as unknown as SubsListResponse); + vi.mocked(s.paymentMethods.list).mockResolvedValue({ + data: [], + } as unknown as PMListResponse); + + const result = await provider.getBillingDetails("cus_1"); + + expect(result).toEqual({ + paymentMethod: null, + billingEmail: "user@test.com", + }); + }); + + it("returns both nulls for a deleted customer and skips the cascade", async () => { + vi.mocked(s.customers.retrieve).mockResolvedValue({ + id: "cus_1", + deleted: true, + } as unknown as CustomerResponse); + + const result = await 
provider.getBillingDetails("cus_1"); + + expect(result).toEqual({ paymentMethod: null, billingEmail: null }); + expect(s.subscriptions.list).not.toHaveBeenCalled(); + expect(s.paymentMethods.list).not.toHaveBeenCalled(); + }); + }); }); describe("UnknownEventTypeError", () => { diff --git a/tests/unit/earnings-queries.test.ts b/tests/unit/earnings-queries.test.ts index 22530949f..d35e560b1 100644 --- a/tests/unit/earnings-queries.test.ts +++ b/tests/unit/earnings-queries.test.ts @@ -3,6 +3,7 @@ import { describe, expect, it, vi } from "vitest"; vi.mock("server-only", () => ({})); import { + buildPerChainEarnings, computeRevenueSplit, deriveSettlementStatus, formatUsdc, @@ -140,3 +141,53 @@ describe("groupTopCallers", () => { expect(result.size).toBe(0); }); }); + +describe("buildPerChainEarnings", () => { + it("returns zeros for both chains when no rows are present", () => { + const result = buildPerChainEarnings([]); + expect(result.base).toEqual({ + grossRevenue: "$0.00 USDC", + invocationCount: 0, + }); + expect(result.tempo).toEqual({ + grossRevenue: "$0.00 USDC", + invocationCount: 0, + }); + }); + + it("maps base and tempo chain rows into the fixed shape", () => { + const result = buildPerChainEarnings([ + { chain: "base", grossRevenue: "1.50", invocationCount: 15 }, + { chain: "tempo", grossRevenue: "0.75", invocationCount: 9 }, + ]); + expect(result.base.grossRevenue).toBe("$1.50 USDC"); + expect(result.base.invocationCount).toBe(15); + expect(result.tempo.grossRevenue).toBe("$0.75 USDC"); + expect(result.tempo.invocationCount).toBe(9); + }); + + it("leaves the missing chain at zero when only one chain has activity", () => { + const result = buildPerChainEarnings([ + { chain: "base", grossRevenue: "2.00", invocationCount: 20 }, + ]); + expect(result.base.grossRevenue).toBe("$2.00 USDC"); + expect(result.tempo.grossRevenue).toBe("$0.00 USDC"); + expect(result.tempo.invocationCount).toBe(0); + }); + + it("ignores unknown chain values without throwing", () => 
{ + const result = buildPerChainEarnings([ + { chain: "solana", grossRevenue: "99.00", invocationCount: 1 }, + ]); + expect(result.base.grossRevenue).toBe("$0.00 USDC"); + expect(result.tempo.grossRevenue).toBe("$0.00 USDC"); + }); + + it("treats null grossRevenue as zero", () => { + const result = buildPerChainEarnings([ + { chain: "base", grossRevenue: null, invocationCount: 0 }, + ]); + expect(result.base.grossRevenue).toBe("$0.00 USDC"); + expect(result.base.invocationCount).toBe(0); + }); +}); diff --git a/tests/unit/edge-helpers.test.ts b/tests/unit/edge-helpers.test.ts new file mode 100644 index 000000000..2ead67a8e --- /dev/null +++ b/tests/unit/edge-helpers.test.ts @@ -0,0 +1,157 @@ +import type { Edge as XYFlowEdge } from "@xyflow/react"; +import { describe, expect, it } from "vitest"; + +import { + dedupeEdges, + hasDuplicateEdge, + normalizeHandle, +} from "@/lib/workflow/edge-helpers"; + +function edge( + id: string, + source: string, + target: string, + sourceHandle?: string | null, + targetHandle?: string | null +): XYFlowEdge { + return { id, source, target, sourceHandle, targetHandle }; +} + +describe("edge-helpers", () => { + describe("normalizeHandle", () => { + it("returns empty string for null", () => { + expect(normalizeHandle(null)).toBe(""); + }); + + it("returns empty string for undefined", () => { + expect(normalizeHandle(undefined)).toBe(""); + }); + + it("passes through a string value", () => { + expect(normalizeHandle("true")).toBe("true"); + }); + + it("preserves empty string", () => { + expect(normalizeHandle("")).toBe(""); + }); + }); + + describe("hasDuplicateEdge", () => { + it("returns false when no edges exist", () => { + expect( + hasDuplicateEdge([], { source: "a", target: "b" }) + ).toBe(false); + }); + + it("detects duplicate when both handles are null/undefined on both sides", () => { + const existing = [edge("e1", "a", "b")]; + expect( + hasDuplicateEdge(existing, { source: "a", target: "b" }) + ).toBe(true); + }); + + 
it("treats null, undefined, and empty string handles as equivalent", () => { + const existing = [edge("e1", "a", "b", null, null)]; + expect( + hasDuplicateEdge(existing, { + source: "a", + target: "b", + sourceHandle: "", + targetHandle: undefined, + }) + ).toBe(true); + }); + + it("allows different targets from the same source", () => { + const existing = [edge("e1", "a", "b")]; + expect( + hasDuplicateEdge(existing, { source: "a", target: "c" }) + ).toBe(false); + }); + + it("allows different sources to the same target", () => { + const existing = [edge("e1", "a", "c")]; + expect( + hasDuplicateEdge(existing, { source: "b", target: "c" }) + ).toBe(false); + }); + + it("allows same source->target on different source handles (Condition true/false)", () => { + const existing = [edge("e1", "cond", "target", "true")]; + expect( + hasDuplicateEdge(existing, { + source: "cond", + target: "target", + sourceHandle: "false", + }) + ).toBe(false); + }); + + it("rejects same source->target on the same source handle", () => { + const existing = [edge("e1", "cond", "target", "true")]; + expect( + hasDuplicateEdge(existing, { + source: "cond", + target: "target", + sourceHandle: "true", + }) + ).toBe(true); + }); + + it("allows same source->target on different target handles", () => { + const existing = [edge("e1", "a", "b", null, "in-1")]; + expect( + hasDuplicateEdge(existing, { + source: "a", + target: "b", + targetHandle: "in-2", + }) + ).toBe(false); + }); + + it("rejects when any prior edge in the list matches", () => { + const existing = [ + edge("e1", "x", "y"), + edge("e2", "a", "b"), + edge("e3", "m", "n"), + ]; + expect( + hasDuplicateEdge(existing, { source: "a", target: "b" }) + ).toBe(true); + }); + }); + + describe("dedupeEdges", () => { + it("returns an empty array for empty input", () => { + expect(dedupeEdges([])).toEqual([]); + }); + + it("returns the same edges when all are unique", () => { + const input = [ + edge("e1", "a", "b"), + edge("e2", "b", "c"), + 
edge("e3", "a", "c"), + ]; + expect(dedupeEdges(input)).toEqual(input); + }); + + it("drops later duplicates and preserves first occurrence order", () => { + const first = edge("e1", "a", "b"); + const second = edge("e2", "b", "c"); + const dup = edge("e3", "a", "b"); + expect(dedupeEdges([first, second, dup])).toEqual([first, second]); + }); + + it("treats null/undefined/empty-string handles as equivalent when deduping", () => { + const first = edge("e1", "a", "b", null, null); + const dup = edge("e2", "a", "b", "", undefined); + expect(dedupeEdges([first, dup])).toEqual([first]); + }); + + it("keeps edges that differ only by sourceHandle", () => { + const trueEdge = edge("e1", "cond", "t", "true"); + const falseEdge = edge("e2", "cond", "t", "false"); + expect(dedupeEdges([trueEdge, falseEdge])).toEqual([trueEdge, falseEdge]); + }); + }); +}); diff --git a/tests/unit/execution-wait.test.ts b/tests/unit/execution-wait.test.ts new file mode 100644 index 000000000..537879025 --- /dev/null +++ b/tests/unit/execution-wait.test.ts @@ -0,0 +1,264 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +vi.mock("server-only", () => ({})); + +// --------------------------------------------------------------------------- +// Hoisted mocks +// --------------------------------------------------------------------------- + +const { mockFindFirstExecution, mockFindFirstLog } = vi.hoisted(() => ({ + mockFindFirstExecution: vi.fn(), + mockFindFirstLog: vi.fn(), +})); + +vi.mock("@/lib/db", () => ({ + db: { + query: { + workflowExecutions: { findFirst: mockFindFirstExecution }, + workflowExecutionLogs: { findFirst: mockFindFirstLog }, + }, + }, +})); + +vi.mock("@/lib/db/schema", () => ({ + workflowExecutions: { id: "id" }, + workflowExecutionLogs: { + executionId: "execution_id", + nodeId: "node_id", + status: "status", + completedAt: "completed_at", + }, +})); + +// --------------------------------------------------------------------------- +// Tests +// 
--------------------------------------------------------------------------- + +describe("waitForExecutionCompletion (KEEP-265)", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it("returns null immediately when timeout <= 0", async () => { + const { waitForExecutionCompletion } = await import( + "@/lib/x402/execution-wait" + ); + const result = await waitForExecutionCompletion("exec-1", 0); + expect(result).toBeNull(); + expect(mockFindFirstExecution).not.toHaveBeenCalled(); + }); + + it("returns success result when execution is already terminal", async () => { + mockFindFirstExecution.mockResolvedValue({ + status: "success", + output: { foo: "bar" }, + error: null, + }); + const { waitForExecutionCompletion } = await import( + "@/lib/x402/execution-wait" + ); + const result = await waitForExecutionCompletion("exec-1", 1000, 10); + expect(result).toEqual({ + status: "success", + output: { foo: "bar" }, + error: null, + }); + }); + + it("returns error result with error message when execution failed", async () => { + mockFindFirstExecution.mockResolvedValue({ + status: "error", + output: null, + error: "RPC down", + }); + const { waitForExecutionCompletion } = await import( + "@/lib/x402/execution-wait" + ); + const result = await waitForExecutionCompletion("exec-1", 1000, 10); + expect(result?.status).toBe("error"); + expect(result?.error).toBe("RPC down"); + }); + + it("returns null if execution row is missing", async () => { + mockFindFirstExecution.mockResolvedValue(undefined); + const { waitForExecutionCompletion } = await import( + "@/lib/x402/execution-wait" + ); + const result = await waitForExecutionCompletion("exec-missing", 100, 10); + expect(result).toBeNull(); + }); + + it("polls until terminal status appears", async () => { + mockFindFirstExecution + .mockResolvedValueOnce({ status: "running", output: null, error: null }) + .mockResolvedValueOnce({ status: "running", output: null, error: null }) + .mockResolvedValueOnce({ + status: 
"success", + output: { balance: "1.3286 ETH" }, + error: null, + }); + const { waitForExecutionCompletion } = await import( + "@/lib/x402/execution-wait" + ); + const result = await waitForExecutionCompletion("exec-2", 1000, 5); + expect(result?.status).toBe("success"); + expect(mockFindFirstExecution).toHaveBeenCalledTimes(3); + }); + + it("returns null on timeout when never reaching terminal state", async () => { + mockFindFirstExecution.mockResolvedValue({ + status: "running", + output: null, + error: null, + }); + const { waitForExecutionCompletion } = await import( + "@/lib/x402/execution-wait" + ); + const start = Date.now(); + const result = await waitForExecutionCompletion("exec-3", 40, 10); + const elapsed = Date.now() - start; + expect(result).toBeNull(); + expect(elapsed).toBeGreaterThanOrEqual(30); + }); +}); + +describe("applyOutputMapping (KEEP-265)", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it("returns raw workflow output when outputMapping is null", async () => { + const { applyOutputMapping } = await import("@/lib/x402/execution-wait"); + const result = await applyOutputMapping("exec-1", { balance: "1.5" }, null); + expect(result).toEqual({ balance: "1.5" }); + expect(mockFindFirstLog).not.toHaveBeenCalled(); + }); + + it("returns raw workflow output when outputMapping has no nodeId", async () => { + const { applyOutputMapping } = await import("@/lib/x402/execution-wait"); + const result = await applyOutputMapping( + "exec-1", + { balance: "1.5" }, + { fields: ["balance"] } + ); + expect(result).toEqual({ balance: "1.5" }); + }); + + it("picks specific fields from the mapped node output", async () => { + mockFindFirstLog.mockResolvedValue({ + output: { + riskScore: 3, + vulnerabilities: ["reentrancy"], + internalDebug: "ignore-me", + }, + }); + const { applyOutputMapping } = await import("@/lib/x402/execution-wait"); + const result = await applyOutputMapping("exec-1", null, { + nodeId: "audit-1", + fields: ["riskScore", 
"vulnerabilities"], + }); + expect(result).toEqual({ + riskScore: 3, + vulnerabilities: ["reentrancy"], + }); + }); + + it("returns full node output when nodeId is set but fields is not", async () => { + mockFindFirstLog.mockResolvedValue({ + output: { a: 1, b: 2 }, + }); + const { applyOutputMapping } = await import("@/lib/x402/execution-wait"); + const result = await applyOutputMapping("exec-1", null, { + nodeId: "audit-1", + }); + expect(result).toEqual({ a: 1, b: 2 }); + }); + + it("falls back to workflow output when the mapped node log is missing", async () => { + mockFindFirstLog.mockResolvedValue(undefined); + const { applyOutputMapping } = await import("@/lib/x402/execution-wait"); + const result = await applyOutputMapping( + "exec-1", + { fallback: true }, + { nodeId: "missing-node" } + ); + expect(result).toEqual({ fallback: true }); + }); +}); + +describe("buildCallCompletionResponse (KEEP-265)", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it("returns { status: 'running' } on timeout", async () => { + mockFindFirstExecution.mockResolvedValue({ + status: "running", + output: null, + error: null, + }); + const { buildCallCompletionResponse } = await import( + "@/lib/x402/execution-wait" + ); + const res = await buildCallCompletionResponse("exec-timeout", null, 30); + expect(res).toEqual({ executionId: "exec-timeout", status: "running" }); + }); + + it("returns mapped output on successful completion", async () => { + mockFindFirstExecution.mockResolvedValue({ + status: "success", + output: { balance: "1.3286 ETH", _debug: "noise" }, + error: null, + }); + mockFindFirstLog.mockResolvedValue({ + output: { balance: "1.3286 ETH", _debug: "noise" }, + }); + const { buildCallCompletionResponse } = await import( + "@/lib/x402/execution-wait" + ); + const res = await buildCallCompletionResponse( + "exec-success", + { nodeId: "last", fields: ["balance"] }, + 1000 + ); + expect(res).toEqual({ + executionId: "exec-success", + status: "success", + 
output: { balance: "1.3286 ETH" }, + }); + }); + + it("returns error payload when execution fails within timeout", async () => { + mockFindFirstExecution.mockResolvedValue({ + status: "error", + output: null, + error: "RPC failed", + }); + const { buildCallCompletionResponse } = await import( + "@/lib/x402/execution-wait" + ); + const res = await buildCallCompletionResponse("exec-err", null, 1000); + expect(res).toEqual({ + executionId: "exec-err", + status: "error", + error: "RPC failed", + }); + }); + + it("maps cancelled status to an error response", async () => { + mockFindFirstExecution.mockResolvedValue({ + status: "cancelled", + output: null, + error: null, + }); + const { buildCallCompletionResponse } = await import( + "@/lib/x402/execution-wait" + ); + const res = await buildCallCompletionResponse("exec-cancel", null, 1000); + expect(res).toEqual({ + executionId: "exec-cancel", + status: "error", + error: "Execution cancelled", + }); + }); +}); diff --git a/tests/unit/mcp-meta-tools.test.ts b/tests/unit/mcp-meta-tools.test.ts index 759ff22cb..f1aa5326c 100644 --- a/tests/unit/mcp-meta-tools.test.ts +++ b/tests/unit/mcp-meta-tools.test.ts @@ -382,6 +382,7 @@ describe("POST /api/mcp/workflows/[slug]/call: write workflow returns calldata", mockGenerateCalldata, mockAuthenticateApiKey, mockAuthenticateOAuthToken, + mockBuildCallCompletionResponse, } = vi.hoisted(() => ({ mockDbSelect: vi.fn(), mockDbInsert: vi.fn(), @@ -401,6 +402,7 @@ describe("POST /api/mcp/workflows/[slug]/call: write workflow returns calldata", mockGenerateCalldata: vi.fn(), mockAuthenticateApiKey: vi.fn(), mockAuthenticateOAuthToken: vi.fn(), + mockBuildCallCompletionResponse: vi.fn(), })); vi.mock("@/lib/db", () => ({ @@ -420,8 +422,14 @@ describe("POST /api/mcp/workflows/[slug]/call: write workflow returns calldata", })); vi.mock("@/lib/db/schema", () => ({ - workflows: { id: "id", listedSlug: "listed_slug", isListed: "is_listed" }, + workflows: { + id: "id", + listedSlug: 
"listed_slug", + isListed: "is_listed", + tagId: "tag_id", + }, workflowExecutions: { id: "id" }, + tags: { id: "id", name: "name" }, })); vi.mock("@/lib/x402/server", () => ({ @@ -463,6 +471,10 @@ describe("POST /api/mcp/workflows/[slug]/call: write workflow returns calldata", checkConcurrencyLimit: mockCheckConcurrencyLimit, })); + vi.mock("@/lib/x402/execution-wait", () => ({ + buildCallCompletionResponse: mockBuildCallCompletionResponse, + })); + vi.mock("@/lib/logging", () => ({ ErrorCategory: { WORKFLOW_ENGINE: "workflow_engine" }, logSystemError: mockLogSystemError, @@ -501,10 +513,15 @@ describe("POST /api/mcp/workflows/[slug]/call: write workflow returns calldata", }; function setupDbSelectWorkflow(row: unknown) { + // lookupWorkflow joins the tags table to project tagName into the row; + // the real chain is select().from().leftJoin().where().limit(). Mirror + // that shape here or the real code throws on the missing .leftJoin(). mockDbSelect.mockReturnValue({ from: vi.fn().mockReturnValue({ - where: vi.fn().mockReturnValue({ - limit: vi.fn().mockResolvedValue(row ? [row] : []), + leftJoin: vi.fn().mockReturnValue({ + where: vi.fn().mockReturnValue({ + limit: vi.fn().mockResolvedValue(row ? [row] : []), + }), }), }), }); @@ -546,6 +563,11 @@ describe("POST /api/mcp/workflows/[slug]/call: write workflow returns calldata", where: vi.fn().mockResolvedValue(undefined), }), }); + // Default: completion wait times out so we fall back to running response. + mockBuildCallCompletionResponse.mockImplementation( + (executionId: string) => + Promise.resolve({ executionId, status: "running" }) + ); // Default: caller is authenticated. The write workflow path requires // an API key or MCP OAuth token, same as the free read path. 
mockAuthenticateOAuthToken.mockReturnValue({ diff --git a/tests/unit/payment-router.test.ts b/tests/unit/payment-router.test.ts index 4b16badf6..4db61f874 100644 --- a/tests/unit/payment-router.test.ts +++ b/tests/unit/payment-router.test.ts @@ -252,7 +252,7 @@ describe("buildDual402Response", () => { ).toEqual({ executionId: "exec_abc123", status: "running" }); }); - it("omits extensions block when inputSchema is absent", async () => { + it("always emits extensions.bazaar.discoverable:true so CDP Bazaar indexes the resource", async () => { const response = buildDual402Response({ price: "0.01", creatorWalletAddress: "0xCreator", @@ -260,6 +260,25 @@ describe("buildDual402Response", () => { resourceUrl: "https://example.com/api/mcp/workflows/test/call", }); const body = await response.json(); - expect(body.extensions).toBeUndefined(); + expect(body.extensions.bazaar.discoverable).toBe(true); + // schema subtree is only populated when inputSchema is provided + expect(body.extensions.bazaar.schema).toBeUndefined(); + expect(body.extensions.bazaar.category).toBeUndefined(); + expect(body.extensions.bazaar.tags).toBeUndefined(); + }); + + it("emits extensions.bazaar.category and tags when provided", async () => { + const response = buildDual402Response({ + price: "0.01", + creatorWalletAddress: "0xCreator", + workflowName: "Test Workflow", + resourceUrl: "https://example.com/api/mcp/workflows/test/call", + category: "web3", + tagName: "defi", + }); + const body = await response.json(); + expect(body.extensions.bazaar.discoverable).toBe(true); + expect(body.extensions.bazaar.category).toBe("web3"); + expect(body.extensions.bazaar.tags).toEqual(["defi"]); }); }); diff --git a/tests/unit/protocol-aave-v3.test.ts b/tests/unit/protocol-aave-v3.test.ts index dd03ded30..955a135e6 100644 --- a/tests/unit/protocol-aave-v3.test.ts +++ b/tests/unit/protocol-aave-v3.test.ts @@ -9,7 +9,7 @@ describe("Aave V3 Protocol Definition", () => { it("imports without throwing", () => { 
expect(aaveV3Def).toBeDefined(); expect(aaveV3Def.name).toBe("Aave V3"); - expect(aaveV3Def.slug).toBe("aave"); + expect(aaveV3Def.slug).toBe("aave-v3"); }); it("protocol slug is valid kebab-case", () => { @@ -131,9 +131,9 @@ describe("Aave V3 Protocol Definition", () => { it("registers in the protocol registry and is retrievable", () => { registerProtocol(aaveV3Def); - const retrieved = getProtocol("aave"); + const retrieved = getProtocol("aave-v3"); expect(retrieved).toBeDefined(); - expect(retrieved?.slug).toBe("aave"); + expect(retrieved?.slug).toBe("aave-v3"); expect(retrieved?.name).toBe("Aave V3"); }); }); diff --git a/tests/unit/protocol-aave-v4.test.ts b/tests/unit/protocol-aave-v4.test.ts new file mode 100644 index 000000000..75c74bd3d --- /dev/null +++ b/tests/unit/protocol-aave-v4.test.ts @@ -0,0 +1,249 @@ +import { describe, expect, it } from "vitest"; +import { + getProtocol, + protocolActionToPluginAction, + registerProtocol, +} from "@/lib/protocol-registry"; +import aaveV4Def from "@/protocols/aave-v4"; + +const KEBAB_CASE_REGEX = /^[a-z][a-z0-9]*(-[a-z0-9]+)*$/; +const HEX_ADDRESS_REGEX = /^0x[0-9a-fA-F]{40}$/; + +describe("Aave V4 Protocol Definition (ABI-driven)", () => { + it("imports without throwing", () => { + expect(aaveV4Def).toBeDefined(); + expect(aaveV4Def.name).toBe("Aave V4"); + expect(aaveV4Def.slug).toBe("aave-v4"); + }); + + it("protocol slug is valid kebab-case", () => { + expect(aaveV4Def.slug).toMatch(KEBAB_CASE_REGEX); + }); + + it("all action slugs are valid kebab-case", () => { + for (const action of aaveV4Def.actions) { + expect(action.slug).toMatch(KEBAB_CASE_REGEX); + } + }); + + it("every action references an existing contract", () => { + const contractKeys = new Set(Object.keys(aaveV4Def.contracts)); + for (const action of aaveV4Def.actions) { + expect( + contractKeys.has(action.contract), + `action "${action.slug}" references unknown contract "${action.contract}"` + ).toBe(true); + } + }); + + it("has no duplicate 
action slugs", () => { + const slugs = aaveV4Def.actions.map((a) => a.slug); + expect(slugs.length).toBe(new Set(slugs).size); + }); + + it("all read actions define outputs", () => { + const readActions = aaveV4Def.actions.filter((a) => a.type === "read"); + for (const action of readActions) { + expect( + action.outputs, + `read action "${action.slug}" must have outputs` + ).toBeDefined(); + expect(action.outputs?.length).toBeGreaterThan(0); + } + }); + + it("all contract addresses are valid hex format", () => { + for (const [key, contract] of Object.entries(aaveV4Def.contracts)) { + for (const [chain, address] of Object.entries(contract.addresses)) { + expect( + address, + `contract "${key}" chain "${chain}" address must be valid hex` + ).toMatch(HEX_ADDRESS_REGEX); + } + } + }); + + it("has 9 actions covering V3 parity + reserveId resolver + getUserAccountData", () => { + expect(aaveV4Def.actions).toHaveLength(9); + const slugs = aaveV4Def.actions.map((a) => a.slug); + expect(slugs).toEqual( + expect.arrayContaining([ + "supply", + "withdraw", + "borrow", + "repay", + "set-collateral", + "get-reserve-id", + "get-user-supplied-assets", + "get-user-debt", + "get-user-account-data", + ]) + ); + }); + + it("has 5 write actions and 4 read actions", () => { + const reads = aaveV4Def.actions.filter((a) => a.type === "read"); + const writes = aaveV4Def.actions.filter((a) => a.type === "write"); + expect(reads).toHaveLength(4); + expect(writes).toHaveLength(5); + }); + + it("has 1 contract (Lido Spoke only for this first cut)", () => { + expect(Object.keys(aaveV4Def.contracts)).toHaveLength(1); + expect(aaveV4Def.contracts.lidoSpoke).toBeDefined(); + }); + + it("Lido Spoke is available on Ethereum mainnet only (V4 launch state)", () => { + const chains = Object.keys(aaveV4Def.contracts.lidoSpoke.addresses); + expect(chains).toHaveLength(1); + expect(chains).toContain("1"); + }); + + it("supply action has reserveId/amount/onBehalfOf inputs and is a write", () => { + const 
supply = aaveV4Def.actions.find((a) => a.slug === "supply"); + expect(supply).toBeDefined(); + expect(supply?.type).toBe("write"); + expect(supply?.function).toBe("supply"); + expect(supply?.inputs).toHaveLength(3); + expect(supply?.inputs.map((i) => i.name)).toEqual([ + "reserveId", + "amount", + "onBehalfOf", + ]); + expect(supply?.inputs[0].type).toBe("uint256"); + expect(supply?.inputs[1].type).toBe("uint256"); + expect(supply?.inputs[2].type).toBe("address"); + }); + + it("set-collateral action has a bool input", () => { + const setCollateral = aaveV4Def.actions.find( + (a) => a.slug === "set-collateral" + ); + expect(setCollateral).toBeDefined(); + expect(setCollateral?.type).toBe("write"); + expect(setCollateral?.function).toBe("setUsingAsCollateral"); + const boolInput = setCollateral?.inputs.find( + (i) => i.name === "usingAsCollateral" + ); + expect(boolInput?.type).toBe("bool"); + }); + + it("get-reserve-id action has renamed output 'reserveId'", () => { + const getReserveId = aaveV4Def.actions.find( + (a) => a.slug === "get-reserve-id" + ); + expect(getReserveId).toBeDefined(); + expect(getReserveId?.type).toBe("read"); + expect(getReserveId?.outputs).toHaveLength(1); + expect(getReserveId?.outputs?.[0].name).toBe("reserveId"); + expect(getReserveId?.outputs?.[0].type).toBe("uint256"); + }); + + it("get-user-debt action returns two uint256 outputs (drawnDebt + premiumDebt)", () => { + const getUserDebt = aaveV4Def.actions.find( + (a) => a.slug === "get-user-debt" + ); + expect(getUserDebt).toBeDefined(); + expect(getUserDebt?.type).toBe("read"); + expect(getUserDebt?.outputs).toHaveLength(2); + expect(getUserDebt?.outputs?.[0].name).toBe("drawnDebt"); + expect(getUserDebt?.outputs?.[1].name).toBe("premiumDebt"); + }); + + it("get-user-supplied-assets action has a single named output", () => { + const getSupplied = aaveV4Def.actions.find( + (a) => a.slug === "get-user-supplied-assets" + ); + expect(getSupplied).toBeDefined(); + 
expect(getSupplied?.type).toBe("read"); + expect(getSupplied?.outputs).toHaveLength(1); + expect(getSupplied?.outputs?.[0].name).toBe("suppliedAmount"); + }); + + it("write actions do not surface ABI-derived outputs as UI template suggestions (KEEP-296)", () => { + // writeContractCore returns result: undefined, so any action.outputs on + // write actions must not flow into pluginAction.outputFields (which + // drives template autocomplete). Gated in buildOutputFieldsFromAction. + for (const slug of ["supply", "withdraw", "borrow", "repay"]) { + const action = aaveV4Def.actions.find((a) => a.slug === slug); + expect(action, `action "${slug}" not found`).toBeDefined(); + if (!action) { + continue; + } + expect(action.type).toBe("write"); + const pluginAction = protocolActionToPluginAction(aaveV4Def, action); + const outputFieldNames = (pluginAction.outputFields ?? []).map( + (f) => f.field + ); + // No ABI-derived outputs leak through + expect(outputFieldNames).not.toContain("result0"); + expect(outputFieldNames).not.toContain("result1"); + // Standard write-action fields are still present + expect(outputFieldNames).toEqual( + expect.arrayContaining([ + "success", + "error", + "transactionHash", + "transactionLink", + ]) + ); + } + }); + + it("read actions surface ABI-derived outputs as UI template suggestions", () => { + for (const slug of [ + "get-user-supplied-assets", + "get-user-debt", + "get-reserve-id", + ]) { + const action = aaveV4Def.actions.find((a) => a.slug === slug); + expect(action, `action "${slug}" not found`).toBeDefined(); + if (!action) { + continue; + } + expect(action.type).toBe("read"); + const pluginAction = protocolActionToPluginAction(aaveV4Def, action); + const outputFieldNames = (pluginAction.outputFields ?? 
[]).map( + (f) => f.field + ); + // success/error always present; plus at least one ABI-derived output + expect(outputFieldNames).toEqual( + expect.arrayContaining(["success", "error"]) + ); + expect(outputFieldNames).not.toContain("transactionHash"); + const nonStandardFields = outputFieldNames.filter( + (f) => f !== "success" && f !== "error" + ); + expect(nonStandardFields.length).toBeGreaterThan(0); + } + }); + + it("get-user-account-data returns a single tuple output", () => { + const action = aaveV4Def.actions.find( + (a) => a.slug === "get-user-account-data" + ); + expect(action).toBeDefined(); + expect(action?.type).toBe("read"); + expect(action?.function).toBe("getUserAccountData"); + expect(action?.inputs).toHaveLength(1); + expect(action?.inputs[0].name).toBe("user"); + expect(action?.outputs).toHaveLength(1); + expect(action?.outputs?.[0].name).toBe("accountData"); + expect(action?.outputs?.[0].type).toBe("tuple"); + }); + + it("set-collateral write has no outputs (Solidity returns void)", () => { + const setCollateral = aaveV4Def.actions.find( + (a) => a.slug === "set-collateral" + ); + expect(setCollateral?.outputs).toBeUndefined(); + }); + + it("registers in the protocol registry and is retrievable", () => { + registerProtocol(aaveV4Def); + const retrieved = getProtocol("aave-v4"); + expect(retrieved).toBeDefined(); + expect(retrieved?.slug).toBe("aave-v4"); + expect(retrieved?.name).toBe("Aave V4"); + }); +}); diff --git a/tests/unit/wallet-rpc.test.ts b/tests/unit/wallet-rpc.test.ts new file mode 100644 index 000000000..7b921a5cc --- /dev/null +++ b/tests/unit/wallet-rpc.test.ts @@ -0,0 +1,411 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +const addBreadcrumbMock = vi.fn(); +vi.mock("@sentry/nextjs", () => ({ + addBreadcrumb: (...args: unknown[]) => addBreadcrumbMock(...args), +})); + +import { + encodeBalanceOfCallData, + getRpcBackoffMs, + hexWeiToBigInt, + type JsonRpcPayload, + RPC_RETRY_CONFIG, + rpcCall, + 
rpcCallWithFailover, +} from "@/lib/wallet/rpc"; + +const VALID_ADDRESS = "0x1234567890abcdef1234567890abcdef12345678"; +const TEST_RPC_URL = "https://rpc.example.test"; +const TEST_PAYLOAD: JsonRpcPayload = { + jsonrpc: "2.0", + method: "eth_getBalance", + params: [VALID_ADDRESS, "latest"], + id: 1, +}; + +function jsonResponse(body: unknown, status = 200): Response { + return new Response(JSON.stringify(body), { + status, + headers: { "Content-Type": "application/json" }, + }); +} + +function plainResponse(status: number, statusText = ""): Response { + return new Response(null, { status, statusText }); +} + +describe("encodeBalanceOfCallData", () => { + it("encodes a 0x-prefixed address", () => { + const data = encodeBalanceOfCallData(VALID_ADDRESS); + expect(data).toBe( + "0x70a08231000000000000000000000000" + + "1234567890abcdef1234567890abcdef12345678" + ); + }); + + it("encodes an unprefixed address", () => { + const data = encodeBalanceOfCallData(VALID_ADDRESS.slice(2)); + expect(data).toBe( + "0x70a08231000000000000000000000000" + + "1234567890abcdef1234567890abcdef12345678" + ); + }); + + it("lowercases mixed-case input", () => { + const data = encodeBalanceOfCallData( + "0x1234567890ABCDEF1234567890abcdef12345678" + ); + expect(data).toContain("1234567890abcdef1234567890abcdef12345678"); + }); + + it("throws on too-short input", () => { + expect(() => encodeBalanceOfCallData("0x1234")).toThrow( + /Invalid EVM address/ + ); + }); + + it("throws on too-long input", () => { + expect(() => + encodeBalanceOfCallData(`${VALID_ADDRESS}deadbeef`) + ).toThrow(/Invalid EVM address/); + }); + + it("throws on non-hex characters", () => { + expect(() => + encodeBalanceOfCallData("0xzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz") + ).toThrow(/Invalid EVM address/); + }); +}); + +describe("hexWeiToBigInt", () => { + it('treats "0x" as zero', () => { + expect(hexWeiToBigInt("0x")).toBe(BigInt(0)); + }); + + it('parses "0x0" as zero', () => { + 
expect(hexWeiToBigInt("0x0")).toBe(BigInt(0)); + }); + + it("parses a non-zero hex value", () => { + expect(hexWeiToBigInt("0x1bc16d674ec80000")).toBe( + BigInt("2000000000000000000") + ); + }); +}); + +describe("getRpcBackoffMs", () => { + beforeEach(() => { + vi.spyOn(Math, "random").mockReturnValue(0); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("returns the base delay for standard attempt 0 with no jitter", () => { + expect(getRpcBackoffMs(0, "standard")).toBe( + RPC_RETRY_CONFIG.STANDARD.BASE_MS + ); + }); + + it("doubles the base delay for standard attempt 1", () => { + expect(getRpcBackoffMs(1, "standard")).toBe( + RPC_RETRY_CONFIG.STANDARD.BASE_MS * 2 + ); + }); + + it("caps standard backoff at CAP_MS", () => { + expect(getRpcBackoffMs(20, "standard")).toBeLessThanOrEqual( + RPC_RETRY_CONFIG.STANDARD.CAP_MS + ); + }); + + it("uses longer base for rate_limit", () => { + expect(getRpcBackoffMs(0, "rate_limit")).toBe( + RPC_RETRY_CONFIG.RATE_LIMIT.BASE_MS + ); + }); + + it("never exceeds ABSOLUTE_MAX_BACKOFF_MS even with maximum jitter", () => { + vi.spyOn(Math, "random").mockReturnValue(0.999999); + for (let attempt = 0; attempt < 10; attempt++) { + expect(getRpcBackoffMs(attempt, "standard")).toBeLessThanOrEqual( + RPC_RETRY_CONFIG.ABSOLUTE_MAX_BACKOFF_MS + ); + expect(getRpcBackoffMs(attempt, "rate_limit")).toBeLessThanOrEqual( + RPC_RETRY_CONFIG.ABSOLUTE_MAX_BACKOFF_MS + ); + } + }); + + it("adds jitter proportional to the base delay", () => { + vi.spyOn(Math, "random").mockReturnValue(0.5); + const base = RPC_RETRY_CONFIG.STANDARD.BASE_MS; + const expectedJitter = 0.5 * base * RPC_RETRY_CONFIG.JITTER_FACTOR; + expect(getRpcBackoffMs(0, "standard")).toBeCloseTo(base + expectedJitter); + }); +}); + +describe("rpcCall", () => { + const fetchMock = vi.fn(); + + beforeEach(() => { + vi.stubGlobal("fetch", fetchMock); + vi.useFakeTimers(); + addBreadcrumbMock.mockClear(); + fetchMock.mockReset(); + vi.spyOn(Math, 
"random").mockReturnValue(0); + }); + + afterEach(() => { + vi.useRealTimers(); + vi.unstubAllGlobals(); + vi.restoreAllMocks(); + }); + + async function runWithTimers(promise: Promise): Promise { + const settled = promise.then( + (value) => ({ ok: true as const, value }), + (error: unknown) => ({ ok: false as const, error }) + ); + await vi.runAllTimersAsync(); + const outcome = await settled; + if (outcome.ok) { + return outcome.value; + } + throw outcome.error; + } + + it("returns the result on first success", async () => { + fetchMock.mockResolvedValueOnce( + jsonResponse({ jsonrpc: "2.0", id: 1, result: "0x1234" }) + ); + + const result = await runWithTimers(rpcCall(TEST_RPC_URL, TEST_PAYLOAD)); + + expect(result).toBe("0x1234"); + expect(fetchMock).toHaveBeenCalledTimes(1); + expect(addBreadcrumbMock).not.toHaveBeenCalled(); + }); + + it("retries on 429 then succeeds", async () => { + fetchMock + .mockResolvedValueOnce(plainResponse(429, "Too Many Requests")) + .mockResolvedValueOnce( + jsonResponse({ jsonrpc: "2.0", id: 1, result: "0xdead" }) + ); + + const result = await runWithTimers(rpcCall(TEST_RPC_URL, TEST_PAYLOAD)); + + expect(result).toBe("0xdead"); + expect(fetchMock).toHaveBeenCalledTimes(2); + expect(addBreadcrumbMock).toHaveBeenCalledTimes(1); + expect(addBreadcrumbMock).toHaveBeenCalledWith( + expect.objectContaining({ + category: "rpc.retry", + data: expect.objectContaining({ kind: "rate_limit", attempt: 1 }), + }) + ); + }); + + it("retries on 5xx then succeeds", async () => { + fetchMock + .mockResolvedValueOnce(plainResponse(502, "Bad Gateway")) + .mockResolvedValueOnce( + jsonResponse({ jsonrpc: "2.0", id: 1, result: "0x1" }) + ); + + const result = await runWithTimers(rpcCall(TEST_RPC_URL, TEST_PAYLOAD)); + + expect(result).toBe("0x1"); + expect(fetchMock).toHaveBeenCalledTimes(2); + expect(addBreadcrumbMock).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ kind: "standard" }), + }) + ); + }); + + 
it("retries on network error then succeeds", async () => { + fetchMock + .mockRejectedValueOnce(new TypeError("fetch failed")) + .mockResolvedValueOnce( + jsonResponse({ jsonrpc: "2.0", id: 1, result: "0x2" }) + ); + + const result = await runWithTimers(rpcCall(TEST_RPC_URL, TEST_PAYLOAD)); + + expect(result).toBe("0x2"); + expect(fetchMock).toHaveBeenCalledTimes(2); + }); + + it("retries when result is missing then succeeds", async () => { + fetchMock + .mockResolvedValueOnce(jsonResponse({ jsonrpc: "2.0", id: 1 })) + .mockResolvedValueOnce( + jsonResponse({ jsonrpc: "2.0", id: 1, result: "0x3" }) + ); + + const result = await runWithTimers(rpcCall(TEST_RPC_URL, TEST_PAYLOAD)); + + expect(result).toBe("0x3"); + expect(fetchMock).toHaveBeenCalledTimes(2); + }); + + it("throws immediately on RPC-reported error without retrying", async () => { + fetchMock.mockResolvedValueOnce( + jsonResponse({ + jsonrpc: "2.0", + id: 1, + error: { code: -32_000, message: "execution reverted" }, + }) + ); + + await expect( + runWithTimers(rpcCall(TEST_RPC_URL, TEST_PAYLOAD)) + ).rejects.toThrow(/execution reverted/); + expect(fetchMock).toHaveBeenCalledTimes(1); + }); + + it("throws immediately on non-429 4xx without retrying", async () => { + fetchMock.mockResolvedValueOnce(plainResponse(404, "Not Found")); + + await expect( + runWithTimers(rpcCall(TEST_RPC_URL, TEST_PAYLOAD)) + ).rejects.toThrow(/HTTP 404/); + expect(fetchMock).toHaveBeenCalledTimes(1); + }); + + it("throws the last error after exhausting retries", async () => { + fetchMock.mockResolvedValue(plainResponse(429, "Too Many Requests")); + + await expect( + runWithTimers(rpcCall(TEST_RPC_URL, TEST_PAYLOAD)) + ).rejects.toThrow(/HTTP 429/); + expect(fetchMock).toHaveBeenCalledTimes(RPC_RETRY_CONFIG.MAX_RETRIES + 1); + expect(addBreadcrumbMock).toHaveBeenCalledTimes( + RPC_RETRY_CONFIG.MAX_RETRIES + ); + }); + + it("honors the custom maxRetries argument", async () => { + fetchMock.mockResolvedValue(plainResponse(429, "Too 
Many Requests")); + + await expect( + runWithTimers(rpcCall(TEST_RPC_URL, TEST_PAYLOAD, 1)) + ).rejects.toThrow(/HTTP 429/); + expect(fetchMock).toHaveBeenCalledTimes(2); + }); +}); + +describe("rpcCallWithFailover", () => { + const fetchMock = vi.fn(); + const PRIMARY_URL = "https://primary.rpc.test"; + const FALLBACK_URL = "https://fallback.rpc.test"; + + beforeEach(() => { + vi.stubGlobal("fetch", fetchMock); + vi.useFakeTimers(); + addBreadcrumbMock.mockClear(); + fetchMock.mockReset(); + vi.spyOn(Math, "random").mockReturnValue(0); + }); + + afterEach(() => { + vi.useRealTimers(); + vi.unstubAllGlobals(); + vi.restoreAllMocks(); + }); + + async function runWithTimers(promise: Promise): Promise { + const settled = promise.then( + (value) => ({ ok: true as const, value }), + (error: unknown) => ({ ok: false as const, error }) + ); + await vi.runAllTimersAsync(); + const outcome = await settled; + if (outcome.ok) { + return outcome.value; + } + throw outcome.error; + } + + it("returns the primary result when primary succeeds", async () => { + fetchMock.mockResolvedValue( + jsonResponse({ jsonrpc: "2.0", id: 1, result: "0xprimary" }) + ); + + const result = await runWithTimers( + rpcCallWithFailover([PRIMARY_URL, FALLBACK_URL], TEST_PAYLOAD) + ); + + expect(result).toBe("0xprimary"); + expect(fetchMock).toHaveBeenCalledWith( + PRIMARY_URL, + expect.anything() + ); + expect(fetchMock).toHaveBeenCalledTimes(1); + }); + + it("fails over to the fallback URL when primary is exhausted", async () => { + fetchMock.mockImplementation((url: string) => { + if (url === PRIMARY_URL) { + return Promise.resolve(plainResponse(429, "Too Many Requests")); + } + return Promise.resolve( + jsonResponse({ jsonrpc: "2.0", id: 1, result: "0xfallback" }) + ); + }); + + const result = await runWithTimers( + rpcCallWithFailover([PRIMARY_URL, FALLBACK_URL], TEST_PAYLOAD) + ); + + expect(result).toBe("0xfallback"); + // Primary uses the reduced retry budget (1 retry => 2 attempts). 
+ const primaryAttempts = fetchMock.mock.calls.filter( + ([url]) => url === PRIMARY_URL + ).length; + expect(primaryAttempts).toBe( + RPC_RETRY_CONFIG.RETRIES_PER_URL_WITH_FAILOVER + 1 + ); + expect(addBreadcrumbMock).toHaveBeenCalledWith( + expect.objectContaining({ + category: "rpc.failover", + data: expect.objectContaining({ + failedUrl: PRIMARY_URL, + nextUrl: FALLBACK_URL, + }), + }) + ); + }); + + it("throws the last error when every URL is exhausted", async () => { + fetchMock.mockResolvedValue(plainResponse(429, "Too Many Requests")); + + await expect( + runWithTimers( + rpcCallWithFailover([PRIMARY_URL, FALLBACK_URL], TEST_PAYLOAD) + ) + ).rejects.toThrow(/HTTP 429/); + }); + + it("does not emit a failover breadcrumb when there is only one URL", async () => { + fetchMock.mockResolvedValue(plainResponse(429, "Too Many Requests")); + + await expect( + runWithTimers(rpcCallWithFailover([PRIMARY_URL], TEST_PAYLOAD)) + ).rejects.toThrow(/HTTP 429/); + const failoverBreadcrumbs = addBreadcrumbMock.mock.calls.filter( + (args) => (args[0] as { category?: string }).category === "rpc.failover" + ); + expect(failoverBreadcrumbs).toHaveLength(0); + }); + + it("rejects an empty URL list", async () => { + await expect( + runWithTimers(rpcCallWithFailover([], TEST_PAYLOAD)) + ).rejects.toThrow(/at least one URL/); + }); +}); diff --git a/tests/unit/x402-call-route.test.ts b/tests/unit/x402-call-route.test.ts index 109cd5c98..a67011dd3 100644 --- a/tests/unit/x402-call-route.test.ts +++ b/tests/unit/x402-call-route.test.ts @@ -21,6 +21,7 @@ const { mockLogSystemError, mockAuthenticateApiKey, mockAuthenticateOAuthToken, + mockBuildCallCompletionResponse, } = vi.hoisted(() => ({ mockDbSelect: vi.fn(), mockDbInsert: vi.fn(), @@ -38,6 +39,7 @@ const { mockLogSystemError: vi.fn(), mockAuthenticateApiKey: vi.fn(), mockAuthenticateOAuthToken: vi.fn(), + mockBuildCallCompletionResponse: vi.fn(), })); // --------------------------------------------------------------------------- 
@@ -53,8 +55,14 @@ vi.mock("@/lib/db", () => ({ })); vi.mock("@/lib/db/schema", () => ({ - workflows: { id: "id", listedSlug: "listed_slug", isListed: "is_listed" }, + workflows: { + id: "id", + listedSlug: "listed_slug", + isListed: "is_listed", + tagId: "tag_id", + }, workflowExecutions: { id: "id" }, + tags: { id: "id", name: "name" }, })); vi.mock("@/lib/x402/payment-gate", () => ({ @@ -102,6 +110,10 @@ vi.mock("@/lib/logging", () => ({ logSystemError: mockLogSystemError, })); +vi.mock("@/lib/x402/execution-wait", () => ({ + buildCallCompletionResponse: mockBuildCallCompletionResponse, +})); + // --------------------------------------------------------------------------- // Helpers // --------------------------------------------------------------------------- @@ -128,10 +140,15 @@ const FREE_WORKFLOW_NULL_PRICE = { ...LISTED_WORKFLOW, priceUsdcPerCall: null }; const CREATOR_WALLET = "0xCREATOR_WALLET"; function setupDbSelectWorkflow(row: unknown) { + // lookupWorkflow joins the tags table to project tagName into the row, so + // the chain is: select().from().leftJoin().where().limit(). Mirror that + // shape here or the real code throws on the missing .leftJoin(). mockDbSelect.mockReturnValue({ from: vi.fn().mockReturnValue({ - where: vi.fn().mockReturnValue({ - limit: vi.fn().mockResolvedValue(row ? [row] : []), + leftJoin: vi.fn().mockReturnValue({ + where: vi.fn().mockReturnValue({ + limit: vi.fn().mockResolvedValue(row ? [row] : []), + }), }), }), }); @@ -212,6 +229,12 @@ describe("POST /api/mcp/workflows/[slug]/call", () => { mockRecordPayment.mockResolvedValue(undefined); mockHashPaymentSignature.mockReturnValue("hash-abc"); mockResolveCreatorWallet.mockResolvedValue(CREATOR_WALLET); + // Default: simulate timeout so we fall back to running response. Tests + // exercising the synchronous completion path override this explicitly. 
+ mockBuildCallCompletionResponse.mockImplementation( + (executionId: string) => + Promise.resolve({ executionId, status: "running" }) + ); // Default no-op update chain: db.update(table).set(values).where(filter) mockDbUpdate.mockReturnValue({ set: vi.fn().mockReturnValue({ @@ -534,6 +557,88 @@ describe("POST /api/mcp/workflows/[slug]/call", () => { expect(mockStart).not.toHaveBeenCalled(); }); + it("Test 15b: free read workflow returns mapped output inline when execution completes within timeout (KEEP-265)", async () => { + setupDbSelectWorkflow(FREE_WORKFLOW); + setupDbInsertExecution("exec-sync-1"); + mockBuildCallCompletionResponse.mockResolvedValue({ + executionId: "exec-sync-1", + status: "success", + output: { balance: "1.3286 ETH" }, + }); + const { POST } = await import("@/app/api/mcp/workflows/[slug]/call/route"); + const request = makeRequest("test-workflow"); + const params = Promise.resolve({ slug: "test-workflow" }); + const response = await POST(request, { params }); + expect(response.status).toBe(200); + const body = await response.json(); + expect(body.executionId).toBe("exec-sync-1"); + expect(body.status).toBe("success"); + expect(body.output).toEqual({ balance: "1.3286 ETH" }); + // Workflow still kicked off in the background prior to the wait. 
+ expect(mockStart).toHaveBeenCalled(); + }); + + it("Test 15c: free read workflow falls back to running on timeout (KEEP-265)", async () => { + setupDbSelectWorkflow(FREE_WORKFLOW); + setupDbInsertExecution("exec-timeout-1"); + mockBuildCallCompletionResponse.mockResolvedValue({ + executionId: "exec-timeout-1", + status: "running", + }); + const { POST } = await import("@/app/api/mcp/workflows/[slug]/call/route"); + const request = makeRequest("test-workflow"); + const params = Promise.resolve({ slug: "test-workflow" }); + const response = await POST(request, { params }); + expect(response.status).toBe(200); + const body = await response.json(); + expect(body.executionId).toBe("exec-timeout-1"); + expect(body.status).toBe("running"); + expect(body.output).toBeUndefined(); + }); + + it("Test 15d: free read workflow returns error status when execution fails within timeout (KEEP-265)", async () => { + setupDbSelectWorkflow(FREE_WORKFLOW); + setupDbInsertExecution("exec-err-1"); + mockBuildCallCompletionResponse.mockResolvedValue({ + executionId: "exec-err-1", + status: "error", + error: "RPC provider returned 500", + }); + const { POST } = await import("@/app/api/mcp/workflows/[slug]/call/route"); + const request = makeRequest("test-workflow"); + const params = Promise.resolve({ slug: "test-workflow" }); + const response = await POST(request, { params }); + expect(response.status).toBe(200); + const body = await response.json(); + expect(body.executionId).toBe("exec-err-1"); + expect(body.status).toBe("error"); + expect(body.error).toContain("RPC provider"); + }); + + it("Test 15e: paid read workflow returns mapped output inline on synchronous completion (KEEP-265)", async () => { + setupDbSelectWorkflow(LISTED_WORKFLOW); + setupDbInsertExecution("exec-paid-sync-1"); + makePassThroughGatePayment(); + mockBuildCallCompletionResponse.mockResolvedValue({ + executionId: "exec-paid-sync-1", + status: "success", + output: { riskScore: 2 }, + }); + const { POST } = await 
import("@/app/api/mcp/workflows/[slug]/call/route"); + const request = makeRequest("test-workflow", { + paymentSignature: "sig-sync", + }); + const params = Promise.resolve({ slug: "test-workflow" }); + const response = await POST(request, { params }); + expect(response.status).toBe(200); + const body = await response.json(); + expect(body.executionId).toBe("exec-paid-sync-1"); + expect(body.status).toBe("success"); + expect(body.output).toEqual({ riskScore: 2 }); + // Payment must still be recorded before completion wait returned a result. + expect(mockRecordPayment).toHaveBeenCalled(); + }); + it("Test 16: paid workflow probe with empty body returns 402 before body validation", async () => { const workflowWithRequiredField = { ...LISTED_WORKFLOW,