diff --git a/docs/public/SUMMARY.md b/docs/public/SUMMARY.md index 0502a1e7..b2885024 100644 --- a/docs/public/SUMMARY.md +++ b/docs/public/SUMMARY.md @@ -20,6 +20,7 @@ * [Alerts](user-guide/alerts.md) * [Templates](user-guide/templates.md) * [Shared Components](user-guide/shared-components.md) +* [AI Suggestions](user-guide/ai-suggestions.md) ## Operations diff --git a/docs/public/user-guide/ai-suggestions.md b/docs/public/user-guide/ai-suggestions.md new file mode 100644 index 00000000..e04241c5 --- /dev/null +++ b/docs/public/user-guide/ai-suggestions.md @@ -0,0 +1,64 @@ +# AI Suggestions + +VectorFlow includes optional AI-powered assistance for writing VRL code and generating pipeline configurations. When enabled, team members with Editor or Admin roles can use AI features in both the VRL editor and pipeline builder. + +## Setup + +Team admins can configure AI in **Settings → AI**. The configuration requires: + +| Field | Description | +|-------|-------------| +| **Provider** | OpenAI, Anthropic, or Custom (any OpenAI-compatible endpoint) | +| **Base URL** | API endpoint — pre-filled for known providers | +| **API Key** | Provider API key — encrypted at rest using AES-256 | +| **Model** | Model identifier (e.g. `gpt-4o`, `claude-sonnet-4-20250514`) | + +After saving, use **Test Connection** to verify the configuration works. + +{% hint style="info" %} +VectorFlow uses the OpenAI-compatible chat completions API format (`/chat/completions`). Most providers support this format natively or via a compatibility layer. For Anthropic, use an OpenAI-compatible proxy such as LiteLLM or OpenRouter. +{% endhint %} + +## VRL Assistant + +In the VRL editor (opened from any remap, filter, or route transform), click the **AI** button in the tools panel to reveal the AI input. + +1. Type a natural language description of what you want the VRL code to do +2. Click **Generate** — the AI streams VRL code in real time +3. 
When complete, choose:
+   - **Insert** — append the generated code after your existing code
+   - **Replace** — replace all existing code with the generated result
+   - **Regenerate** — try again with the same prompt
+
+The AI is aware of your upstream source types and available fields, so you can reference them naturally (e.g., "parse the syslog message and extract the hostname").
+
+## Pipeline Builder
+
+In the pipeline editor toolbar, click the **sparkle icon** to open the AI Pipeline Builder dialog.
+
+### Generate mode
+
+Describe a pipeline in plain language:
+
+> "Collect Kubernetes logs from a file source, drop debug-level events, parse JSON, and send to Elasticsearch and S3"
+
+The AI generates a complete Vector YAML configuration. Click **Apply to Canvas** to add the generated components to your pipeline. If your canvas already has components, the new ones are positioned below the existing layout to avoid overlap.
+
+### Review mode
+
+Ask the AI to analyze your current pipeline configuration:
+
+> "Are there any performance issues with my pipeline?"
+
+The AI reviews your current pipeline's YAML configuration and provides suggestions for improvements, best practices, and potential issues.
+
+## Rate Limits
+
+AI requests are rate-limited to 60 requests per hour per team to prevent excessive API usage. The limit resets on a rolling window.
+ +## Security + +- API keys are encrypted at rest using AES-256-GCM +- Keys are never exposed to the client — the settings page shows only whether a key is saved +- AI configuration changes are recorded in the audit log with the API key redacted +- Only team members with Editor or Admin roles can use AI features diff --git a/docs/public/user-guide/pipeline-editor.md b/docs/public/user-guide/pipeline-editor.md index 1dfe6fc5..552a333f 100644 --- a/docs/public/user-guide/pipeline-editor.md +++ b/docs/public/user-guide/pipeline-editor.md @@ -257,3 +257,19 @@ Click the pipeline name in the top-left corner of the editor to rename it inline {% hint style="info" %} On Windows and Linux, use `Ctrl` instead of `Cmd` for all keyboard shortcuts. {% endhint %} + +## AI-Powered Suggestions + +When AI is configured for your team (Settings → AI), two AI features become available: + +### VRL Assistant +In the VRL editor, click the **AI** button in the tools panel. Type a natural language description of what you want the VRL code to do, and the AI will generate VRL code. You can **Insert** (append) or **Replace** the current code. + +### Pipeline Builder +In the pipeline editor toolbar, click the **sparkle icon** to open the AI Pipeline Builder. Two modes are available: + +- **Generate**: Describe a pipeline in plain language (e.g., "Collect K8s logs, drop debug, send to Datadog"). The AI generates Vector YAML config that is applied directly to your canvas. +- **Review**: Ask the AI to review your current pipeline configuration for performance, correctness, and best practices. + +### Configuration +Team admins can configure AI in **Settings → AI**. VectorFlow supports any OpenAI-compatible API (OpenAI, Anthropic, Ollama, Groq, Together, etc.). 
diff --git a/prisma/migrations/20260310020000_add_ai_fields/migration.sql b/prisma/migrations/20260310020000_add_ai_fields/migration.sql new file mode 100644 index 00000000..fec51e28 --- /dev/null +++ b/prisma/migrations/20260310020000_add_ai_fields/migration.sql @@ -0,0 +1,6 @@ +-- AlterTable: Add AI configuration fields to Team +ALTER TABLE "Team" ADD COLUMN "aiProvider" TEXT; +ALTER TABLE "Team" ADD COLUMN "aiBaseUrl" TEXT; +ALTER TABLE "Team" ADD COLUMN "aiApiKey" TEXT; +ALTER TABLE "Team" ADD COLUMN "aiModel" TEXT; +ALTER TABLE "Team" ADD COLUMN "aiEnabled" BOOLEAN NOT NULL DEFAULT false; diff --git a/prisma/schema.prisma b/prisma/schema.prisma index 19eb7b7f..50e3076b 100644 --- a/prisma/schema.prisma +++ b/prisma/schema.prisma @@ -66,6 +66,14 @@ model Team { vrlSnippets VrlSnippet[] alertRules AlertRule[] availableTags Json? @default("[]") // string[] of admin-defined classification tags + + // AI-powered suggestions configuration + aiProvider String? // "openai" | "anthropic" | "custom" + aiBaseUrl String? // OpenAI-compatible API endpoint + aiApiKey String? // Encrypted via crypto.ts + aiModel String? // e.g. 
"gpt-4o", "claude-sonnet-4-20250514" + aiEnabled Boolean @default(false) + createdAt DateTime @default(now()) } diff --git a/src/app/(dashboard)/pipelines/[id]/page.tsx b/src/app/(dashboard)/pipelines/[id]/page.tsx index 45eba29f..1bebf069 100644 --- a/src/app/(dashboard)/pipelines/[id]/page.tsx +++ b/src/app/(dashboard)/pipelines/[id]/page.tsx @@ -30,12 +30,14 @@ import { import { ComponentPalette } from "@/components/flow/component-palette"; import { FlowCanvas } from "@/components/flow/flow-canvas"; import { FlowToolbar } from "@/components/flow/flow-toolbar"; +import { AiPipelineDialog } from "@/components/flow/ai-pipeline-dialog"; import { DetailPanel } from "@/components/flow/detail-panel"; import { DeployDialog } from "@/components/flow/deploy-dialog"; import { SaveTemplateDialog } from "@/components/flow/save-template-dialog"; import { ConfirmDialog } from "@/components/confirm-dialog"; import { PipelineMetricsChart } from "@/components/pipeline/metrics-chart"; import { PipelineLogs } from "@/components/pipeline/pipeline-logs"; +import { useTeamStore } from "@/stores/team-store"; function aggregateProcessStatus( statuses: Array<{ status: string }> @@ -130,6 +132,16 @@ function PipelineBuilderInner({ pipelineId }: { pipelineId: string }) { const [discardOpen, setDiscardOpen] = useState(false); const [metricsOpen, setMetricsOpen] = useState(false); const [logsOpen, setLogsOpen] = useState(false); + const [aiDialogOpen, setAiDialogOpen] = useState(false); + + const selectedTeamId = useTeamStore((s) => s.selectedTeamId); + const teamQuery = useQuery( + trpc.team.get.queryOptions( + { id: selectedTeamId! }, + { enabled: !!selectedTeamId }, + ), + ); + const aiEnabled = teamQuery.data?.aiEnabled ?? 
false; const loadGraph = useFlowStore((s) => s.loadGraph); const isDirty = useFlowStore((s) => s.isDirty); @@ -431,6 +443,8 @@ function PipelineBuilderInner({ pipelineId }: { pipelineId: string }) { } gitOpsMode={pipelineQuery.data?.gitOpsMode} onDiscardChanges={() => setDiscardOpen(true)} + aiEnabled={aiEnabled} + onAiOpen={() => setAiDialogOpen(true)} />
@@ -522,6 +536,13 @@ function PipelineBuilderInner({ pipelineId }: { pipelineId: string }) { + {aiEnabled && ( + + )}
); } diff --git a/src/app/(dashboard)/settings/_components/ai-settings.tsx b/src/app/(dashboard)/settings/_components/ai-settings.tsx new file mode 100644 index 00000000..4230e90e --- /dev/null +++ b/src/app/(dashboard)/settings/_components/ai-settings.tsx @@ -0,0 +1,269 @@ +"use client"; + +import { useState } from "react"; +import { useQuery, useMutation, useQueryClient } from "@tanstack/react-query"; +import { useTRPC } from "@/trpc/client"; +import { useTeamStore } from "@/stores/team-store"; +import { toast } from "sonner"; +import { Sparkles, Loader2, CheckCircle, XCircle } from "lucide-react"; + +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; +import { + Card, + CardContent, + CardDescription, + CardHeader, + CardTitle, +} from "@/components/ui/card"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { Switch } from "@/components/ui/switch"; +import { Skeleton } from "@/components/ui/skeleton"; +import { Badge } from "@/components/ui/badge"; + +const PROVIDER_DEFAULTS: Record = { + openai: { baseUrl: "https://api.openai.com/v1", placeholder: "gpt-4o" }, + anthropic: { baseUrl: "https://api.anthropic.com/v1", placeholder: "claude-sonnet-4-20250514" }, + custom: { baseUrl: "", placeholder: "model-name" }, +}; + +interface AiConfig { + aiEnabled: boolean; + aiProvider: string | null; + aiBaseUrl: string | null; + aiModel: string | null; + hasApiKey: boolean; +} + +function AiSettingsForm({ config, teamId }: { config: AiConfig; teamId: string }) { + const trpc = useTRPC(); + const queryClient = useQueryClient(); + + const [provider, setProvider] = useState(config.aiProvider ?? "openai"); + const [baseUrl, setBaseUrl] = useState(config.aiBaseUrl ?? ""); + const [apiKey, setApiKey] = useState(""); + const [model, setModel] = useState(config.aiModel ?? 
""); + const [enabled, setEnabled] = useState(config.aiEnabled); + const [testResult, setTestResult] = useState<{ ok: boolean; error?: string } | null>(null); + + const updateMutation = useMutation( + trpc.team.updateAiConfig.mutationOptions({ + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: trpc.team.getAiConfig.queryKey() }); + queryClient.invalidateQueries({ queryKey: trpc.team.get.queryKey() }); + toast.success("AI configuration saved"); + }, + onError: (error) => { + toast.error(error.message || "Failed to save AI config"); + }, + }), + ); + + const testMutation = useMutation( + trpc.team.testAiConnection.mutationOptions({ + onSuccess: (result) => { + setTestResult(result); + if (result.ok) { + toast.success("AI connection successful!"); + } else { + toast.error("Connection failed", { description: result.error }); + } + }, + onError: (error) => { + setTestResult({ ok: false, error: error.message }); + toast.error("Connection test failed", { description: error.message }); + }, + }), + ); + + const handleSave = () => { + const data: Record = { + teamId, + aiEnabled: enabled, + aiProvider: provider as "openai" | "anthropic" | "custom", + aiBaseUrl: baseUrl || null, + aiModel: model || null, + }; + if (apiKey) { + data.aiApiKey = apiKey; + } + updateMutation.mutate(data as Parameters[0]); + }; + + const handleTest = () => { + setTestResult(null); + testMutation.mutate({ teamId }); + }; + + const handleProviderChange = (value: string) => { + setProvider(value); + const defaults = PROVIDER_DEFAULTS[value]; + if (defaults) { + setBaseUrl(defaults.baseUrl); + } + setTestResult(null); + }; + + return ( +
+ + + + + AI-Powered Suggestions + + + Configure an OpenAI-compatible AI provider for VRL code assistance and + pipeline generation. Credentials are encrypted at rest and scoped to this team. + + + + {/* Enable/Disable Toggle */} +
+
+ +

+ When enabled, team members with Editor+ role can use AI assistance in the + VRL editor and pipeline builder. +

+
+ +
+ + {/* Provider Selection */} +
+ + +
+ + {/* Base URL */} +
+ + setBaseUrl(e.target.value)} + placeholder={PROVIDER_DEFAULTS[provider]?.baseUrl || "https://api.example.com/v1"} + /> +

+ OpenAI-compatible API endpoint. Pre-filled for known providers. +

+
+ + {/* API Key */} +
+ + setApiKey(e.target.value)} + placeholder={config.hasApiKey ? "••••••••••• (saved)" : "sk-..."} + /> +

+ {config.hasApiKey + ? "A key is already saved. Enter a new value to replace it." + : "Encrypted at rest using AES-256."} +

+
+ + {/* Model */} +
+ + setModel(e.target.value)} + placeholder={PROVIDER_DEFAULTS[provider]?.placeholder || "model-name"} + /> +
+ + {/* Actions */} +
+ + + {testResult && ( + + {testResult.ok ? ( + <> Connected + ) : ( + <> Failed + )} + + )} +
+
+
+
+ ); +} + +export function AiSettings() { + const trpc = useTRPC(); + const selectedTeamId = useTeamStore((s) => s.selectedTeamId); + + const configQuery = useQuery( + trpc.team.getAiConfig.queryOptions( + { teamId: selectedTeamId! }, + { enabled: !!selectedTeamId }, + ), + ); + + if (configQuery.isLoading || !configQuery.data) { + return ( +
+ + +
+ ); + } + + return ; +} diff --git a/src/app/(dashboard)/settings/ai/page.tsx b/src/app/(dashboard)/settings/ai/page.tsx new file mode 100644 index 00000000..23ebda40 --- /dev/null +++ b/src/app/(dashboard)/settings/ai/page.tsx @@ -0,0 +1,7 @@ +"use client"; + +import { AiSettings } from "../_components/ai-settings"; + +export default function AiPage() { + return ; +} diff --git a/src/app/api/ai/pipeline/route.ts b/src/app/api/ai/pipeline/route.ts new file mode 100644 index 00000000..bd4ed1da --- /dev/null +++ b/src/app/api/ai/pipeline/route.ts @@ -0,0 +1,112 @@ +export const runtime = "nodejs"; + +import { auth } from "@/auth"; +import { prisma } from "@/lib/prisma"; +import { streamCompletion } from "@/server/services/ai"; +import { buildPipelineSystemPrompt } from "@/lib/ai/prompts"; + +export async function POST(request: Request) { + const session = await auth(); + if (!session?.user?.id) { + return new Response(JSON.stringify({ error: "Unauthorized" }), { + status: 401, + headers: { "Content-Type": "application/json" }, + }); + } + + let body: { + teamId: string; + prompt: string; + mode: "generate" | "review"; + currentYaml?: string; + environmentName?: string; + }; + + try { + body = await request.json(); + } catch { + return new Response(JSON.stringify({ error: "Invalid JSON" }), { + status: 400, + headers: { "Content-Type": "application/json" }, + }); + } + + if (!body.teamId || !body.prompt || !body.mode) { + return new Response(JSON.stringify({ error: "teamId, prompt, and mode are required" }), { + status: 400, + headers: { "Content-Type": "application/json" }, + }); + } + + if (body.mode !== "generate" && body.mode !== "review") { + return new Response(JSON.stringify({ error: "mode must be 'generate' or 'review'" }), { + status: 400, + headers: { "Content-Type": "application/json" }, + }); + } + + // Verify user is at least EDITOR on this team + const membership = await prisma.teamMember.findUnique({ + where: { userId_teamId: { userId: session.user.id, 
teamId: body.teamId } }, + }); + const user = await prisma.user.findUnique({ + where: { id: session.user.id }, + select: { isSuperAdmin: true }, + }); + + if (!membership && !user?.isSuperAdmin) { + return new Response(JSON.stringify({ error: "Forbidden" }), { + status: 403, + headers: { "Content-Type": "application/json" }, + }); + } + if (membership && membership.role === "VIEWER" && !user?.isSuperAdmin) { + return new Response(JSON.stringify({ error: "EDITOR role required" }), { + status: 403, + headers: { "Content-Type": "application/json" }, + }); + } + + const mode = body.mode; + + const systemPrompt = buildPipelineSystemPrompt({ + mode, + currentYaml: body.currentYaml, + environmentName: body.environmentName, + }); + + const encoder = new TextEncoder(); + + const stream = new ReadableStream({ + async start(controller) { + try { + await streamCompletion({ + teamId: body.teamId, + systemPrompt, + userPrompt: body.prompt, + onToken: (token) => { + const data = JSON.stringify({ token }); + controller.enqueue(encoder.encode(`data: ${data}\n\n`)); + }, + signal: request.signal, + }); + controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`)); + } catch (err) { + const message = err instanceof Error ? 
err.message : "AI request failed"; + controller.enqueue( + encoder.encode(`data: ${JSON.stringify({ error: message })}\n\n`) + ); + } finally { + controller.close(); + } + }, + }); + + return new Response(stream, { + headers: { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); +} diff --git a/src/app/api/ai/vrl/route.ts b/src/app/api/ai/vrl/route.ts new file mode 100644 index 00000000..cc8b348c --- /dev/null +++ b/src/app/api/ai/vrl/route.ts @@ -0,0 +1,105 @@ +export const runtime = "nodejs"; + +import { auth } from "@/auth"; +import { prisma } from "@/lib/prisma"; +import { streamCompletion } from "@/server/services/ai"; +import { buildVrlSystemPrompt } from "@/lib/ai/prompts"; + +export async function POST(request: Request) { + const session = await auth(); + if (!session?.user?.id) { + return new Response(JSON.stringify({ error: "Unauthorized" }), { + status: 401, + headers: { "Content-Type": "application/json" }, + }); + } + + let body: { + teamId: string; + prompt: string; + currentCode?: string; + fields?: { name: string; type: string }[]; + componentType?: string; + sourceTypes?: string[]; + }; + + try { + body = await request.json(); + } catch { + return new Response(JSON.stringify({ error: "Invalid JSON" }), { + status: 400, + headers: { "Content-Type": "application/json" }, + }); + } + + if (!body.teamId || !body.prompt) { + return new Response(JSON.stringify({ error: "teamId and prompt are required" }), { + status: 400, + headers: { "Content-Type": "application/json" }, + }); + } + + // Verify user is at least EDITOR on this team + const membership = await prisma.teamMember.findUnique({ + where: { userId_teamId: { userId: session.user.id, teamId: body.teamId } }, + }); + const user = await prisma.user.findUnique({ + where: { id: session.user.id }, + select: { isSuperAdmin: true }, + }); + + if (!membership && !user?.isSuperAdmin) { + return new Response(JSON.stringify({ error: "Forbidden" }), { + 
status: 403, + headers: { "Content-Type": "application/json" }, + }); + } + if (membership && membership.role === "VIEWER" && !user?.isSuperAdmin) { + return new Response(JSON.stringify({ error: "EDITOR role required" }), { + status: 403, + headers: { "Content-Type": "application/json" }, + }); + } + + const systemPrompt = buildVrlSystemPrompt({ + fields: body.fields, + currentCode: body.currentCode, + componentType: body.componentType, + sourceTypes: body.sourceTypes, + }); + + const encoder = new TextEncoder(); + + const stream = new ReadableStream({ + async start(controller) { + try { + await streamCompletion({ + teamId: body.teamId, + systemPrompt, + userPrompt: body.prompt, + onToken: (token) => { + const data = JSON.stringify({ token }); + controller.enqueue(encoder.encode(`data: ${data}\n\n`)); + }, + signal: request.signal, + }); + controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`)); + } catch (err) { + const message = err instanceof Error ? err.message : "AI request failed"; + controller.enqueue( + encoder.encode(`data: ${JSON.stringify({ error: message })}\n\n`) + ); + } finally { + controller.close(); + } + }, + }); + + return new Response(stream, { + headers: { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); +} diff --git a/src/components/flow/ai-pipeline-dialog.tsx b/src/components/flow/ai-pipeline-dialog.tsx new file mode 100644 index 00000000..ad9a1214 --- /dev/null +++ b/src/components/flow/ai-pipeline-dialog.tsx @@ -0,0 +1,271 @@ +// src/components/flow/ai-pipeline-dialog.tsx +"use client"; + +import { useState, useRef, useCallback } from "react"; +import { Loader2, RotateCcw, Sparkles, AlertTriangle } from "lucide-react"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} 
from "@/components/ui/dialog"; +import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; +import { useTeamStore } from "@/stores/team-store"; +import { useFlowStore } from "@/stores/flow-store"; +import { generateVectorYaml, importVectorConfig } from "@/lib/config-generator"; +import { toast } from "sonner"; + +interface AiPipelineDialogProps { + open: boolean; + onOpenChange: (open: boolean) => void; + environmentName?: string; +} + +export function AiPipelineDialog({ + open, + onOpenChange, + environmentName, +}: AiPipelineDialogProps) { + const [prompt, setPrompt] = useState(""); + const [result, setResult] = useState(""); + const [isStreaming, setIsStreaming] = useState(false); + const [error, setError] = useState(null); + const [mode, setMode] = useState<"generate" | "review">("generate"); + const abortRef = useRef(null); + const selectedTeamId = useTeamStore((s) => s.selectedTeamId); + const nodes = useFlowStore((s) => s.nodes); + const edges = useFlowStore((s) => s.edges); + const globalConfig = useFlowStore((s) => s.globalConfig); + const loadGraph = useFlowStore((s) => s.loadGraph); + + const currentYaml = nodes.length > 0 + ? generateVectorYaml(nodes, edges, globalConfig) + : undefined; + + const handleSubmit = useCallback( + async (e?: React.FormEvent) => { + e?.preventDefault(); + if (!prompt.trim() || !selectedTeamId || isStreaming) return; + + setIsStreaming(true); + setResult(""); + setError(null); + + abortRef.current = new AbortController(); + + try { + const response = await fetch("/api/ai/pipeline", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + teamId: selectedTeamId, + prompt: prompt.trim(), + mode, + currentYaml: mode === "review" ? 
currentYaml : undefined, + environmentName, + }), + signal: abortRef.current.signal, + }); + + if (!response.ok) { + const errData = await response.json().catch(() => ({ error: "Request failed" })); + throw new Error(errData.error || `HTTP ${response.status}`); + } + + const reader = response.body?.getReader(); + if (!reader) throw new Error("No response stream"); + + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() ?? ""; + + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed || !trimmed.startsWith("data: ")) continue; + + try { + const data = JSON.parse(trimmed.slice(6)); + if (data.done) break; + if (data.error) throw new Error(data.error); + if (data.token) { + setResult((prev) => prev + data.token); + } + } catch (parseErr) { + if (parseErr instanceof Error && parseErr.message !== "Unexpected end of JSON input") { + throw parseErr; + } + } + } + } + } catch (err) { + if (err instanceof Error && err.name === "AbortError") return; + setError(err instanceof Error ? 
err.message : "AI request failed"); + } finally { + setIsStreaming(false); + abortRef.current = null; + } + }, + [prompt, selectedTeamId, currentYaml, mode, environmentName, isStreaming], + ); + + const handleApplyToCanvas = () => { + try { + // Strip any markdown fencing the LLM might have added + let yaml = result.trim(); + if (yaml.startsWith("```yaml")) yaml = yaml.slice(7); + if (yaml.startsWith("```")) yaml = yaml.slice(3); + if (yaml.endsWith("```")) yaml = yaml.slice(0, -3); + yaml = yaml.trim(); + + const { nodes: newNodes, edges: newEdges, globalConfig: importedGlobalConfig } = + importVectorConfig(yaml); + + if (nodes.length === 0) { + // Empty pipeline: replace + loadGraph(newNodes, newEdges, importedGlobalConfig); + } else { + // Existing pipeline: add alongside (offset positions to avoid overlap) + const maxY = Math.max(...nodes.map((n) => n.position.y), 0); + const offsetNodes = newNodes.map((n) => ({ + ...n, + position: { x: n.position.x, y: n.position.y + maxY + 200 }, + })); + const mergedConfig = importedGlobalConfig + ? { ...importedGlobalConfig, ...globalConfig } + : globalConfig; + loadGraph([...nodes, ...offsetNodes], [...edges, ...newEdges], mergedConfig); + } + + toast.success(`Applied ${newNodes.length} components to canvas`); + onOpenChange(false); + setResult(""); + setPrompt(""); + } catch (err) { + toast.error("Failed to parse YAML", { + description: err instanceof Error ? err.message : "Invalid YAML output", + }); + } + }; + + const handleCancel = () => { + abortRef.current?.abort(); + }; + + return ( + + + + + + AI Pipeline Builder + + + Describe what you want to build, or ask for a review of your current pipeline. + + + + setMode(v as "generate" | "review")}> + + Generate + + Review + + + + +
+ +
+ setPrompt(e.target.value)} + placeholder="Collect K8s logs, drop debug, send to Datadog and S3" + disabled={isStreaming} + /> + {isStreaming ? ( + + ) : ( + + )} +
+
+
+ + +
+ +
+ setPrompt(e.target.value)} + placeholder="Is my pipeline config optimal? Any issues?" + disabled={isStreaming} + /> + {isStreaming ? ( + + ) : ( + + )} +
+
+
+
+ + {error && ( +
+ + {error} +
+ )} + + {(result || isStreaming) && ( +
+ +
+ {result || ( + + + {mode === "generate" ? "Generating pipeline..." : "Reviewing pipeline..."} + + )} +
+ {!isStreaming && result && ( +
+ {mode === "generate" && ( + + )} + +
+ )} +
+ )} +
+
+ ); +} diff --git a/src/components/flow/flow-toolbar.tsx b/src/components/flow/flow-toolbar.tsx index ddf71228..ab0ce46f 100644 --- a/src/components/flow/flow-toolbar.tsx +++ b/src/components/flow/flow-toolbar.tsx @@ -21,6 +21,7 @@ import { Info, Clock, X, + Sparkles, } from "lucide-react"; import { toast } from "sonner"; import { Button } from "@/components/ui/button"; @@ -72,6 +73,8 @@ interface FlowToolbarProps { processStatus?: ProcessStatusValue | null; gitOpsMode?: string; onDiscardChanges?: () => void; + aiEnabled?: boolean; + onAiOpen?: () => void; } function downloadFile(content: string, filename: string) { @@ -103,6 +106,8 @@ export function FlowToolbar({ processStatus, gitOpsMode, onDiscardChanges, + aiEnabled, + onAiOpen, }: FlowToolbarProps) { const globalConfig = useFlowStore((s) => s.globalConfig); const canUndo = useFlowStore((s) => s.canUndo); @@ -330,6 +335,23 @@ export function FlowToolbar({ Save as template + {aiEnabled && ( + + + + + AI pipeline builder + + )} + {pipelineId && ( <> diff --git a/src/components/settings-sidebar-nav.tsx b/src/components/settings-sidebar-nav.tsx index 0b012812..7de46153 100644 --- a/src/components/settings-sidebar-nav.tsx +++ b/src/components/settings-sidebar-nav.tsx @@ -9,6 +9,7 @@ import { HardDrive, KeyRound, Bot, + Sparkles, } from "lucide-react"; export const settingsNavGroups = [ @@ -33,6 +34,7 @@ export const settingsNavGroups = [ { title: "Teams", href: "/settings/teams", icon: Building2, requiredSuperAdmin: true }, { title: "Team Settings", href: "/settings/team", icon: Users, requiredSuperAdmin: false }, { title: "Service Accounts", href: "/settings/service-accounts", icon: Bot, requiredSuperAdmin: false }, + { title: "AI", href: "/settings/ai", icon: Sparkles, requiredSuperAdmin: false }, ], }, { diff --git a/src/components/vrl-editor/ai-input.tsx b/src/components/vrl-editor/ai-input.tsx new file mode 100644 index 00000000..f32d7feb --- /dev/null +++ b/src/components/vrl-editor/ai-input.tsx @@ -0,0 
+1,186 @@ +// src/components/vrl-editor/ai-input.tsx +"use client"; + +import { useState, useRef, useCallback } from "react"; +import { Loader2, RotateCcw, Plus, Replace } from "lucide-react"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { useTeamStore } from "@/stores/team-store"; + +interface AiInputProps { + currentCode: string; + fields?: { name: string; type: string }[]; + componentType?: string; + sourceTypes?: string[]; + onInsert: (code: string) => void; + onReplace: (code: string) => void; +} + +export function AiInput({ + currentCode, + fields, + componentType, + sourceTypes, + onInsert, + onReplace, +}: AiInputProps) { + const [prompt, setPrompt] = useState(""); + const [result, setResult] = useState(""); + const [isStreaming, setIsStreaming] = useState(false); + const [error, setError] = useState(null); + const abortRef = useRef(null); + const selectedTeamId = useTeamStore((s) => s.selectedTeamId); + + const handleSubmit = useCallback( + async (e?: React.FormEvent) => { + e?.preventDefault(); + if (!prompt.trim() || !selectedTeamId || isStreaming) return; + + setIsStreaming(true); + setResult(""); + setError(null); + + abortRef.current = new AbortController(); + + try { + const response = await fetch("/api/ai/vrl", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + teamId: selectedTeamId, + prompt: prompt.trim(), + currentCode, + fields, + componentType, + sourceTypes, + }), + signal: abortRef.current.signal, + }); + + if (!response.ok) { + const errData = await response.json().catch(() => ({ error: "Request failed" })); + throw new Error(errData.error || `HTTP ${response.status}`); + } + + const reader = response.body?.getReader(); + if (!reader) throw new Error("No response stream"); + + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += 
decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() ?? ""; + + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed || !trimmed.startsWith("data: ")) continue; + + try { + const data = JSON.parse(trimmed.slice(6)); + if (data.done) break; + if (data.error) throw new Error(data.error); + if (data.token) { + setResult((prev) => prev + data.token); + } + } catch (parseErr) { + if (parseErr instanceof Error && parseErr.message !== "Unexpected end of JSON input") { + throw parseErr; + } + } + } + } + } catch (err) { + if (err instanceof Error && err.name === "AbortError") return; + setError(err instanceof Error ? err.message : "AI request failed"); + } finally { + setIsStreaming(false); + abortRef.current = null; + } + }, + [prompt, selectedTeamId, currentCode, fields, componentType, sourceTypes, isStreaming], + ); + + const handleCancel = () => { + abortRef.current?.abort(); + }; + + const handleRegenerate = () => { + setResult(""); + handleSubmit(); + }; + + return ( +
+
+ setPrompt(e.target.value)} + placeholder="Describe what you want the VRL to do..." + disabled={isStreaming} + className="text-sm" + /> + {isStreaming ? ( + + ) : ( + + )} +
+ + {error && ( +
+ {error} +
+ )} + + {(result || isStreaming) && ( +
+
+ {result || ( + + + Generating... + + )} +
+ {!isStreaming && result && ( +
+ + + +
+ )} +
+ )} +
+ ); +} diff --git a/src/components/vrl-editor/vrl-editor.tsx b/src/components/vrl-editor/vrl-editor.tsx index 371b37a1..c8e9d78e 100644 --- a/src/components/vrl-editor/vrl-editor.tsx +++ b/src/components/vrl-editor/vrl-editor.tsx @@ -4,7 +4,7 @@ import { useState, useCallback, useRef, useMemo, useEffect } from "react"; import dynamic from "next/dynamic"; import { useTRPC } from "@/trpc/client"; import { useMutation, useQuery } from "@tanstack/react-query"; -import { BookOpen, Code, ChevronLeft, ChevronRight, Columns3, Download, Loader2 } from "lucide-react"; +import { BookOpen, Code, ChevronLeft, ChevronRight, Columns3, Download, Loader2, Sparkles } from "lucide-react"; import { Button } from "@/components/ui/button"; import { Badge } from "@/components/ui/badge"; import { Skeleton } from "@/components/ui/skeleton"; @@ -26,6 +26,8 @@ import { vrlTheme } from "./vrl-theme"; import { VRL_SNIPPETS } from "@/lib/vrl/snippets"; import { VrlSnippetDrawer } from "@/components/flow/vrl-snippet-drawer"; import { VrlFieldsPanel } from "./vrl-fields-panel"; +import { AiInput } from "./ai-input"; +import { useTeamStore } from "@/stores/team-store"; import { getMergedOutputSchemas, getSourceOutputSchema } from "@/lib/vector/source-output-schemas"; import type { Monaco, OnMount } from "@monaco-editor/react"; @@ -64,7 +66,7 @@ export function VrlEditor({ value, onChange, sourceTypes, pipelineId, upstreamSo const [sampleInput, setSampleInput] = useState(""); const [testOutput, setTestOutput] = useState(null); const [testError, setTestError] = useState(null); - const [toolsPanel, setToolsPanel] = useState<"fields" | "snippets" | null>(null); + const [toolsPanel, setToolsPanel] = useState<"fields" | "snippets" | "ai" | null>(null); const [expanded, setExpanded] = useState(false); const editorRef = useRef(null); const monacoRef = useRef(null); @@ -77,6 +79,15 @@ export function VrlEditor({ value, onChange, sourceTypes, pipelineId, upstreamSo const [sampleIndex, setSampleIndex] = 
useState(0); const [liveSchemaFields, setLiveSchemaFields] = useState>([]); + const selectedTeamId = useTeamStore((s) => s.selectedTeamId); + const teamQuery = useQuery( + trpc.team.get.queryOptions( + { id: selectedTeamId! }, + { enabled: !!selectedTeamId }, + ), + ); + const aiEnabled = teamQuery.data?.aiEnabled ?? false; + const isRawTextSource = useMemo(() => { if (!sourceTypes || sourceTypes.length === 0) return false; return sourceTypes.some((t) => { @@ -92,6 +103,19 @@ export function VrlEditor({ value, onChange, sourceTypes, pipelineId, upstreamSo [sourceTypes], ); + const mergedFieldsForAi = useMemo(() => { + const staticFields = getMergedOutputSchemas(sourceTypes ?? []); + const liveByPath = new Map(liveSchemaFields.map((f) => [f.path, f])); + const all = staticFields.map((f) => { + liveByPath.delete(f.path); + return { name: f.path.replace(/^\./, ""), type: f.type }; + }); + for (const [, f] of liveByPath) { + all.push({ name: f.path.replace(/^\./, ""), type: f.type }); + } + return all; + }, [sourceTypes, liveSchemaFields]); + const testMutation = useMutation( trpc.vrl.test.mutationOptions({ onSuccess: (data) => { @@ -368,6 +392,16 @@ export function VrlEditor({ value, onChange, sourceTypes, pipelineId, upstreamSo Snippets + {aiEnabled && ( + + )} {pipelineId && upstreamSourceKeys && upstreamSourceKeys.length > 0 && ( <>