diff --git a/apps/web/src/app/page.tsx b/apps/web/src/app/page.tsx index 9e327be..2618a07 100644 --- a/apps/web/src/app/page.tsx +++ b/apps/web/src/app/page.tsx @@ -21,6 +21,7 @@ function getGraphData(): GraphData { serviceFlow: EMPTY_VIEW, dataFlow: EMPTY_VIEW, functionFlow: EMPTY_VIEW, + containerDiagram: EMPTY_VIEW, }, }; } diff --git a/apps/web/src/components/GraphTabs.tsx b/apps/web/src/components/GraphTabs.tsx index d8e860f..a9dfcbb 100644 --- a/apps/web/src/components/GraphTabs.tsx +++ b/apps/web/src/components/GraphTabs.tsx @@ -1,11 +1,12 @@ "use client"; -export type ViewTab = "serviceFlow" | "dataFlow" | "functionFlow"; +export type ViewTab = "serviceFlow" | "dataFlow" | "functionFlow" | "containerDiagram"; const TABS: { id: ViewTab; label: string; description: string }[] = [ { id: "serviceFlow", label: "Service Flow", description: "Service dependencies" }, { id: "dataFlow", label: "Data Flow", description: "DTO contracts" }, { id: "functionFlow", label: "Function Flow", description: "Intra-service method call graph" }, + { id: "containerDiagram", label: "Container", description: "C4-style container diagram with infrastructure" }, ]; interface Props { diff --git a/apps/web/src/components/GraphView.tsx b/apps/web/src/components/GraphView.tsx index bb6e030..babb23a 100644 --- a/apps/web/src/components/GraphView.tsx +++ b/apps/web/src/components/GraphView.tsx @@ -1,6 +1,6 @@ "use client"; -import { useEffect, useCallback } from "react"; +import { useEffect, useCallback, useMemo } from "react"; import { ReactFlow, Background, @@ -20,15 +20,132 @@ import { buildKindMap } from "@/lib/classify"; import ServiceNode from "./nodes/ServiceNode"; import DataTypeNode from "./nodes/DataTypeNode"; import FunctionNode from "./nodes/FunctionNode"; +import DatabaseNode from "./nodes/DatabaseNode"; +import QueueNode from "./nodes/QueueNode"; +import CacheNode from "./nodes/CacheNode"; +import ExternalNode from "./nodes/ExternalNode"; import ServiceSearch from 
"./ServiceSearch"; -import { applyDagreLayout } from "@/lib/layout"; +import { getLayoutedElements } from "@/lib/layout"; const nodeTypes = { serviceNode: ServiceNode, dataTypeNode: DataTypeNode, functionNode: FunctionNode, + databaseNode: DatabaseNode, + queueNode: QueueNode, + cacheNode: CacheNode, + externalNode: ExternalNode, }; +// ─── Edge routing helpers ───────────────────────────────────────────────────── + +/** + * Returns the absolute canvas center of a node. For child nodes (parentId set) + * the parent top-left is added so all positions share the same coordinate space. + */ +function getAbsoluteCenter( + nodeId: string, + nodeMap: Map +): { x: number; y: number } { + const n = nodeMap.get(nodeId); + if (!n) return { x: 0, y: 0 }; + const w = n.measured?.width ?? 260; + const h = n.measured?.height ?? 160; + let x = (n.position?.x ?? 0) + w / 2; + let y = (n.position?.y ?? 0) + h / 2; + if (n.parentId) { + const parent = nodeMap.get(n.parentId); + if (parent) { + x += parent.position?.x ?? 0; + y += parent.position?.y ?? 0; + } + } + return { x, y }; +} + +/** + * Assigns sourceHandle / targetHandle and enforces smoothstep routing so edges + * travel through the channels between nodes rather than crossing node boxes. + * + * A per-node per-handle occupancy counter distributes multiple edges that leave + * the same node on the same side to different dock points instead of stacking. + * + * Priority order for each direction: + * going right → right, bottom, top, left + * going left → left, bottom, top, right + * going down → bottom, right, left, top + * going up → top, right, left, bottom + */ +function routeEdges(edges: any[], nodeMap: Map): any[] { + const usage = new Map(); // `${nodeId}:${handleId}` → count + + function inc(nodeId: string, handleId: string) { + const key = `${nodeId}:${handleId}`; + usage.set(key, (usage.get(key) ?? 
0) + 1); + } + + function pickHandle(nodeId: string, prefs: string[]): string { + let best = prefs[0]; + let bestCount = usage.get(`${nodeId}:${best}`) ?? 0; + for (let i = 1; i < prefs.length; i++) { + const c = usage.get(`${nodeId}:${prefs[i]}`) ?? 0; + if (c < bestCount) { bestCount = c; best = prefs[i]; } + } + return best; + } + + return edges.map((e) => { + const src = getAbsoluteCenter(e.source, nodeMap); + const tgt = getAbsoluteCenter(e.target, nodeMap); + const dx = tgt.x - src.x; + const dy = tgt.y - src.y; + + let srcPrefs: string[]; + let tgtPrefs: string[]; + + if (Math.abs(dx) >= Math.abs(dy)) { + if (dx >= 0) { + srcPrefs = ["source-right", "source-bottom", "source-top", "source-left"]; + tgtPrefs = ["target-left", "target-bottom", "target-top", "target-right"]; + } else { + srcPrefs = ["source-left", "source-bottom", "source-top", "source-right"]; + tgtPrefs = ["target-right", "target-bottom", "target-top", "target-left"]; + } + } else { + if (dy >= 0) { + srcPrefs = ["source-bottom", "source-right", "source-left", "source-top"]; + tgtPrefs = ["target-top", "target-right", "target-left", "target-bottom"]; + } else { + srcPrefs = ["source-top", "source-right", "source-left", "source-bottom"]; + tgtPrefs = ["target-bottom", "target-right", "target-left", "target-top"]; + } + } + + const sourceHandle = pickHandle(e.source, srcPrefs); + const targetHandle = pickHandle(e.target, tgtPrefs); + inc(e.source, sourceHandle); + inc(e.target, targetHandle); + + // markerEnd: accept both string "arrow" (from graph-builder) and already- + // converted { type: "arrow" } objects (when re-routing after auto-layout). + const markerEnd = e.markerEnd + ? typeof e.markerEnd === "string" ? { type: e.markerEnd } : e.markerEnd + : undefined; + + return { + ...e, + // smoothstep produces orthogonal (right-angle) paths that travel through + // the channels between nodes instead of cutting diagonally across them. + type: e.type ?? 
"smoothstep", + sourceHandle, + targetHandle, +...(markerEnd ? { markerEnd } : {}), + }; + }); +} + +// ─── Canvas component ───────────────────────────────────────────────────────── + interface Props { view: GraphViewData; viewType: ViewTab; @@ -49,7 +166,7 @@ function GraphCanvas({ onSelectedServiceChange, onDrillIn, }: Omit) { - // Compute filtered nodes/edges for functionFlow + // ── 1. Filter nodes and edges ───────────────────────────────────────────── const serviceFilteredNodes = viewType === "functionFlow" && selectedServiceId ? view.nodes.filter((n) => (n.data as any).serviceId === selectedServiceId) @@ -64,7 +181,6 @@ function GraphCanvas({ ) : view.edges; - // Filter orphan nodes in functionFlow const filteredNodes = viewType === "functionFlow" ? (() => { const connectedIds = new Set(filteredEdges.flatMap((e) => [e.source, e.target])); @@ -72,7 +188,7 @@ function GraphCanvas({ })() : serviceFilteredNodes; - // Enrich function nodes with kind metadata + // ── 2. Enrich and assign z-index ────────────────────────────────────────── const enrichedNodes = viewType === "functionFlow" ? (() => { const kindMap = buildKindMap(filteredNodes, allServices); @@ -83,20 +199,49 @@ function GraphCanvas({ })() : filteredNodes; - const [nodes, setNodes, onNodesChange] = useNodesState(enrichedNodes as any); - const [edges, setEdges, onEdgesChange] = useEdgesState(filteredEdges as any); + // ── 3. Dagre layout (memoised — re-runs only when view data changes) ─────── + // Applying layout before routing means handle selection is based on the + // final node positions, so edge dock assignments are accurate. + // On first render node.measured is undefined; dagre uses the hardcoded + // defaults (260×160). The Auto Layout button re-runs with real dimensions. 
+ const laidNodes = useMemo(() => { + const { nodes } = getLayoutedElements(enrichedNodes as any, filteredEdges as any); + return nodes; + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [view, viewType, selectedServiceId]); + + // ── 4. Route edges using laid-out positions ─────────────────────────────── + const rfEdges = useMemo(() => { + const map = new Map((laidNodes as any[]).map((n: any) => [n.id, n])); + return routeEdges(filteredEdges, map); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [laidNodes]); + + // ── 5. React Flow state ─────────────────────────────────────────────────── + const [nodes, setNodes, onNodesChange] = useNodesState(laidNodes as any); + const [edges, setEdges, onEdgesChange] = useEdgesState(rfEdges as any); const { fitView } = useReactFlow(); + // Sync state whenever the laid-out data changes (view switch, filter change) useEffect(() => { - setNodes(enrichedNodes as any); - setEdges(filteredEdges as any); - }, [view, viewType, selectedServiceId, setNodes, setEdges]); // eslint-disable-line react-hooks/exhaustive-deps + setNodes(laidNodes as any); + setEdges(rfEdges as any); + setTimeout(() => fitView({ duration: 300 }), 50); + }, [laidNodes, rfEdges, setNodes, setEdges, fitView]); + // Manual re-layout: uses measured dimensions from current state and re-routes + // edges from the original (unprocessed) filteredEdges to avoid double- + // converting markerEnd. const handleAutoLayout = useCallback(() => { - const laid = applyDagreLayout(nodes as any, edges as any); + // Re-run with actual measured dimensions (available after first render). 
+ const { nodes: laid } = getLayoutedElements(nodes as any, filteredEdges as any); + const nm = new Map(laid.map((n: any) => [n.id, n])); + const rerouted = routeEdges(filteredEdges, nm); setNodes(laid as any); + setEdges(rerouted as any); setTimeout(() => fitView({ duration: 400 }), 50); - }, [nodes, edges, setNodes, fitView]); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [nodes, filteredEdges, setNodes, setEdges, fitView]); return (
-
📡

No graph data yet

Run the scanner to generate your architecture map

@@ -172,7 +318,6 @@ export default function GraphView({ ); } - // Function flow empty state when no service selected if (viewType === "functionFlow" && !selectedServiceId) { return (
@@ -201,6 +346,8 @@ export default function GraphView({ ? "No DTOs/data types detected" : viewType === "functionFlow" ? "No functions detected in scanned services" + : viewType === "containerDiagram" + ? "No infrastructure declared in archmap.yml files" : "No services detected"}

diff --git a/apps/web/src/components/ServiceSearch.tsx b/apps/web/src/components/ServiceSearch.tsx index 7993563..fa06fee 100644 --- a/apps/web/src/components/ServiceSearch.tsx +++ b/apps/web/src/components/ServiceSearch.tsx @@ -70,7 +70,7 @@ export default function ServiceSearch({ services, selectedId, onChange }: Props) className="text-gray-400 hover:text-gray-200 shrink-0" aria-label="Clear selection" > - ✕ + x )} diff --git a/apps/web/src/components/nodes/CacheNode.tsx b/apps/web/src/components/nodes/CacheNode.tsx new file mode 100644 index 0000000..bab0292 --- /dev/null +++ b/apps/web/src/components/nodes/CacheNode.tsx @@ -0,0 +1,48 @@ +"use client"; + +import { Handle, Position } from "@xyflow/react"; +import type { NodeProps } from "@xyflow/react"; + +interface InfraNodeData { + name: string; + technology?: string; + description?: string; + ref?: string; +} + +export default function CacheNode({ data }: NodeProps) { + const d = data as unknown as InfraNodeData; + const refLabel = d.ref + ? d.ref.includes("?ref=") || (d.ref.includes("/") && !d.ref.startsWith("./")) + ? d.ref.replace(/^([^/]+)\/.*\?ref=(.+)$/, "$1@$2") + : d.ref + : null; + + return ( + <> + + + + +
+
+ CACHE + {d.name} +
+ {d.technology && ( +

{d.technology}

+ )} + {d.description && ( +

{d.description}

+ )} + {refLabel && ( +

{refLabel}

+ )} +
+ + + + + + ); +} diff --git a/apps/web/src/components/nodes/DataTypeNode.tsx b/apps/web/src/components/nodes/DataTypeNode.tsx index f9a7ed8..924229d 100644 --- a/apps/web/src/components/nodes/DataTypeNode.tsx +++ b/apps/web/src/components/nodes/DataTypeNode.tsx @@ -16,7 +16,10 @@ export default function DataTypeNode({ data }: NodeProps) { return ( <> - + + + +
DTO @@ -36,7 +39,10 @@ export default function DataTypeNode({ data }: NodeProps) { )}
- + + + + ); } diff --git a/apps/web/src/components/nodes/DatabaseNode.tsx b/apps/web/src/components/nodes/DatabaseNode.tsx new file mode 100644 index 0000000..5bf08ce --- /dev/null +++ b/apps/web/src/components/nodes/DatabaseNode.tsx @@ -0,0 +1,53 @@ +"use client"; + +import { Handle, Position } from "@xyflow/react"; +import type { NodeProps } from "@xyflow/react"; + +interface InfraNodeData { + name: string; + technology?: string; + description?: string; + ref?: string; +} + +export default function DatabaseNode({ data }: NodeProps) { + const d = data as unknown as InfraNodeData; + const refLabel = d.ref + ? d.ref.includes("?ref=") || (d.ref.includes("/") && !d.ref.startsWith("./")) + ? d.ref.replace(/^([^/]+)\/.*\?ref=(.+)$/, "$1@$2") + : d.ref + : null; + + return ( + <> + + + + +
+
+ DB + {/* Cylinder icon */} + + + + + {d.name} +
+ {d.technology && ( +

{d.technology}

+ )} + {d.description && ( +

{d.description}

+ )} + {refLabel && ( +

{refLabel}

+ )} +
+ + + + + + ); +} diff --git a/apps/web/src/components/nodes/ExternalNode.tsx b/apps/web/src/components/nodes/ExternalNode.tsx new file mode 100644 index 0000000..3434e2a --- /dev/null +++ b/apps/web/src/components/nodes/ExternalNode.tsx @@ -0,0 +1,48 @@ +"use client"; + +import { Handle, Position } from "@xyflow/react"; +import type { NodeProps } from "@xyflow/react"; + +interface InfraNodeData { + name: string; + technology?: string; + description?: string; + ref?: string; +} + +export default function ExternalNode({ data }: NodeProps) { + const d = data as unknown as InfraNodeData; + const refLabel = d.ref + ? d.ref.includes("?ref=") || (d.ref.includes("/") && !d.ref.startsWith("./")) + ? d.ref.replace(/^([^/]+)\/.*\?ref=(.+)$/, "$1@$2") + : d.ref + : null; + + return ( + <> + + + + +
+
+ EXT + {d.name} +
+ {d.technology && ( +

{d.technology}

+ )} + {d.description && ( +

{d.description}

+ )} + {refLabel && ( +

{refLabel}

+ )} +
+ + + + + + ); +} diff --git a/apps/web/src/components/nodes/FunctionNode.tsx b/apps/web/src/components/nodes/FunctionNode.tsx index 99fe12c..2fe9ab8 100644 --- a/apps/web/src/components/nodes/FunctionNode.tsx +++ b/apps/web/src/components/nodes/FunctionNode.tsx @@ -43,7 +43,7 @@ const KIND_STYLES: Record< scheduler: { border: "border-orange-500", accent: "bg-orange-500", - badge: "⏰ SCHEDULED", + badge: "SCHEDULED", badgeClass: "bg-orange-900 text-orange-300", }, service: { @@ -96,7 +96,10 @@ export default function FunctionNode({ data }: NodeProps) { return ( <> - + + + +
@@ -146,7 +149,10 @@ export default function FunctionNode({ data }: NodeProps) {
- + + + + ); } diff --git a/apps/web/src/components/nodes/QueueNode.tsx b/apps/web/src/components/nodes/QueueNode.tsx new file mode 100644 index 0000000..9d12ec2 --- /dev/null +++ b/apps/web/src/components/nodes/QueueNode.tsx @@ -0,0 +1,54 @@ +"use client"; + +import { Handle, Position } from "@xyflow/react"; +import type { NodeProps } from "@xyflow/react"; + +interface InfraNodeData { + name: string; + technology?: string; + description?: string; + ref?: string; +} + +export default function QueueNode({ data }: NodeProps) { + const d = data as unknown as InfraNodeData; + const refLabel = d.ref + ? d.ref.includes("?ref=") || (d.ref.includes("/") && !d.ref.startsWith("./")) + ? d.ref.replace(/^([^/]+)\/.*\?ref=(.+)$/, "$1@$2") + : d.ref + : null; + + return ( + <> + + + + +
+
+ QUEUE + {/* Parallel lines icon */} + + + + + + {d.name} +
+ {d.technology && ( +

{d.technology}

+ )} + {d.description && ( +

{d.description}

+ )} + {refLabel && ( +

{refLabel}

+ )} +
+ + + + + + ); +} diff --git a/apps/web/src/components/nodes/ServiceNode.tsx b/apps/web/src/components/nodes/ServiceNode.tsx index 2b2f388..179f8cb 100644 --- a/apps/web/src/components/nodes/ServiceNode.tsx +++ b/apps/web/src/components/nodes/ServiceNode.tsx @@ -39,7 +39,14 @@ export default function ServiceNode({ data }: NodeProps) { return ( <> - + + + + + + + +
)}
- ); } diff --git a/apps/web/src/lib/layout.ts b/apps/web/src/lib/layout.ts index 2364bd2..f147a61 100644 --- a/apps/web/src/lib/layout.ts +++ b/apps/web/src/lib/layout.ts @@ -1,35 +1,270 @@ +/** + * Architecture-aware Dagre layout for microservice diagrams. + * + * Layering rules (LR direction): + * Rank 0 – Group / domain boundary nodes + * Rank 1 – Service nodes (main anchors) + * Rank 2 – Database / Cache nodes (forced right via edge minlen) + * Rank 3 – External nodes + * Manual – Queue / Message-bus nodes pulled out of Dagre and placed + * below-centre of all other nodes after layout completes. + */ + import dagre from "@dagrejs/dagre"; import type { Node, Edge } from "@xyflow/react"; -const NODE_WIDTH = 180; -const NODE_HEIGHT = 60; +// ─── Node-type categories ───────────────────────────────────────────────────── -export function applyDagreLayout( +/** Service anchors — placed in the leftmost ranks by Dagre. */ +const SERVICE_TYPES = new Set(["serviceNode", "group"]); + +/** + * Database / cache nodes — always placed to the RIGHT of their owning service + * by assigning higher edge weight + minlen on service→db edges. + */ +const DATABASE_TYPES = new Set(["databaseNode", "cacheNode"]); + +/** + * Message-bus nodes — excluded from Dagre and manually placed as a horizontal + * strip below-centre of all other nodes after layout finishes. + */ +const BUS_TYPES = new Set(["queueNode"]); + +// ─── Constants ──────────────────────────────────────────────────────────────── + +export const DEFAULT_LAYOUT_SPACING = 64; + +/** + * Dimensions given to the Dagre algorithm for spacing calculations. + * These are intentionally conservative; the actual rendered node size + * (node.measured) is used for positioning when available. 
+ */ +const ALGO_NODE_WIDTH = 200; +const ALGO_NODE_HEIGHT = 150; + +// ─── Private helpers ────────────────────────────────────────────────────────── + +function nodeW(n: Node, fallback = ALGO_NODE_WIDTH): number { + return (n as any).measured?.width ?? fallback; +} + +function nodeH(n: Node, fallback = ALGO_NODE_HEIGHT): number { + return (n as any).measured?.height ?? fallback; +} + +/** + * Iteratively push apart any top-level nodes whose bounding boxes overlap. + * Child nodes (parentId set) are skipped — they are positioned relative to + * their parent group and do not participate in global overlap resolution. + */ +function resolveOverlaps(nodes: Node[], padding = 16): Node[] { + const result = nodes.map((n) => ({ ...n, position: { ...n.position } })); + const topLevel = result.filter((n) => !(n as any).parentId); + + for (let iter = 0; iter < 100; iter++) { + let changed = false; + for (let i = 0; i < topLevel.length; i++) { + for (let j = i + 1; j < topLevel.length; j++) { + const a = topLevel[i]; + const b = topLevel[j]; + + const ax1 = a.position.x - padding; + const ay1 = a.position.y - padding; + const ax2 = a.position.x + nodeW(a) + padding; + const ay2 = a.position.y + nodeH(a) + padding; + + const bx1 = b.position.x - padding; + const by1 = b.position.y - padding; + const bx2 = b.position.x + nodeW(b) + padding; + const by2 = b.position.y + nodeH(b) + padding; + + if (ax1 >= bx2 || ax2 <= bx1 || ay1 >= by2 || ay2 <= by1) continue; + + const overlapX = Math.min(ax2 - bx1, bx2 - ax1); + const overlapY = Math.min(ay2 - by1, by2 - ay1); + + if (overlapX <= overlapY) { + const push = overlapX / 2 + 1; + if (a.position.x < b.position.x) { a.position.x -= push; b.position.x += push; } + else { a.position.x += push; b.position.x -= push; } + } else { + const push = overlapY / 2 + 1; + if (a.position.y < b.position.y) { a.position.y -= push; b.position.y += push; } + else { a.position.y += push; b.position.y -= push; } + } + changed = true; + } + } + if 
(!changed) break; + } + + return result; +} + +// ─── Public API ─────────────────────────────────────────────────────────────── + +export interface LayoutConfig { + /** Graph direction. Default: "LR" (Left → Right). */ + direction?: "LR" | "TB"; + /** Node-width hint passed to Dagre for spacing maths (px). Default: 200. */ + nodeWidth?: number; + /** Node-height hint passed to Dagre for spacing maths (px). Default: 150. */ + nodeHeight?: number; + /** Gap between sibling nodes and between rank levels (px). Default: 64. */ + spacing?: number; +} + +/** + * Architecture-aware layout helper. + * + * Algorithm: + * 1. Split nodes into three buckets: + * – dagreNodes : everything except bus nodes and React-Flow child nodes + * – busNodes : queueNode / message-bus (manually positioned) + * – childNodes : nodes with parentId (positioned by their parent group) + * + * 2. Build the Dagre graph: + * – Service/group nodes get default edge weights (anchor left). + * – Edges that target a database/cache node get weight=3, minlen=2 so + * Dagre pushes those nodes two ranks further right than their service. + * – Edges involving bus or child nodes are resolved to their top-level + * representative before being added (child → parent group). + * + * 3. Run dagre.layout() and read back (x, y) positions. + * + * 4. Post-process bus/queue nodes: + * – Compute the bounding box of all Dagre-positioned nodes. + * – Place the bus strip below-centre with `spacing * 2` vertical gap. + * + * 5. Resolve any remaining bounding-box overlaps among top-level nodes. + * + * 6. Reattach child nodes (untouched) and return. + */ +export function getLayoutedElements( nodes: Node[], edges: Edge[], - direction: "TB" | "LR" = "LR" -): Node[] { + config: LayoutConfig = {} +): { nodes: Node[]; edges: Edge[] } { + const { + direction = "LR", + nodeWidth = ALGO_NODE_WIDTH, + nodeHeight = ALGO_NODE_HEIGHT, + spacing = DEFAULT_LAYOUT_SPACING, + } = config; + + // ── 1. 
Bucket nodes ─────────────────────────────────────────────────────── + const busNodes = nodes.filter((n) => BUS_TYPES.has(n.type ?? "")); + const childNodes = nodes.filter((n) => !BUS_TYPES.has(n.type ?? "") && (n as any).parentId); + const dagreNodes = nodes.filter((n) => !BUS_TYPES.has(n.type ?? "") && !(n as any).parentId); + + // ── 2. Build Dagre graph ────────────────────────────────────────────────── const g = new dagre.graphlib.Graph(); g.setDefaultEdgeLabel(() => ({})); - g.setGraph({ rankdir: direction, nodesep: 60, ranksep: 100 }); + g.setGraph({ + rankdir: direction, + nodesep: spacing, + ranksep: spacing * 2, + marginx: spacing, + marginy: spacing, + }); - for (const node of nodes) { - g.setNode(node.id, { width: NODE_WIDTH, height: NODE_HEIGHT }); + // Register nodes with measured (or fallback) dimensions. + for (const node of dagreNodes) { + g.setNode(node.id, { + width: nodeW(node, nodeWidth), + height: nodeH(node, nodeHeight), + }); } + + // Build a quick lookup and a helper that resolves child IDs → top-level IDs. + const nodeById = new Map(nodes.map((n) => [n.id, n])); + const topLevelId = (id: string): string => { + const n = nodeById.get(id); + return (n as any)?.parentId ?? id; + }; + + // Add edges, resolving child nodes to their parents and skipping bus nodes. + const seenEdges = new Set(); for (const edge of edges) { - g.setEdge(edge.source, edge.target); + const srcId = topLevelId(edge.source); + const tgtId = topLevelId(edge.target); + + if (srcId === tgtId) continue; // intra-group + if (!g.hasNode(srcId) || !g.hasNode(tgtId)) continue; // involves bus node + const key = `${srcId}→${tgtId}`; + if (seenEdges.has(key)) continue; // deduplicate + seenEdges.add(key); + + const tgtNode = nodeById.get(edge.target); + const isDbEdge = DATABASE_TYPES.has(tgtNode?.type ?? ""); + + g.setEdge(srcId, tgtId, { + // Database/cache edges: push two ranks right so databases always appear + // clearly to the right of their owning service node. 
+ weight: isDbEdge ? 3 : 1, + minlen: isDbEdge ? 2 : 1, + }); } + // ── 3. Run Dagre ────────────────────────────────────────────────────────── dagre.layout(g); - return nodes.map((node) => { - const { x, y } = g.node(node.id); - return { - ...node, - position: { - x: x - NODE_WIDTH / 2, - y: y - NODE_HEIGHT / 2, - }, - }; + const positioned = dagreNodes.map((node) => { + const pos = g.node(node.id); + if (!pos) return node; + const w = nodeW(node, nodeWidth); + const h = nodeH(node, nodeHeight); + return { ...node, position: { x: pos.x - w / 2, y: pos.y - h / 2 } }; }); + + // ── 4. Place bus/queue nodes below-centre ───────────────────────────────── + let finalNodes: Node[]; + + if (busNodes.length > 0 && positioned.length > 0) { + // Compute the bounding box of all Dagre-laid nodes. + let minX = Infinity, maxX = -Infinity, maxY = -Infinity; + for (const n of positioned) { + minX = Math.min(minX, n.position.x); + maxX = Math.max(maxX, n.position.x + nodeW(n, nodeWidth)); + maxY = Math.max(maxY, n.position.y + nodeH(n, nodeHeight)); + } + + const canvasCenterX = (minX + maxX) / 2; + const busY = maxY + spacing * 2; + + // Total bus-strip width including inter-node gaps. + const totalBusW = + busNodes.reduce((sum, n) => sum + nodeW(n, nodeWidth), 0) + + (busNodes.length - 1) * spacing; + + let busX = canvasCenterX - totalBusW / 2; + const placedBus = busNodes.map((n) => { + const node = { ...n, position: { x: busX, y: busY } }; + busX += nodeW(n, nodeWidth) + spacing; + return node; + }); + + finalNodes = [...positioned, ...placedBus]; + } else { + finalNodes = [...positioned, ...busNodes]; + } + + // ── 5. Resolve residual overlaps ────────────────────────────────────────── + const resolved = resolveOverlaps(finalNodes); + + // ── 6. Reattach child nodes (positions are relative to parent) ──────────── + return { nodes: [...resolved, ...childNodes], edges }; +} + +/** + * Convenience wrapper — runs getLayoutedElements and returns only the nodes. 
+ * Kept for backward compatibility with existing call sites. + */ +export function applyDagreLayout( + nodes: Node[], + edges: Edge[], + direction: "TB" | "LR" = "LR", + spacing: number = DEFAULT_LAYOUT_SPACING +): Node[] { + return getLayoutedElements(nodes, edges, { direction, spacing }).nodes; } diff --git a/apps/web/src/types/graph.ts b/apps/web/src/types/graph.ts index 7196c61..fed4fb2 100644 --- a/apps/web/src/types/graph.ts +++ b/apps/web/src/types/graph.ts @@ -1,3 +1,14 @@ +export type InfraType = "database" | "queue" | "cache" | "external"; + +export interface InfraNode { + id: string; + name: string; + type: InfraType; + technology?: string; + description?: string; + ref?: string; +} + export interface Endpoint { method: string; path: string; @@ -69,6 +80,7 @@ export interface AnalyzedService { kafkaProducers?: KafkaProducer[]; kafkaConsumers?: KafkaConsumer[]; nodeConfig?: NodeConfig; + infrastructure?: InfraNode[]; } export interface ViewNode { @@ -76,6 +88,9 @@ export interface ViewNode { type?: string; position: { x: number; y: number }; data: Record; + parentId?: string; + extent?: "parent"; + style?: Record; } export interface ViewEdge { @@ -84,6 +99,8 @@ export interface ViewEdge { target: string; label?: string; animated?: boolean; + markerEnd?: string; + type?: string; } export interface GraphView { @@ -103,5 +120,6 @@ export interface GraphData { serviceFlow: GraphView; dataFlow: GraphView; functionFlow: GraphView; + containerDiagram: GraphView; }; } diff --git a/docs/configuration.md b/docs/configuration.md index 34ca336..d7a042b 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,5 +1,7 @@ # Configuration Reference +For a working end-to-end example see [github.com/docktail/archmap](https://github.com/docktail/archmap) — the `services/` directory contains Order Service and Inventory Service with annotated `archmap.yml` files and infrastructure ref files. 
+ ## archmap.yml Place an `archmap.yml` file in the root of any repository to control how ArchMap treats it during a scan. @@ -17,6 +19,13 @@ tags: - critical - pci skip: false # set to true to exclude this repo entirely +infrastructure: + - id: payments-db + ref: ./infra/postgres-archmap.yml # same-repo ref + - id: stripe + name: Stripe API + type: external + description: Payment processing API ``` ### Fields @@ -30,10 +39,47 @@ skip: false # set to true to exclude this repo entirely | `depends_on` | string[] | Explicit dependency edges to other services, by repo name. Supplements auto-detected dependencies. | | `tags` | string[] | Arbitrary labels. Currently stored on the service record; available for future filtering. | | `skip` | boolean | If `true`, the repo is excluded from the scan entirely. | +| `infrastructure` | InfraNode[] | Infrastructure dependencies (databases, queues, caches, external APIs) shown in the Container Diagram. | + +### infrastructure — InfraNode fields + +Each entry under `infrastructure` declares one infrastructure dependency. Fields can be set inline or loaded from a ref file. + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `id` | string | Yes | Unique identifier. Nodes with the same `id` across services are deduplicated into one node in the Container Diagram. | +| `name` | string | Yes* | Display name on the node. Can be omitted if provided by the ref file. | +| `type` | string | Yes* | One of `database`, `queue`, `cache`, `external`. Controls node shape and edge label. Can be omitted if provided by the ref file. | +| `technology` | string | No | Technology name shown on the node (e.g. `PostgreSQL 14`, `Apache Kafka`). | +| `description` | string | No | Short description shown on the node. | +| `ref` | string | No | Path to an archmap-format YAML file that provides the full node definition. Inline fields take priority over the ref file. 
| + +### ref file format + +A ref file is a standalone YAML file that carries the full infrastructure node definition. This lets multiple services reference one shared definition without repeating it. + +```yaml +# infra/postgres-archmap.yml +id: orders-db +name: Orders DB +type: database +technology: PostgreSQL 14 +description: Primary relational store for order records +``` + +**Ref path formats:** + +| Format | Resolved during | Example | +|--------|----------------|---------| +| `./path.yml` | Local scan and GitHub scan (same repo) | `./infra/postgres-archmap.yml` | +| `repo/path.yml` | GitHub scan only | `shared-infra/redis-archmap.yml` | +| `repo/path.yml?ref=tag` | GitHub scan only | `shared-infra/kafka-archmap.yml?ref=v2` | + +Local scan skips cross-repo refs and logs a warning. If a ref cannot be resolved, the scanner falls back to whatever fields were declared inline. ## Graph views -ArchMap produces three graph views from a single scan: +ArchMap produces four graph views from a single scan: ### Service Flow @@ -42,6 +88,8 @@ Nodes are services. Edges represent: - Feign client and RestTemplate/WebClient calls detected in source code - Kafka producer/consumer relationships (service A produces to topic X, service B consumes from topic X) +All edges have directed arrowheads so dependency direction is unambiguous. + ### Data Flow Nodes are data types (entities, DTOs, request/response classes, events). Edges connect data types to the services that produce or consume them. Useful for tracing where a shared data contract is defined and who uses it. @@ -50,6 +98,19 @@ Nodes are data types (entities, DTOs, request/response classes, events). Edges c Nodes are public service-layer methods. Edges represent cross-service calls detected in source code — when one service method calls another service bean's method. Useful for drilling into call chains within a service. 
+### Container Diagram + +A C4-style container view showing services and their infrastructure dependencies together. Services are grouped into domain boundary boxes. Infrastructure nodes use distinct shapes: + +| Node type | Shape | Edge label | +|-----------|-------|------------| +| `database` | Cyan cylinder | `persists to` | +| `queue` | Purple parallel lines | `publishes to` / `subscribes to` / `uses` | +| `cache` | Amber box | `caches via` | +| `external` | Gray dashed box | `calls` | + +Infrastructure nodes with the same `id` across multiple services are deduplicated — a shared Kafka cluster appears as a single node with edges from every service that uses it. + ## What the analyzer detects The static analyzer extracts the following from `.java` and `.kt` files: diff --git a/docs/use-cases.md b/docs/use-cases.md index 50d7520..36a229f 100644 --- a/docs/use-cases.md +++ b/docs/use-cases.md @@ -35,3 +35,89 @@ ArchMap generates the graph from source code on a schedule. The graph is always ## Incident investigation During an incident, the first question is often "what calls this service?" or "what does this service call?". The Service Flow graph answers both immediately. Engineers can trace the blast radius of a failing service or identify which upstream caller is sending unexpected traffic. + +--- + +## Example organization: Commerce domain + +The `services/` directory of [github.com/docktail/archmap](https://github.com/docktail/archmap) contains a working two-service example that demonstrates the core features of ArchMap. + +### Services + +**Order Service** (`services/order-service`) + +Accepts order creation requests via REST, persists records to PostgreSQL, and publishes `order.created` events to Kafka. 
+ +```yaml +# services/order-service/archmap.yml +name: Order Service +type: service +domain: commerce +tags: [rest, kafka-producer, postgres, critical] +infrastructure: + - id: orders-db + ref: ./infra/postgres-archmap.yml + - id: kafka + ref: ./infra/kafka-archmap.yml +``` + +**Inventory Service** (`services/inventory-service`) + +Consumes `order.created` events from Kafka and reserves stock in PostgreSQL. + +```yaml +# services/inventory-service/archmap.yml +name: Inventory Service +type: service +domain: commerce +tags: [kafka-consumer, postgres] +depends_on: + - order-service +infrastructure: + - id: inventory-db + ref: ./infra/postgres-archmap.yml + - id: kafka + ref: ./infra/kafka-archmap.yml +``` + +### Infrastructure ref files + +Each service declares its infrastructure via ref files under `infra/`: + +``` +services/ + order-service/ + infra/ + postgres-archmap.yml # id: orders-db, type: database + kafka-archmap.yml # id: kafka, type: queue + inventory-service/ + infra/ + postgres-archmap.yml # id: inventory-db, type: database + kafka-archmap.yml # id: kafka, type: queue (same id — deduped in graph) +``` + +Both services reference `id: kafka` in their Kafka ref file. The Container Diagram deduplicates this into a single Kafka node with edges from both services. 
+ +### Running the example locally + +Clone [github.com/docktail/archmap](https://github.com/docktail/archmap) and run: + +```bash +# From the archmap/ directory +pnpm install +pnpm build --filter @archmap/analyzer --filter @archmap/graph-builder --filter @archmap/deployers + +SCANNER_SOURCE=local \ +SERVICES_DIR=./services \ +DEPLOYER=files \ +OUTPUT_DIR=./data \ +pnpm --filter @archmap/scanner scan + +ARCHMAP_DATA_PATH=./data/graph.json pnpm --filter @archmap/web dev +``` + +Open `http://localhost:3000` and switch to the **Container** tab to see: +- A `commerce` domain boundary box containing both services +- `Orders DB` and `Inventory DB` as database nodes (cyan cylinder) +- A single shared `Apache Kafka` queue node (purple) +- Directed edges labeled `persists to`, `publishes to`, and `subscribes to` diff --git a/packages/graph-builder/src/__tests__/container-diagram.test.ts b/packages/graph-builder/src/__tests__/container-diagram.test.ts new file mode 100644 index 0000000..a984f35 --- /dev/null +++ b/packages/graph-builder/src/__tests__/container-diagram.test.ts @@ -0,0 +1,208 @@ +import { describe, it, expect } from "vitest"; +import { buildContainerDiagram } from "../views/container-diagram"; +import type { AnalyzedService, InfraNode } from "../types"; + +function makeService(overrides: Partial<AnalyzedService> & { id: string; name: string }): AnalyzedService { + return { + repoName: overrides.id, + repoUrl: `https://github.com/org/${overrides.id}`, + language: "java", + summary: "", + endpoints: [], + dataTypes: [], + functions: [], + dependsOn: [], + kafkaProducers: [], + kafkaConsumers: [], + tags: [], + ...overrides, + }; +} + +function makeInfra(overrides: Partial<InfraNode> & { id: string; name: string; type: InfraNode["type"] }): InfraNode { + return { ...overrides }; +} + +describe("buildContainerDiagram", () => { + it("returns empty graph for no services", () => { + const { nodes, edges } = buildContainerDiagram([]); + expect(nodes).toHaveLength(0); + expect(edges).toHaveLength(0); + }); + + 
it("creates a service node per service", () => { + const services = [ + makeService({ id: "order-service", name: "Order Service" }), + makeService({ id: "inventory-service", name: "Inventory Service" }), + ]; + const { nodes } = buildContainerDiagram(services); + const serviceNodes = nodes.filter((n) => n.type === "serviceNode"); + expect(serviceNodes).toHaveLength(2); + }); + + it("creates a group node per domain and sets parentId on service nodes", () => { + const services = [ + makeService({ id: "a", name: "A", domain: "commerce" }), + makeService({ id: "b", name: "B", domain: "commerce" }), + ]; + const { nodes } = buildContainerDiagram(services); + const groupNode = nodes.find((n) => n.type === "group"); + expect(groupNode).toBeDefined(); + expect(groupNode!.id).toBe("group:commerce"); + + const serviceNodes = nodes.filter((n) => n.type === "serviceNode"); + expect(serviceNodes.every((n) => n.parentId === "group:commerce")).toBe(true); + expect(serviceNodes.every((n) => n.extent === "parent")).toBe(true); + }); + + it("creates separate group nodes for different domains", () => { + const services = [ + makeService({ id: "a", name: "A", domain: "commerce" }), + makeService({ id: "b", name: "B", domain: "logistics" }), + ]; + const { nodes } = buildContainerDiagram(services); + const groupNodes = nodes.filter((n) => n.type === "group"); + expect(groupNodes).toHaveLength(2); + }); + + it("maps database infra to databaseNode type", () => { + const infra = makeInfra({ id: "orders-db", name: "Orders DB", type: "database" }); + const services = [makeService({ id: "a", name: "A", infrastructure: [infra] })]; + const { nodes } = buildContainerDiagram(services); + const dbNode = nodes.find((n) => n.id === "orders-db"); + expect(dbNode?.type).toBe("databaseNode"); + }); + + it("maps queue infra to queueNode type", () => { + const infra = makeInfra({ id: "kafka", name: "Kafka", type: "queue" }); + const services = [makeService({ id: "a", name: "A", infrastructure: [infra] 
})]; + const { nodes } = buildContainerDiagram(services); + expect(nodes.find((n) => n.id === "kafka")?.type).toBe("queueNode"); + }); + + it("maps cache infra to cacheNode type", () => { + const infra = makeInfra({ id: "redis", name: "Redis", type: "cache" }); + const services = [makeService({ id: "a", name: "A", infrastructure: [infra] })]; + const { nodes } = buildContainerDiagram(services); + expect(nodes.find((n) => n.id === "redis")?.type).toBe("cacheNode"); + }); + + it("maps external infra to externalNode type", () => { + const infra = makeInfra({ id: "stripe", name: "Stripe", type: "external" }); + const services = [makeService({ id: "a", name: "A", infrastructure: [infra] })]; + const { nodes } = buildContainerDiagram(services); + expect(nodes.find((n) => n.id === "stripe")?.type).toBe("externalNode"); + }); + + it("deduplicates infra nodes with the same id across services", () => { + const kafka = makeInfra({ id: "kafka", name: "Kafka", type: "queue" }); + const services = [ + makeService({ id: "a", name: "A", infrastructure: [kafka] }), + makeService({ id: "b", name: "B", infrastructure: [kafka] }), + ]; + const { nodes } = buildContainerDiagram(services); + const kafkaNodes = nodes.filter((n) => n.id === "kafka"); + expect(kafkaNodes).toHaveLength(1); + }); + + it("creates an edge from each service to its infra nodes", () => { + const db = makeInfra({ id: "orders-db", name: "Orders DB", type: "database" }); + const services = [makeService({ id: "order-service", name: "Order Service", infrastructure: [db] })]; + const { edges } = buildContainerDiagram(services); + expect(edges).toHaveLength(1); + expect(edges[0]).toMatchObject({ source: "order-service", target: "orders-db" }); + }); + + it("does not duplicate edges when two services share the same infra", () => { + const kafka = makeInfra({ id: "kafka", name: "Kafka", type: "queue" }); + const services = [ + makeService({ id: "a", name: "A", infrastructure: [kafka] }), + makeService({ id: "b", name: 
"B", infrastructure: [kafka] }), + ]; + const { edges } = buildContainerDiagram(services); + // Two services, two edges (one per service→kafka) — no duplicate for same pair + const ids = edges.map((e) => e.id); + expect(new Set(ids).size).toBe(ids.length); + expect(edges).toHaveLength(2); + }); + + it("sets markerEnd: arrow on all edges", () => { + const db = makeInfra({ id: "db", name: "DB", type: "database" }); + const services = [makeService({ id: "a", name: "A", infrastructure: [db] })]; + const { edges } = buildContainerDiagram(services); + expect(edges.every((e) => e.markerEnd === "arrow")).toBe(true); + }); + + it("sets type: smoothstep on all edges", () => { + const db = makeInfra({ id: "db", name: "DB", type: "database" }); + const services = [makeService({ id: "a", name: "A", infrastructure: [db] })]; + const { edges } = buildContainerDiagram(services); + expect(edges.every((e) => e.type === "smoothstep")).toBe(true); + }); + + it("labels database edges 'persists to'", () => { + const db = makeInfra({ id: "db", name: "DB", type: "database" }); + const services = [makeService({ id: "a", name: "A", infrastructure: [db] })]; + const { edges } = buildContainerDiagram(services); + expect(edges[0].label).toBe("persists to"); + }); + + it("labels queue edges 'publishes to' for pure producers", () => { + const kafka = makeInfra({ id: "kafka", name: "Kafka", type: "queue" }); + const services = [ + makeService({ + id: "a", + name: "A", + infrastructure: [kafka], + kafkaProducers: [{ topic: "events" }], + kafkaConsumers: [], + }), + ]; + const { edges } = buildContainerDiagram(services); + expect(edges[0].label).toBe("publishes to"); + }); + + it("labels queue edges 'subscribes to' for pure consumers", () => { + const kafka = makeInfra({ id: "kafka", name: "Kafka", type: "queue" }); + const services = [ + makeService({ + id: "a", + name: "A", + infrastructure: [kafka], + kafkaProducers: [], + kafkaConsumers: [{ topics: ["events"], handlerMethod: "handle" }], + }), 
+ ]; + const { edges } = buildContainerDiagram(services); + expect(edges[0].label).toBe("subscribes to"); + }); + + it("labels queue edges 'uses' for both producer and consumer", () => { + const kafka = makeInfra({ id: "kafka", name: "Kafka", type: "queue" }); + const services = [ + makeService({ + id: "a", + name: "A", + infrastructure: [kafka], + kafkaProducers: [{ topic: "events" }], + kafkaConsumers: [{ topics: ["other"], handlerMethod: "handle" }], + }), + ]; + const { edges } = buildContainerDiagram(services); + expect(edges[0].label).toBe("uses"); + }); + + it("labels cache edges 'caches via'", () => { + const redis = makeInfra({ id: "redis", name: "Redis", type: "cache" }); + const services = [makeService({ id: "a", name: "A", infrastructure: [redis] })]; + const { edges } = buildContainerDiagram(services); + expect(edges[0].label).toBe("caches via"); + }); + + it("labels external edges 'calls'", () => { + const stripe = makeInfra({ id: "stripe", name: "Stripe", type: "external" }); + const services = [makeService({ id: "a", name: "A", infrastructure: [stripe] })]; + const { edges } = buildContainerDiagram(services); + expect(edges[0].label).toBe("calls"); + }); +}); diff --git a/packages/graph-builder/src/__tests__/data-flow.test.ts b/packages/graph-builder/src/__tests__/data-flow.test.ts new file mode 100644 index 0000000..273b04f --- /dev/null +++ b/packages/graph-builder/src/__tests__/data-flow.test.ts @@ -0,0 +1,154 @@ +import { describe, it, expect } from "vitest"; +import { buildDataFlow } from "../views/data-flow"; +import type { AnalyzedService } from "../types"; + +function makeService(overrides: Partial<AnalyzedService> & { id: string; name: string }): AnalyzedService { + return { + repoName: overrides.id, + repoUrl: `https://github.com/org/${overrides.id}`, + language: "java", + summary: "", + endpoints: [], + dataTypes: [], + functions: [], + dependsOn: [], + kafkaProducers: [], + kafkaConsumers: [], + tags: [], + ...overrides, + }; +} + 
+describe("buildDataFlow", () => { + it("returns empty graph when no services have data types", () => { + const { nodes, edges } = buildDataFlow([makeService({ id: "a", name: "A" })]); + expect(nodes).toHaveLength(0); + expect(edges).toHaveLength(0); + }); + + it("creates one node per unique data type", () => { + const services = [ + makeService({ + id: "a", + name: "A", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: ["a"], consumedBy: [] }], + }), + makeService({ + id: "b", + name: "B", + dataTypes: [{ name: "UserDto", fields: [], producedBy: ["b"], consumedBy: [] }], + }), + ]; + const { nodes } = buildDataFlow(services); + expect(nodes).toHaveLength(2); + expect(nodes.map((n) => n.id)).toEqual(expect.arrayContaining(["dt:OrderDto", "dt:UserDto"])); + }); + + it("deduplicates data types with the same name across services", () => { + const services = [ + makeService({ + id: "a", + name: "A", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: ["a"], consumedBy: [] }], + }), + makeService({ + id: "b", + name: "B", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: [], consumedBy: ["b"] }], + }), + ]; + const { nodes } = buildDataFlow(services); + expect(nodes).toHaveLength(1); + }); + + it("merges producedBy and consumedBy for same-name types", () => { + const services = [ + makeService({ + id: "a", + name: "A", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: ["a"], consumedBy: [] }], + }), + makeService({ + id: "b", + name: "B", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: [], consumedBy: ["b"] }], + }), + ]; + const { nodes } = buildDataFlow(services); + const node = nodes[0]; + expect(node.data.producedBy).toContain("a"); + expect(node.data.consumedBy).toContain("b"); + }); + + it("creates an edge between producer and consumer service", () => { + const services = [ + makeService({ + id: "a", + name: "A", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: ["a"], consumedBy: [] }], + }), + makeService({ + id: 
"b", + name: "B", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: [], consumedBy: ["b"] }], + }), + ]; + const { edges } = buildDataFlow(services); + expect(edges).toHaveLength(1); + expect(edges[0]).toMatchObject({ source: "a", target: "b" }); + }); + + it("sets markerEnd: arrow on edges", () => { + const services = [ + makeService({ + id: "a", + name: "A", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: ["a"], consumedBy: [] }], + }), + makeService({ + id: "b", + name: "B", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: [], consumedBy: ["b"] }], + }), + ]; + const { edges } = buildDataFlow(services); + expect(edges[0].markerEnd).toBe("arrow"); + }); + + it("does not create self-loop edges", () => { + const services = [ + makeService({ + id: "a", + name: "A", + dataTypes: [{ name: "OrderDto", fields: [], producedBy: ["a"], consumedBy: ["a"] }], + }), + ]; + const { edges } = buildDataFlow(services); + expect(edges).toHaveLength(0); + }); + + it("consolidates multiple shared types into one edge with a label", () => { + const services = [ + makeService({ + id: "a", + name: "A", + dataTypes: [ + { name: "TypeOne", fields: [], producedBy: ["a"], consumedBy: [] }, + { name: "TypeTwo", fields: [], producedBy: ["a"], consumedBy: [] }, + ], + }), + makeService({ + id: "b", + name: "B", + dataTypes: [ + { name: "TypeOne", fields: [], producedBy: [], consumedBy: ["b"] }, + { name: "TypeTwo", fields: [], producedBy: [], consumedBy: ["b"] }, + ], + }), + ]; + const { edges } = buildDataFlow(services); + expect(edges).toHaveLength(1); + expect(edges[0].label).toContain("TypeOne"); + expect(edges[0].label).toContain("TypeTwo"); + }); +}); diff --git a/packages/graph-builder/src/__tests__/service-flow.test.ts b/packages/graph-builder/src/__tests__/service-flow.test.ts index 5475775..36462d1 100644 --- a/packages/graph-builder/src/__tests__/service-flow.test.ts +++ b/packages/graph-builder/src/__tests__/service-flow.test.ts @@ -97,4 +97,22 @@ 
describe("buildServiceFlow", () => { // Both nodes should have valid positions assigned expect(nodes.every((n) => typeof n.position.x === "number" && typeof n.position.y === "number")).toBe(true); }); + + it("sets markerEnd: arrow on dependsOn edges", () => { + const services = [ + makeService({ id: "a", name: "A", dependsOn: ["b"] }), + makeService({ id: "b", name: "B" }), + ]; + const { edges } = buildServiceFlow(services); + expect(edges[0].markerEnd).toBe("arrow"); + }); + + it("sets markerEnd: arrow on kafka edges", () => { + const services = [ + makeService({ id: "a", name: "A", kafkaProducers: [{ topic: "events" }] }), + makeService({ id: "b", name: "B", kafkaConsumers: [{ topics: ["events"], handlerMethod: "handle" }] }), + ]; + const { edges } = buildServiceFlow(services); + expect(edges.every((e) => e.markerEnd === "arrow")).toBe(true); + }); }); diff --git a/packages/graph-builder/src/index.ts b/packages/graph-builder/src/index.ts index a6e1cd7..635fe33 100644 --- a/packages/graph-builder/src/index.ts +++ b/packages/graph-builder/src/index.ts @@ -2,6 +2,7 @@ import type { AnalyzedService, GraphView } from "./types"; import { buildServiceFlow } from "./views/service-flow"; import { buildDataFlow } from "./views/data-flow"; import { buildFunctionFlow } from "./views/function-flow"; +import { buildContainerDiagram } from "./views/container-diagram"; export type { AnalyzedService, GraphView }; export { gridLayout, groupedLayout } from "./layout"; @@ -10,6 +11,7 @@ export interface AllViews { serviceFlow: GraphView; dataFlow: GraphView; functionFlow: GraphView; + containerDiagram: GraphView; } export function buildAllViews(services: AnalyzedService[]): AllViews { @@ -17,5 +19,6 @@ export function buildAllViews(services: AnalyzedService[]): AllViews { serviceFlow: buildServiceFlow(services), dataFlow: buildDataFlow(services), functionFlow: buildFunctionFlow(services), + containerDiagram: buildContainerDiagram(services), }; } diff --git 
a/packages/graph-builder/src/types.ts b/packages/graph-builder/src/types.ts index 9384069..9f87bec 100644 --- a/packages/graph-builder/src/types.ts +++ b/packages/graph-builder/src/types.ts @@ -1,11 +1,25 @@ // Local type definitions to avoid circular workspace dependencies. // These mirror the scanner's AnalyzedService shape at runtime. +export type InfraType = "database" | "queue" | "cache" | "external"; + +export interface InfraNode { + id: string; + name: string; + type: InfraType; + technology?: string; + description?: string; + ref?: string; +} + export interface ViewNode { id: string; type?: string; position: { x: number; y: number }; data: Record<string, unknown>; + parentId?: string; + extent?: "parent"; + style?: Record<string, unknown>; } export interface ViewEdge { @@ -14,6 +28,8 @@ export interface ViewEdge { target: string; label?: string; animated?: boolean; + markerEnd?: string; + type?: string; } export interface GraphView { @@ -100,4 +116,5 @@ export interface AnalyzedService { kafkaProducers?: KafkaProducer[]; kafkaConsumers?: KafkaConsumer[]; nodeConfig?: NodeConfig; + infrastructure?: InfraNode[]; } diff --git a/packages/graph-builder/src/views/container-diagram.ts b/packages/graph-builder/src/views/container-diagram.ts new file mode 100644 index 0000000..8d0cb99 --- /dev/null +++ b/packages/graph-builder/src/views/container-diagram.ts @@ -0,0 +1,177 @@ +import type { AnalyzedService, InfraNode, GraphView } from "../types"; +import { gridLayout } from "../layout"; + +const INFRA_TYPE_TO_NODE: Record<InfraType, string> = { + database: "databaseNode", + queue: "queueNode", + cache: "cacheNode", + external: "externalNode", +}; + +const GROUP_WIDTH = 360; +const GROUP_PADDING = 60; +const NODE_SPACING = 220; +const GROUP_GAP = 80; +const INFRA_ROW_Y = 560; + +export function buildContainerDiagram(services: AnalyzedService[]): GraphView { + // Collect unique infra nodes across all services (first declaration wins) + const infraMap = new Map<string, InfraNode>(); + for (const svc of services) { + for (const infra of
svc.infrastructure ?? []) { + if (!infraMap.has(infra.id)) infraMap.set(infra.id, infra); + } + } + + if (services.length === 0 && infraMap.size === 0) { + return { nodes: [], edges: [] }; + } + + const hasDomains = services.some((s) => s.domain); + const nodes: GraphView["nodes"] = []; + + if (hasDomains) { + // Group services by domain, emit React Flow group (parent) nodes + child service nodes + const domainGroups = new Map(); + for (const svc of services) { + const domain = svc.domain ?? "__ungrouped__"; + if (!domainGroups.has(domain)) domainGroups.set(domain, []); + domainGroups.get(domain)!.push(svc); + } + + let groupX = 0; + for (const [domain, members] of domainGroups) { + const groupHeight = members.length * NODE_SPACING + GROUP_PADDING * 2; + nodes.push({ + id: `group:${domain}`, + type: "group", + position: { x: groupX, y: 0 }, + data: { label: domain }, + style: { + width: GROUP_WIDTH, + height: groupHeight, + backgroundColor: "rgba(55,65,81,0.3)", + borderColor: "#374151", + borderRadius: 8, + }, + }); + + members.forEach((svc, i) => { + nodes.push({ + id: svc.id, + type: "serviceNode", + position: { x: GROUP_PADDING, y: GROUP_PADDING + i * NODE_SPACING }, + parentId: `group:${domain}`, + extent: "parent", + data: { + label: svc.name, + name: svc.name, + repoName: svc.repoName, + repoUrl: svc.repoUrl, + language: svc.language, + summary: svc.summary, + endpoints: svc.endpoints, + serviceType: svc.type, + domain: svc.domain, + tags: svc.tags, + nodeConfig: svc.nodeConfig, + }, + }); + }); + + groupX += GROUP_WIDTH + GROUP_GAP; + } + } else { + const nodesNoPos = services.map((svc) => ({ + id: svc.id, + type: "serviceNode", + data: { + label: svc.name, + name: svc.name, + repoName: svc.repoName, + repoUrl: svc.repoUrl, + language: svc.language, + summary: svc.summary, + endpoints: svc.endpoints, + serviceType: svc.type, + domain: svc.domain, + tags: svc.tags, + nodeConfig: svc.nodeConfig, + }, + })); + nodes.push(...gridLayout(nodesNoPos, undefined, 
320, 200)); + } + + // Infra nodes — placed in a row below the service groups + const infraNodes = Array.from(infraMap.values()); + if (infraNodes.length > 0) { + const infraNodesNoPos = infraNodes.map((infra) => ({ + id: infra.id, + type: INFRA_TYPE_TO_NODE[infra.type] ?? "externalNode", + data: { + label: infra.name, + name: infra.name, + technology: infra.technology, + description: infra.description, + ref: infra.ref, + }, + })); + + const totalGroupsWidth = hasDomains + ? (new Map(services.map((s) => [s.domain ?? "__ungrouped__", true])).size) * (GROUP_WIDTH + GROUP_GAP) + : Math.ceil(Math.sqrt(services.length)) * 320; + const infraCols = Math.max(1, Math.ceil(Math.sqrt(infraNodesNoPos.length))); + const infraSpacingX = 260; + const infraSpacingY = 200; + const infraStartX = Math.max(0, (totalGroupsWidth - infraCols * infraSpacingX) / 2); + + infraNodesNoPos.forEach((node, i) => { + nodes.push({ + ...node, + position: { + x: infraStartX + (i % infraCols) * infraSpacingX, + y: INFRA_ROW_Y + Math.floor(i / infraCols) * infraSpacingY, + }, + }); + }); + } + + // Edges: service → infra + const edges: GraphView["edges"] = []; + const edgeSet = new Set(); + + for (const svc of services) { + for (const infra of svc.infrastructure ?? []) { + let label: string; + if (infra.type === "database") { + label = "persists to"; + } else if (infra.type === "queue") { + const isProducer = (svc.kafkaProducers?.length ?? 0) > 0; + const isConsumer = (svc.kafkaConsumers?.length ?? 
0) > 0; + if (isProducer && !isConsumer) label = "publishes to"; + else if (isConsumer && !isProducer) label = "subscribes to"; + else label = "uses"; + } else if (infra.type === "cache") { + label = "caches via"; + } else { + label = "calls"; + } + + const edgeId = `container:${svc.id}→${infra.id}`; + if (!edgeSet.has(edgeId)) { + edgeSet.add(edgeId); + edges.push({ + id: edgeId, + source: svc.id, + target: infra.id, + label, + markerEnd: "arrow", + type: "smoothstep", + animated: false, + }); + } + } + } + + return { nodes, edges }; +} diff --git a/packages/graph-builder/src/views/data-flow.ts b/packages/graph-builder/src/views/data-flow.ts index 90fde6a..f58e1fd 100644 --- a/packages/graph-builder/src/views/data-flow.ts +++ b/packages/graph-builder/src/views/data-flow.ts @@ -63,6 +63,7 @@ export function buildDataFlow(services: AnalyzedService[]): GraphView { target, label: types.slice(0, 3).join(", ") + (types.length > 3 ? "…" : ""), animated: false, + markerEnd: "arrow", }; }); diff --git a/packages/graph-builder/src/views/service-flow.ts b/packages/graph-builder/src/views/service-flow.ts index efffb81..0d02792 100644 --- a/packages/graph-builder/src/views/service-flow.ts +++ b/packages/graph-builder/src/views/service-flow.ts @@ -41,6 +41,7 @@ export function buildServiceFlow(services: AnalyzedService[]): GraphView { target: dep, label: "calls", animated: true, + markerEnd: "arrow", }); } } @@ -72,6 +73,7 @@ export function buildServiceFlow(services: AnalyzedService[]): GraphView { target: consumerId, label: producer.topic, animated: true, + markerEnd: "arrow", }); } } diff --git a/packages/scanner/package.json b/packages/scanner/package.json index d5eefac..b110128 100644 --- a/packages/scanner/package.json +++ b/packages/scanner/package.json @@ -5,7 +5,9 @@ "main": "dist/index.js", "scripts": { "scan": "tsx src/index.ts", - "build": "tsc" + "build": "tsc", + "test": "vitest run", + "test:watch": "vitest" }, "dependencies": { "@octokit/rest": "^21.0.0", diff 
--git a/packages/scanner/src/__tests__/config.test.ts b/packages/scanner/src/__tests__/config.test.ts new file mode 100644 index 0000000..c460d64 --- /dev/null +++ b/packages/scanner/src/__tests__/config.test.ts @@ -0,0 +1,226 @@ +import { describe, it, expect, beforeEach, afterEach } from "vitest"; +import { mkdtempSync, writeFileSync, mkdirSync, rmdirSync, rmSync } from "fs"; +import { tmpdir } from "os"; +import { join } from "path"; +import { parseRepoConfig, resolveInfraRefsLocal } from "../config"; + +// ─── parseRepoConfig ────────────────────────────────────────────────────────── + +describe("parseRepoConfig", () => { + it("returns null for invalid YAML", () => { + expect(parseRepoConfig("{ invalid: yaml: : :")).toBeNull(); + }); + + it("parses basic fields", () => { + const yaml = ` +name: Order Service +description: Handles orders +type: service +domain: commerce +skip: false +depends_on: + - inventory-service +tags: + - rest + - critical +`; + const config = parseRepoConfig(yaml); + expect(config).not.toBeNull(); + expect(config!.name).toBe("Order Service"); + expect(config!.description).toBe("Handles orders"); + expect(config!.type).toBe("service"); + expect(config!.domain).toBe("commerce"); + expect(config!.skip).toBe(false); + expect(config!.depends_on).toEqual(["inventory-service"]); + expect(config!.tags).toEqual(["rest", "critical"]); + }); + + it("treats skip: true correctly", () => { + const config = parseRepoConfig("skip: true"); + expect(config!.skip).toBe(true); + }); + + it("accepts group as alias for domain", () => { + const config = parseRepoConfig("group: payments"); + expect(config!.domain).toBe("payments"); + }); + + it("ignores unknown type values", () => { + const config = parseRepoConfig("type: unknown-thing"); + expect(config!.type).toBeUndefined(); + }); + + it("parses inline infrastructure with all fields", () => { + const yaml = ` +infrastructure: + - id: orders-db + name: Orders DB + type: database + technology: PostgreSQL 14 + 
description: Primary store +`; + const config = parseRepoConfig(yaml); + expect(config!.infrastructure).toHaveLength(1); + const node = config!.infrastructure![0]; + expect(node.id).toBe("orders-db"); + expect(node.name).toBe("Orders DB"); + expect(node.type).toBe("database"); + expect(node.technology).toBe("PostgreSQL 14"); + expect(node.description).toBe("Primary store"); + }); + + it("parses infrastructure entry with only id and ref", () => { + const yaml = ` +infrastructure: + - id: orders-db + ref: ./infra/postgres-archmap.yml +`; + const config = parseRepoConfig(yaml); + expect(config!.infrastructure).toHaveLength(1); + expect(config!.infrastructure![0].id).toBe("orders-db"); + expect(config!.infrastructure![0].ref).toBe("./infra/postgres-archmap.yml"); + expect(config!.infrastructure![0].name).toBeUndefined(); + }); + + it("skips infrastructure entries without an id", () => { + const yaml = ` +infrastructure: + - name: Orphan + type: database +`; + const config = parseRepoConfig(yaml); + expect(config!.infrastructure).toBeUndefined(); + }); + + it("skips entries with invalid infra type but keeps valid ones", () => { + const yaml = ` +infrastructure: + - id: db + name: DB + type: database + - id: unknown + name: Unknown + type: not-a-type +`; + const config = parseRepoConfig(yaml); + expect(config!.infrastructure).toHaveLength(2); + expect(config!.infrastructure![0].type).toBe("database"); + expect(config!.infrastructure![1].type).toBeUndefined(); + }); + + it("parses all four valid infra types", () => { + const yaml = ` +infrastructure: + - id: db + name: DB + type: database + - id: q + name: Q + type: queue + - id: c + name: C + type: cache + - id: e + name: E + type: external +`; + const config = parseRepoConfig(yaml); + const types = config!.infrastructure!.map((n) => n.type); + expect(types).toEqual(["database", "queue", "cache", "external"]); + }); + + it("returns undefined infrastructure when section is absent", () => { + const config = 
parseRepoConfig("name: Service"); + expect(config!.infrastructure).toBeUndefined(); + }); +}); + +// ─── resolveInfraRefsLocal ──────────────────────────────────────────────────── + +describe("resolveInfraRefsLocal", () => { + let tmpDir: string; + + beforeEach(() => { + tmpDir = mkdtempSync(join(tmpdir(), "archmap-test-")); + mkdirSync(join(tmpDir, "infra"), { recursive: true }); + }); + + afterEach(() => { + rmSync(tmpDir, { recursive: true, force: true }); + }); + + it("returns undefined for empty input", () => { + expect(resolveInfraRefsLocal(undefined, tmpDir)).toBeUndefined(); + expect(resolveInfraRefsLocal([], tmpDir)).toBeUndefined(); + }); + + it("resolves a same-repo ref and merges fields", () => { + writeFileSync(join(tmpDir, "infra", "postgres-archmap.yml"), ` +id: orders-db +name: Orders DB +type: database +technology: PostgreSQL 14 +description: Primary store +`); + const decls = [{ id: "orders-db", ref: "./infra/postgres-archmap.yml" }]; + const result = resolveInfraRefsLocal(decls, tmpDir); + expect(result).toHaveLength(1); + expect(result![0].name).toBe("Orders DB"); + expect(result![0].type).toBe("database"); + expect(result![0].technology).toBe("PostgreSQL 14"); + }); + + it("inline fields override ref file fields", () => { + writeFileSync(join(tmpDir, "infra", "postgres-archmap.yml"), ` +id: orders-db +name: Generic DB +type: database +technology: PostgreSQL 14 +`); + const decls = [{ id: "orders-db", name: "My Custom DB", ref: "./infra/postgres-archmap.yml" }]; + const result = resolveInfraRefsLocal(decls, tmpDir); + expect(result![0].name).toBe("My Custom DB"); + expect(result![0].technology).toBe("PostgreSQL 14"); + }); + + it("skips nodes where ref cannot be resolved and name/type are missing", () => { + const decls = [{ id: "missing", ref: "./infra/does-not-exist.yml" }]; + const result = resolveInfraRefsLocal(decls, tmpDir); + expect(result).toBeUndefined(); + }); + + it("includes fully inline nodes without a ref", () => { + const decls = 
[{ id: "kafka", name: "Apache Kafka", type: "queue" as const }]; + const result = resolveInfraRefsLocal(decls, tmpDir); + expect(result).toHaveLength(1); + expect(result![0].name).toBe("Apache Kafka"); + }); + + it("skips inline nodes missing name or type", () => { + const decls = [{ id: "incomplete" }]; + const result = resolveInfraRefsLocal(decls, tmpDir); + expect(result).toBeUndefined(); + }); + + it("resolves multiple nodes including one ref and one inline", () => { + writeFileSync(join(tmpDir, "infra", "postgres-archmap.yml"), ` +id: orders-db +name: Orders DB +type: database +`); + const decls = [ + { id: "orders-db", ref: "./infra/postgres-archmap.yml" }, + { id: "kafka", name: "Apache Kafka", type: "queue" as const }, + ]; + const result = resolveInfraRefsLocal(decls, tmpDir); + expect(result).toHaveLength(2); + expect(result!.map((n) => n.id)).toEqual(["orders-db", "kafka"]); + }); + + it("ignores cross-repo refs (non-relative paths)", () => { + const decls = [{ id: "kafka", ref: "shared-infra/kafka-archmap.yml" }]; + const result = resolveInfraRefsLocal(decls, tmpDir); + // cross-repo ref, no inline name/type → skipped + expect(result).toBeUndefined(); + }); +}); diff --git a/packages/scanner/src/config.ts b/packages/scanner/src/config.ts new file mode 100644 index 0000000..32e22bc --- /dev/null +++ b/packages/scanner/src/config.ts @@ -0,0 +1,88 @@ +import path from "path"; +import fs from "fs"; +import { load as loadYaml } from "js-yaml"; +import type { RepoConfig, InfraNode, InfraType, RawInfraDecl } from "./types"; + +export function parseRepoConfig(text: string): RepoConfig | null { + try { + const parsed = loadYaml(text) as Record<string, unknown>; + return { + name: typeof parsed.name === "string" ? parsed.name : undefined, + description: typeof parsed.description === "string" ? parsed.description : undefined, + skip: parsed.skip === true, + type: (["service", "library", "tool", "infra"] as const).includes(parsed.type as any) + ?
(parsed.type as RepoConfig["type"]) : undefined, + domain: typeof parsed.domain === "string" ? parsed.domain + : typeof parsed.group === "string" ? parsed.group : undefined, + depends_on: Array.isArray(parsed.depends_on) + ? (parsed.depends_on as unknown[]).filter((x): x is string => typeof x === "string") + : [], + tags: Array.isArray(parsed.tags) + ? (parsed.tags as unknown[]).filter((x): x is string => typeof x === "string") + : [], + node: (() => { + const n = parsed.node as Record | undefined; + if (!n || typeof n !== "object") return undefined; + return { + color: typeof n.color === "string" ? n.color : undefined, + icon: typeof n.icon === "string" ? n.icon : undefined, + badge: typeof n.badge === "string" ? n.badge : undefined, + description: typeof n.description === "string" ? n.description : undefined, + }; + })(), + infrastructure: (() => { + const raw = parsed.infrastructure; + if (!Array.isArray(raw)) return undefined; + const validTypes = new Set(["database", "queue", "cache", "external"]); + const result: RawInfraDecl[] = []; + for (const entry of raw as unknown[]) { + if (!entry || typeof entry !== "object") continue; + const e = entry as Record; + if (typeof e.id !== "string") continue; + const t = validTypes.has(e.type as string) ? (e.type as InfraType) : undefined; + result.push({ + id: e.id, + ...(typeof e.name === "string" && { name: e.name }), + ...(t && { type: t }), + ...(typeof e.technology === "string" && { technology: e.technology }), + ...(typeof e.description === "string" && { description: e.description }), + ...(typeof e.ref === "string" && { ref: e.ref }), + }); + } + return result.length > 0 ? 
result : undefined; + })(), + }; + } catch { + return null; + } +} + +export function resolveLocalInfraRef(ref: string, repoDir: string): Partial | null { + if (!ref.startsWith("./") && !ref.startsWith("../")) return null; + try { + const p = path.resolve(repoDir, ref); + const text = fs.readFileSync(p, "utf-8"); + return loadYaml(text) as Partial; + } catch { return null; } +} + +export function resolveInfraRefsLocal(decls: RawInfraDecl[] | undefined, repoDir: string): InfraNode[] | undefined { + if (!decls?.length) return undefined; + const validTypes = new Set(["database", "queue", "cache", "external"]); + const result: InfraNode[] = []; + for (const decl of decls) { + let merged: Record = { ...decl }; + if (decl.ref) { + const fromRef = resolveLocalInfraRef(decl.ref, repoDir); + if (fromRef) merged = { ...fromRef, ...decl }; // inline wins + } + if (typeof merged.name === "string" && typeof merged.type === "string" && validTypes.has(merged.type)) { + result.push(merged as unknown as InfraNode); + } else if (!merged.ref) { + // No ref and missing required fields — skip silently + } else { + console.warn(` Infra node "${decl.id}" missing name/type after ref resolution — skipped`); + } + } + return result.length > 0 ? result : undefined; +} diff --git a/packages/scanner/src/index.ts b/packages/scanner/src/index.ts index 4150c0f..f1da9a4 100644 --- a/packages/scanner/src/index.ts +++ b/packages/scanner/src/index.ts @@ -7,43 +7,57 @@ import type { AnalyzedService } from "@archmap/graph-builder"; import { createDeployer } from "@archmap/deployers"; import { analyzeSpringBoot } from "@archmap/analyzer"; import type { FileContent } from "@archmap/analyzer"; -import type { GraphData, RepoConfig } from "./types"; +import type { GraphData, RepoConfig, InfraNode, RawInfraDecl } from "./types"; +import { parseRepoConfig, resolveInfraRefsLocal } from "./config"; const SCANNER_SOURCE = process.env.SCANNER_SOURCE ?? 
"github"; // "github" | "local" -// ─── Shared helpers ─────────────────────────────────────────────────────────── +// ─── Infra ref resolution (GitHub-only helpers) ──────────────────────────── -function parseRepoConfig(text: string): RepoConfig | null { +async function resolveGithubInfraRef(ref: string, octokit: Octokit, owner: string): Promise | null> { + const match = ref.match(/^([^/]+)\/(.+?)(?:\?ref=(.+))?$/); + if (!match) return null; + const [, repo, filePath, gitRef = "HEAD"] = match; try { - const parsed = loadYaml(text) as Record; - return { - name: typeof parsed.name === "string" ? parsed.name : undefined, - description: typeof parsed.description === "string" ? parsed.description : undefined, - skip: parsed.skip === true, - type: (["service", "library", "tool", "infra"] as const).includes(parsed.type as any) - ? (parsed.type as RepoConfig["type"]) : undefined, - domain: typeof parsed.domain === "string" ? parsed.domain - : typeof parsed.group === "string" ? parsed.group : undefined, - depends_on: Array.isArray(parsed.depends_on) - ? (parsed.depends_on as unknown[]).filter((x): x is string => typeof x === "string") - : [], - tags: Array.isArray(parsed.tags) - ? (parsed.tags as unknown[]).filter((x): x is string => typeof x === "string") - : [], - node: (() => { - const n = parsed.node as Record | undefined; - if (!n || typeof n !== "object") return undefined; - return { - color: typeof n.color === "string" ? n.color : undefined, - icon: typeof n.icon === "string" ? n.icon : undefined, - badge: typeof n.badge === "string" ? n.badge : undefined, - description: typeof n.description === "string" ? 
n.description : undefined, - }; - })(), - }; - } catch { - return null; + const { data } = await octokit.repos.getContent({ owner, repo, path: filePath, ref: gitRef }); + if (!("content" in data)) return null; + return loadYaml(Buffer.from(data.content, "base64").toString("utf-8")) as Partial; + } catch { return null; } +} + +async function resolveInfraRefsGithub( + decls: RawInfraDecl[] | undefined, + octokit: Octokit, + owner: string, + repoName: string +): Promise { + if (!decls?.length) return undefined; + const validTypes = new Set(["database", "queue", "cache", "external"]); + const result: InfraNode[] = []; + for (const decl of decls) { + let merged: Record = { ...decl }; + if (decl.ref) { + if (decl.ref.startsWith("./") || decl.ref.startsWith("../")) { + // Same-repo ref — fetch from GitHub + const relPath = decl.ref.startsWith("./") ? decl.ref.slice(2) : decl.ref; + try { + const { data } = await octokit.repos.getContent({ owner, repo: repoName, path: relPath }); + if ("content" in data) { + const fromRef = loadYaml(Buffer.from(data.content, "base64").toString("utf-8")) as Partial; + merged = { ...fromRef, ...decl }; + } + } catch {} + } else { + // Cross-repo ref + const fromRef = await resolveGithubInfraRef(decl.ref, octokit, owner); + if (fromRef) merged = { ...fromRef, ...decl }; + } + } + if (typeof merged.name === "string" && typeof merged.type === "string" && validTypes.has(merged.type)) { + result.push(merged as unknown as InfraNode); + } } + return result.length > 0 ? 
result : undefined; } function buildAnalyzedService( @@ -237,7 +251,9 @@ async function runLocalScan(servicesDir: string): Promise<{ services: AnalyzedSe analyzed = buildStubService(repoName, repoUrl, description, language); } + const resolvedInfra = resolveInfraRefsLocal(config?.infrastructure, repoDir); applyConfigOverrides(analyzed, config); + if (resolvedInfra) analyzed.infrastructure = resolvedInfra; services.push(analyzed); } @@ -287,7 +303,7 @@ async function fetchGithubSourceFiles( ): Promise { try { const { data } = await octokit.git.getTree({ owner, repo, tree_sha: branch, recursive: "1" }); - if (data.truncated) console.warn(` ⚠ Tree truncated for ${repo}`); + if (data.truncated) console.warn(` Tree truncated for ${repo}`); const files = data.tree .filter((f) => f.type === "blob" && /^src\/main\/.*\.(java|kt)$/.test(f.path ?? "")) .slice(0, 200); @@ -338,7 +354,9 @@ async function runGithubScan(): Promise<{ services: AnalyzedService[]; repoCount analyzed = buildStubService(repo.name, repo.html_url, repo.description ?? "", repo.language ?? 
"unknown"); } + const resolvedInfra = await resolveInfraRefsGithub(config?.infrastructure, octokit, org, repo.name); applyConfigOverrides(analyzed, config); + if (resolvedInfra) analyzed.infrastructure = resolvedInfra; services.push(analyzed); } diff --git a/packages/scanner/src/types.ts b/packages/scanner/src/types.ts index b10f140..c01ba71 100644 --- a/packages/scanner/src/types.ts +++ b/packages/scanner/src/types.ts @@ -40,6 +40,27 @@ export interface ServiceFunction { export type ServiceType = "service" | "library" | "tool" | "infra"; +export type InfraType = "database" | "queue" | "cache" | "external"; + +export interface InfraNode { + id: string; + name: string; + type: InfraType; + technology?: string; + description?: string; + ref?: string; +} + +// Partial declaration as it appears in archmap.yml — name/type may come from a ref file +export interface RawInfraDecl { + id: string; + name?: string; + type?: InfraType; + technology?: string; + description?: string; + ref?: string; +} + // ─── Kafka (mirrored from @archmap/analyzer, kept local to avoid circular dep) ─ export interface KafkaProducer { @@ -73,6 +94,7 @@ export interface RepoConfig { depends_on?: string[]; tags?: string[]; node?: NodeConfig; + infrastructure?: RawInfraDecl[]; } export interface AnalyzedService { @@ -92,6 +114,7 @@ export interface AnalyzedService { kafkaProducers?: KafkaProducer[]; kafkaConsumers?: KafkaConsumer[]; nodeConfig?: NodeConfig; + infrastructure?: InfraNode[]; } export interface ViewNode { @@ -99,6 +122,9 @@ export interface ViewNode { type?: string; position: { x: number; y: number }; data: Record; + parentId?: string; + extent?: "parent"; + style?: Record; } export interface ViewEdge { @@ -107,6 +133,8 @@ export interface ViewEdge { target: string; label?: string; animated?: boolean; + markerEnd?: string; + type?: string; } export interface GraphView { @@ -126,5 +154,6 @@ export interface GraphData { serviceFlow: GraphView; dataFlow: GraphView; functionFlow: 
GraphView; + containerDiagram: GraphView; }; } diff --git a/packages/scanner/tsconfig.json b/packages/scanner/tsconfig.json index 52212e5..08535a0 100644 --- a/packages/scanner/tsconfig.json +++ b/packages/scanner/tsconfig.json @@ -10,5 +10,6 @@ "resolveJsonModule": true, "skipLibCheck": true }, - "include": ["src"] + "include": ["src"], + "exclude": ["src/__tests__"] }