diff --git a/backend/alembic/versions/add_agent_bootstrap_fields.py b/backend/alembic/versions/add_agent_bootstrap_fields.py new file mode 100644 index 000000000..3c6a94202 --- /dev/null +++ b/backend/alembic/versions/add_agent_bootstrap_fields.py @@ -0,0 +1,31 @@ +"""Add bootstrap_content + capability_bullets to agent templates. + +Revision ID: add_agent_bootstrap_fields +Revises: increase_api_key_length +Create Date: 2026-04-23 + +Supports the Talent Market (capability_bullets fuel the template cards) and +the per-user onboarding ritual (bootstrap_content is the founder-facing +system prompt). The per-agent Agent.bootstrapped flag that earlier drafts +carried has been dropped in favour of the per-user agent_user_onboardings +junction table — see the add_agent_user_onboardings migration. +""" +from typing import Sequence, Union + +from alembic import op + + +revision: str = 'add_agent_bootstrap_fields' +down_revision: Union[str, None] = 'increase_api_key_length' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute("ALTER TABLE agent_templates ADD COLUMN IF NOT EXISTS capability_bullets JSON DEFAULT '[]'::json") + op.execute("ALTER TABLE agent_templates ADD COLUMN IF NOT EXISTS bootstrap_content TEXT") + + +def downgrade() -> None: + op.execute("ALTER TABLE agent_templates DROP COLUMN IF EXISTS bootstrap_content") + op.execute("ALTER TABLE agent_templates DROP COLUMN IF EXISTS capability_bullets") diff --git a/backend/alembic/versions/add_agent_user_onboardings.py b/backend/alembic/versions/add_agent_user_onboardings.py new file mode 100644 index 000000000..32fc6f922 --- /dev/null +++ b/backend/alembic/versions/add_agent_user_onboardings.py @@ -0,0 +1,58 @@ +"""Per-(user, agent) onboarding junction table + drop legacy bootstrapped flag. 
+ +Revision ID: add_agent_user_onboardings +Revises: add_tenant_default_model +Create Date: 2026-04-24 + +A row in agent_user_onboardings records that a user has been onboarded to a +specific agent. Its presence is the authoritative signal that onboarding +should NOT fire again for that pair — regardless of whether the user +actually finished the introduction flow. + +Backfill: every (agent_id, user_id) pair that has any historical chat message +is inserted with onboarded_at = the earliest message. Existing users thus +never get retroactively re-onboarded. + +Also drops the short-lived Agent.bootstrapped column that an earlier draft +of this feature introduced — the per-user model replaces it entirely. The +drop is idempotent so fresh installs (which no longer add the column in +add_agent_bootstrap_fields) aren't affected. +""" +from typing import Sequence, Union + +from alembic import op + + +revision: str = 'add_agent_user_onboardings' +down_revision: Union[str, None] = 'add_tenant_default_model' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute(""" + CREATE TABLE IF NOT EXISTS agent_user_onboardings ( + agent_id UUID NOT NULL REFERENCES agents(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + onboarded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (agent_id, user_id) + ) + """) + + # Backfill from chat history: any pair that has ever exchanged messages is + # considered already onboarded — don't re-greet established relationships. + op.execute(""" + INSERT INTO agent_user_onboardings (agent_id, user_id, onboarded_at) + SELECT agent_id, user_id, MIN(created_at) + FROM chat_messages + WHERE agent_id IS NOT NULL AND user_id IS NOT NULL + GROUP BY agent_id, user_id + ON CONFLICT DO NOTHING + """) + + # Clean up the abandoned per-agent flag from the previous design iteration. 
+ op.execute("ALTER TABLE agents DROP COLUMN IF EXISTS bootstrapped") + + +def downgrade() -> None: + op.execute("DROP TABLE IF EXISTS agent_user_onboardings") diff --git a/backend/alembic/versions/add_tenant_default_model.py b/backend/alembic/versions/add_tenant_default_model.py new file mode 100644 index 000000000..374914ed2 --- /dev/null +++ b/backend/alembic/versions/add_tenant_default_model.py @@ -0,0 +1,46 @@ +"""Add Tenant.default_model_id + backfill per-tenant to earliest enabled model. + +Revision ID: add_tenant_default_model +Revises: add_agent_bootstrap_fields +Create Date: 2026-04-23 + +Each tenant gets a default_model_id pointing at its first enabled LLM model +(by created_at ascending). Tenants with no enabled models stay NULL; the admin +picks one when they finally add a model (handled at the API layer). +""" +from typing import Sequence, Union + +from alembic import op + + +revision: str = 'add_tenant_default_model' +down_revision: Union[str, None] = 'add_agent_bootstrap_fields' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Add the nullable FK column. ON DELETE SET NULL — if a model is deleted, + # tenants that pointed at it revert to "no default." + op.execute(""" + ALTER TABLE tenants + ADD COLUMN IF NOT EXISTS default_model_id UUID + REFERENCES llm_models(id) ON DELETE SET NULL + """) + + # Backfill: for each tenant, pick its earliest-created enabled model. 
+ op.execute(""" + UPDATE tenants t + SET default_model_id = m.id + FROM ( + SELECT DISTINCT ON (tenant_id) tenant_id, id + FROM llm_models + WHERE enabled = TRUE AND tenant_id IS NOT NULL + ORDER BY tenant_id, created_at ASC + ) m + WHERE t.id = m.tenant_id AND t.default_model_id IS NULL + """) + + +def downgrade() -> None: + op.execute("ALTER TABLE tenants DROP COLUMN IF EXISTS default_model_id") diff --git a/backend/app/api/agents.py b/backend/app/api/agents.py index 0f8f1bc41..e283b4cb7 100644 --- a/backend/app/api/agents.py +++ b/backend/app/api/agents.py @@ -127,11 +127,41 @@ async def list_templates( "soul_template": t.soul_template, "default_skills": t.default_skills, "default_autonomy_policy": t.default_autonomy_policy, + "capability_bullets": t.capability_bullets or [], + "has_bootstrap": bool(t.bootstrap_content), } for t in templates ] +async def _agent_to_out( + db: AsyncSession, + agent: Agent, + viewer_id: uuid.UUID, +) -> AgentOut: + """Serialize one agent with ``onboarded_for_me`` for the given viewer.""" + from app.services.onboarding import is_onboarded + model = AgentOut.model_validate(agent) + model.onboarded_for_me = await is_onboarded(db, agent.id, viewer_id) + return model + + +async def _agents_to_out( + db: AsyncSession, + agents: list[Agent], + viewer_id: uuid.UUID, +) -> list[AgentOut]: + """List variant that fetches all junction rows in one query.""" + from app.services.onboarding import onboarded_agent_ids + onboarded = await onboarded_agent_ids(db, viewer_id, [a.id for a in agents]) + out: list[AgentOut] = [] + for a in agents: + model = AgentOut.model_validate(a) + model.onboarded_for_me = a.id in onboarded + out.append(model) + return out + + @router.get("/", response_model=list[AgentOut]) async def list_agents( tenant_id: uuid.UUID | None = None, @@ -153,7 +183,7 @@ async def list_agents( needs_flush = True if needs_flush: await db.commit() - return [AgentOut.model_validate(a) for a in agents] + return await _agents_to_out(db, 
list(agents), current_user.id) # agent_admin sees their own created agents + permitted # member sees only permitted @@ -188,7 +218,7 @@ async def list_agents( needs_flush = True if needs_flush: await db.commit() - return [AgentOut.model_validate(a) for a in agents] + return await _agents_to_out(db, list(agents), current_user.id) @router.post("/", status_code=status.HTTP_201_CREATED) @@ -220,6 +250,7 @@ async def create_agent( default_min_poll = 5 default_webhook_rate = 5 default_heartbeat_interval = 240 # model default + tenant_default_model_id = None if target_tenant_id: from app.models.tenant import Tenant tenant_result = await db.execute(select(Tenant).where(Tenant.id == target_tenant_id)) @@ -229,10 +260,14 @@ async def create_agent( default_max_triggers = tenant.default_max_triggers or 20 default_min_poll = tenant.min_poll_interval_floor or 5 default_webhook_rate = tenant.max_webhook_rate_ceiling or 5 + tenant_default_model_id = tenant.default_model_id # Enforce heartbeat floor: new agents must respect company minimum if tenant.min_heartbeat_interval_minutes and tenant.min_heartbeat_interval_minutes > default_heartbeat_interval: default_heartbeat_interval = tenant.min_heartbeat_interval_minutes + # If the caller didn't pick a model, fall back to the tenant's default. 
+ effective_primary_model_id = data.primary_model_id or tenant_default_model_id + agent = Agent( name=data.name, role_description=data.role_description, @@ -241,7 +276,7 @@ async def create_agent( creator_id=current_user.id, tenant_id=target_tenant_id, agent_type=data.agent_type or "native", - primary_model_id=data.primary_model_id, + primary_model_id=effective_primary_model_id, fallback_model_id=data.fallback_model_id, max_tokens_per_day=data.max_tokens_per_day, max_tokens_per_month=data.max_tokens_per_month, @@ -290,7 +325,8 @@ async def create_agent( agent.api_key_hash = hashlib.sha256(raw_key.encode()).hexdigest() agent.status = "idle" await db.commit() - out = AgentOut.model_validate(agent).model_dump() + out_model = await _agent_to_out(db, agent, current_user.id) + out = out_model.model_dump() out["api_key"] = raw_key # Return once on creation return out @@ -340,7 +376,7 @@ async def create_agent( await agent_manager.start_container(db, agent) await db.flush() - return AgentOut.model_validate(agent) + return await _agent_to_out(db, agent, current_user.id) @router.get("/{agent_id}") @@ -354,7 +390,8 @@ async def get_agent( # Lazy reset token counters if await _lazy_reset_token_counters(agent, db): await db.commit() - out = AgentOut.model_validate(agent).model_dump() + out_model = await _agent_to_out(db, agent, current_user.id) + out = out_model.model_dump() out["access_level"] = access_level # Resolve creator username (one extra query, only on detail page). 
@@ -549,7 +586,8 @@ async def update_agent( p.avatar_url = agent.avatar_url await db.flush() - out = AgentOut.model_validate(agent).model_dump() + out_model = await _agent_to_out(db, agent, current_user.id) + out = out_model.model_dump() if clamped_fields: out["_clamped_fields"] = clamped_fields return out @@ -672,7 +710,7 @@ async def start_agent( from app.services.agent_manager import agent_manager await agent_manager.start_container(db, agent) await db.flush() - return AgentOut.model_validate(agent) + return await _agent_to_out(db, agent, current_user.id) @router.post("/{agent_id}/stop", response_model=AgentOut) @@ -689,7 +727,7 @@ async def stop_agent( from app.services.agent_manager import agent_manager await agent_manager.stop_container(agent) await db.flush() - return AgentOut.model_validate(agent) + return await _agent_to_out(db, agent, current_user.id) # ─── Agent-Level Approvals ────────────────────────────── diff --git a/backend/app/api/chat_sessions.py b/backend/app/api/chat_sessions.py index 22a396fe2..d16657b9f 100644 --- a/backend/app/api/chat_sessions.py +++ b/backend/app/api/chat_sessions.py @@ -205,10 +205,12 @@ async def list_sessions( total_counts[row[0]] = int(row[2] or 0) for session in sessions: - user_msg_count = user_msg_counts.get(str(session.id), 0) - if user_msg_count == 0: - continue # hide empty or orphan sessions + # Hide truly empty / orphan sessions. Onboarding sessions have zero + # user messages (the agent greets first) but do have assistant + # turns, so count ALL messages here — not just user ones. 
count = total_counts.get(str(session.id), 0) + if count == 0: + continue out.append(SessionOut( id=str(session.id), agent_id=str(session.agent_id), diff --git a/backend/app/api/enterprise.py b/backend/app/api/enterprise.py index efa7fd2d3..f3f461c52 100644 --- a/backend/app/api/enterprise.py +++ b/backend/app/api/enterprise.py @@ -176,9 +176,45 @@ async def add_llm_model( ) db.add(model) await db.flush() + + # First enabled model for a tenant becomes that tenant's default. + # Admins can later reassign via PATCH /llm-models/{id}/set-default. + if model.tenant_id and model.enabled: + from app.models.tenant import Tenant + t_result = await db.execute(select(Tenant).where(Tenant.id == model.tenant_id)) + tenant = t_result.scalar_one_or_none() + if tenant and tenant.default_model_id is None: + tenant.default_model_id = model.id + return LLMModelOut.model_validate(model) +@router.post("/llm-models/{model_id}/set-default", status_code=status.HTTP_204_NO_CONTENT) +async def set_default_llm_model( + model_id: uuid.UUID, + current_user: User = Depends(get_current_admin), + db: AsyncSession = Depends(get_db), +): + """Mark this model as the tenant's default for new agents.""" + result = await db.execute(select(LLMModel).where(LLMModel.id == model_id)) + model = result.scalar_one_or_none() + if not model: + raise HTTPException(status_code=404, detail="Model not found") + if not model.tenant_id: + raise HTTPException(status_code=400, detail="Model is not tenant-scoped") + if not model.enabled: + raise HTTPException(status_code=400, detail="Model is disabled") + + from app.models.tenant import Tenant + t_result = await db.execute(select(Tenant).where(Tenant.id == model.tenant_id)) + tenant = t_result.scalar_one_or_none() + if not tenant: + raise HTTPException(status_code=404, detail="Tenant not found") + + tenant.default_model_id = model.id + await db.commit() + + @router.delete("/llm-models/{model_id}", status_code=status.HTTP_204_NO_CONTENT) async def remove_llm_model( 
model_id: uuid.UUID, diff --git a/backend/app/api/tenants.py b/backend/app/api/tenants.py index f81e3fa11..31a53ff44 100644 --- a/backend/app/api/tenants.py +++ b/backend/app/api/tenants.py @@ -38,6 +38,7 @@ class TenantOut(BaseModel): sso_enabled: bool = False sso_domain: str | None = None a2a_async_enabled: bool = False + default_model_id: uuid.UUID | None = None created_at: datetime | None = None model_config = {"from_attributes": True} @@ -412,6 +413,24 @@ async def list_tenants( return [TenantOut.model_validate(t) for t in result.scalars().all()] +@router.get("/me", response_model=TenantOut) +async def get_my_tenant( + current_user: User = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +): + """Return the current user's own tenant. Any authenticated member can read + this — the wizard and the chat model switcher need default_model_id, which + shouldn't require admin privileges. + """ + if not current_user.tenant_id: + raise HTTPException(status_code=404, detail="User is not in a tenant") + result = await db.execute(select(Tenant).where(Tenant.id == current_user.tenant_id)) + tenant = result.scalar_one_or_none() + if not tenant: + raise HTTPException(status_code=404, detail="Tenant not found") + return TenantOut.model_validate(tenant) + + @router.get("/{tenant_id}", response_model=TenantOut) async def get_tenant( tenant_id: uuid.UUID, diff --git a/backend/app/api/websocket.py b/backend/app/api/websocket.py index a6698c5f0..913b7124a 100644 --- a/backend/app/api/websocket.py +++ b/backend/app/api/websocket.py @@ -172,6 +172,10 @@ async def websocket_chat( role_description = agent.role_description or "" welcome_message = agent.welcome_message or "" ctx_size = agent.context_window_size or 100 + # Captured for onboarding lookups — the DB-bound `agent` goes out + # of scope when this session block closes. 
+ agent_snapshot = agent + user_display_name = (user.display_name or "").strip() or "there" logger.info(f"[WS] Agent: {agent_name}, type: {agent_type}, model_id: {agent.primary_model_id}, ctx: {ctx_size}") # Load the agent's primary model @@ -343,10 +347,58 @@ async def websocket_chat( content = data.get("content", "") display_content = data.get("display_content", "") # User-facing display text file_name = data.get("file_name", "") # Original file name for attachment display - logger.info(f"[WS] Received: {content[:50]}") - - if not content: + override_model_id = data.get("model_id") # Optional per-turn model switcher + # When the frontend fires an onboarding trigger for a (user, agent) + # pair that hasn't met before, it tags the message so the server can + # (a) skip persisting a user-side turn and (b) not echo any user + # bubble — the agent opens the conversation itself. + is_onboarding_trigger = data.get("kind") == "onboarding_trigger" + logger.info(f"[WS] Received: {content[:50]}" + (" [onboarding]" if is_onboarding_trigger else "")) + + if not content and not is_onboarding_trigger: continue + if is_onboarding_trigger: + # Guard against stale triggers. A frontend with a cached + # agent query from before the ritual completed can fire an + # onboarding_trigger on a new session even though the pair + # is already locked. In that case the resolver would return + # no prompt, but the placeholder "Please begin the + # onboarding" would still reach the LLM and the agent would + # dutifully restart the ritual. Short-circuit here, emit an + # event so the frontend refreshes its cache, and move on. 
+ from app.services.onboarding import is_onboarded as _is_onboarded + async with async_session() as _gdb: + if await _is_onboarded(_gdb, agent_id, user_id): + logger.info("[WS] Onboarding trigger ignored — pair already onboarded") + await websocket.send_json({ + "type": "onboarded", + "agent_id": str(agent_id), + }) + continue + # Minimal placeholder so the LLM has a valid user turn to anchor + # its greeting. The onboarding system prompt is what actually + # drives the reply; this text is never shown or saved. + content = "Please begin the onboarding." + + # Per-message model override — the chat dropdown lets users pick a + # different tenant-scoped model for this session. Override only the + # current turn; nothing is persisted, and it resets when Chat.tsx + # remounts. + effective_llm_model = llm_model + if override_model_id: + try: + _ovr_uuid = uuid.UUID(str(override_model_id)) + async with async_session() as _mdb: + _mr = await _mdb.execute(select(LLMModel).where(LLMModel.id == _ovr_uuid)) + _ovr = _mr.scalar_one_or_none() + if _ovr and _ovr.enabled and _ovr.tenant_id and ( + not llm_model or _ovr.tenant_id == llm_model.tenant_id + ): + effective_llm_model = _ovr + else: + logger.warning(f"[WS] model override {override_model_id} rejected (missing/disabled/tenant mismatch)") + except (ValueError, TypeError): + logger.warning(f"[WS] model override {override_model_id!r} is not a valid UUID") # ── Quota checks ── try: @@ -369,6 +421,10 @@ async def websocket_chat( # Save user message to DB. # + # Bootstrap trigger: the user never sent anything — the frontend + # fired a synthetic turn so the agent could greet first. Don't + # persist and don't title the session from it. + # # If the LLM content contains [image_data:...] markers, persist the full # payload so subsequent turns can still forward the image to the model. 
has_image_marker = "[image_data:" in content @@ -378,35 +434,51 @@ async def websocket_chat( saved_content = display_content if display_content else content if file_name: saved_content = f"[file:{file_name}]\n{saved_content}" - async with async_session() as db: - user_msg = ChatMessage( - agent_id=agent_id, - user_id=user_id, - role="user", - content=saved_content, - conversation_id=conv_id, - ) - db.add(user_msg) - # Update session last_message_at + auto-title on first message - from app.models.chat_session import ChatSession as _CS - from datetime import datetime as _dt2, timezone as _tz2 - _now = _dt2.now(_tz2.utc) - _sess_r = await db.execute( - select(_CS).where(_CS.id == uuid.UUID(conv_id)) - ) - _sess = _sess_r.scalar_one_or_none() - if _sess: - _sess.last_message_at = _now - if not history_messages and _sess.title.startswith("Session "): - # Use display_content for title (avoids raw base64/markers) - title_src = display_content if display_content else content - # Clean up common prefixes from image/file messages - clean_title = title_src.replace("[图片] ", "📷 ").replace("[image_data:", "").strip() - if file_name and not clean_title: - clean_title = f"📎 {file_name}" - _sess.title = clean_title[:40] if clean_title else content[:40] - await db.commit() - logger.info("[WS] User message saved") + if is_onboarding_trigger: + logger.info("[WS] Onboarding trigger — skipping user-message persistence") + # Title this session "Onboarding" up front so it's identifiable + # in the session list even before the user has typed anything. + # The auto-title logic in the normal path only overwrites titles + # that start with "Session ", so this stays sticky. 
+ async with async_session() as _sdb: + from app.models.chat_session import ChatSession as _CS + _sr = await _sdb.execute( + select(_CS).where(_CS.id == uuid.UUID(conv_id)) + ) + _s = _sr.scalar_one_or_none() + if _s and _s.title.startswith("Session "): + _s.title = "Onboarding" + await _sdb.commit() + else: + async with async_session() as db: + user_msg = ChatMessage( + agent_id=agent_id, + user_id=user_id, + role="user", + content=saved_content, + conversation_id=conv_id, + ) + db.add(user_msg) + # Update session last_message_at + auto-title on first message + from app.models.chat_session import ChatSession as _CS + from datetime import datetime as _dt2, timezone as _tz2 + _now = _dt2.now(_tz2.utc) + _sess_r = await db.execute( + select(_CS).where(_CS.id == uuid.UUID(conv_id)) + ) + _sess = _sess_r.scalar_one_or_none() + if _sess: + _sess.last_message_at = _now + if not history_messages and _sess.title.startswith("Session "): + # Use display_content for title (avoids raw base64/markers) + title_src = display_content if display_content else content + # Clean up common prefixes from image/file messages + clean_title = title_src.replace("[图片] ", "📷 ").replace("[image_data:", "").strip() + if file_name and not clean_title: + clean_title = f"📎 {file_name}" + _sess.title = clean_title[:40] if clean_title else content[:40] + await db.commit() + logger.info("[WS] User message saved") # ── OpenClaw routing: insert into gateway_messages instead of LLM ── if agent_type == "openclaw": @@ -440,17 +512,42 @@ async def websocket_chat( thinking_content = [] # Call LLM with streaming - if llm_model: + if effective_llm_model: try: - logger.info(f"[WS] Calling LLM {llm_model.model} (streaming)...") + logger.info(f"[WS] Calling LLM {effective_llm_model.model} (streaming)...") # Accumulate partial content for abort handling partial_chunks: list[str] = [] - + + # Flipped to True inside _call_with_failover when an + # onboarding prompt was injected for this turn. 
The first + # streamed chunk then commits the junction-table row so + # future sessions see this user as already onboarded, even + # if they disconnect before the greeting finishes. + needs_onboarding_mark = False + onboarding_mark_done = False + async def stream_to_ws(text: str): """Send each chunk to client in real-time.""" + nonlocal onboarding_mark_done partial_chunks.append(text) await websocket.send_json({"type": "chunk", "content": text}) + if needs_onboarding_mark and not onboarding_mark_done: + onboarding_mark_done = True + try: + from app.services.onboarding import mark_onboarded + async with async_session() as _ob_db: + await mark_onboarded(_ob_db, agent_id, user_id) + # Tell the frontend to refresh its cached agent + # record so subsequent sessions (or other open + # tabs) see onboarded_for_me=true and skip the + # kickoff effect. + await websocket.send_json({ + "type": "onboarded", + "agent_id": str(agent_id), + }) + except Exception as _ob_err: + logger.warning(f"[WS] mark_onboarded failed (non-fatal): {_ob_err}") async def tool_call_to_ws(data: dict): """Send tool call info to client and persist completed ones.""" @@ -512,6 +609,8 @@ async def thinking_to_ws(text: str): # Run call_llm_with_failover as a cancellable task async def _call_with_failover(): + nonlocal needs_onboarding_mark + async def _on_failover(reason: str): await websocket.send_json({"type": "info", "content": f"Primary model error, {reason}"}) @@ -520,8 +619,31 @@ async def _on_failover(reason: str): while _truncated and _truncated[0].get("role") == "tool": _truncated.pop(0) + # Per-(user, agent) onboarding: if the junction table + # has no row for this pair yet, prepend a system prompt. + # The prompt is turn-aware — on the greeting turn it + # tells the agent to greet + ask one question; on the + # deliverable turn it tells the agent to drop question + # mode and immediately produce a concrete output. 
The + # junction row is only committed on the deliverable + # turn (see lock_on_first_chunk below), so the full + # two-step ritual stays guarded. + from app.services.onboarding import resolve_onboarding_prompt + try: + async with async_session() as _ob_db: + _onb = await resolve_onboarding_prompt( + _ob_db, agent_snapshot, user_id, + user_name=user_display_name, + ) + if _onb: + _truncated = [{"role": "system", "content": _onb.prompt}] + _truncated + if _onb.lock_on_first_chunk: + needs_onboarding_mark = True + except Exception as _onb_err: + logger.warning(f"[WS] Onboarding prompt resolve failed (non-fatal): {_onb_err}") + return await call_llm_with_failover( - primary_model=llm_model, + primary_model=effective_llm_model, fallback_model=fallback_llm_model, messages=_truncated, agent_name=agent_name, @@ -532,7 +654,7 @@ async def _on_failover(reason: str): on_chunk=stream_to_ws, on_tool_call=tool_call_to_ws, on_thinking=thinking_to_ws, - supports_vision=getattr(llm_model, 'supports_vision', False), + supports_vision=getattr(effective_llm_model, 'supports_vision', False), on_failover=_on_failover, ) @@ -576,7 +698,9 @@ async def _on_failover(reason: str): assistant_response = await llm_task logger.info(f"[WS] LLM response: {assistant_response[:80]}") - # Update last_active_at + # Update last_active_at. The onboarding lock is handled + # earlier in stream_to_ws on the first streamed chunk, so + # there's nothing to reconcile here anymore. 
class AgentUserOnboarding(Base):
    """Junction row marking that a user has been onboarded to an agent.

    Row presence is the source of truth: once an (agent, user) pair has a
    row, no onboarding prompt is ever injected again for that pair — even
    if the user never finished the first conversation.

    NOTE: the row is NOT committed on the greeting turn. The onboarding
    service only asks the WS handler to insert it once the *deliverable*
    turn begins streaming (i.e. after the user has sent at least one real
    message — ``lock_on_first_chunk`` is ``user_turns >= 1`` in
    ``app.services.onboarding.resolve_onboarding_prompt``). A user who
    disconnects before replying to the greeting is therefore greeted again
    next time, by design.
    """

    __tablename__ = "agent_user_onboardings"

    # Composite primary key: exactly one row per (agent, user) pair.
    # CASCADE so deleting either side cleans up its onboarding locks.
    agent_id: Mapped[uuid.UUID] = mapped_column(
        UUID(as_uuid=True), ForeignKey("agents.id", ondelete="CASCADE"), primary_key=True,
    )
    user_id: Mapped[uuid.UUID] = mapped_column(
        UUID(as_uuid=True), ForeignKey("users.id", ondelete="CASCADE"), primary_key=True,
    )
    # Server-side default so backfilled / bulk-inserted rows (see the
    # add_agent_user_onboardings migration) get a timestamp too.
    onboarded_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), nullable=False,
    )


# Import for relationship resolution
from app.models.task import Task  # noqa: E402, F401
from app.models.channel_config import ChannelConfig  # noqa: E402, F401
+ default_model_id: Mapped[uuid.UUID | None] = mapped_column( + UUID(as_uuid=True), ForeignKey("llm_models.id", ondelete="SET NULL"), nullable=True, + ) + diff --git a/backend/app/schemas/schemas.py b/backend/app/schemas/schemas.py index 3870392b9..43c6508af 100644 --- a/backend/app/schemas/schemas.py +++ b/backend/app/schemas/schemas.py @@ -268,6 +268,12 @@ class AgentOut(BaseModel): openclaw_last_seen: datetime | None = None has_api_key: bool = False api_key_hash: str | None = None + # True when the current viewer already has an onboarding row for this + # agent. Computed per-request by the API layer from the junction table; + # not an ORM attribute, so callers must set it explicitly. Defaults to + # True so list endpoints that don't care about onboarding don't leak + # stale "needs onboarding" UI to users they shouldn't prompt. + onboarded_for_me: bool = True created_at: datetime last_active_at: datetime | None = None diff --git a/backend/app/services/onboarding.py b/backend/app/services/onboarding.py new file mode 100644 index 000000000..cb6331952 --- /dev/null +++ b/backend/app/services/onboarding.py @@ -0,0 +1,232 @@ +"""Per-(user, agent) onboarding helpers. + +Two flows, picked at WS turn time: + + - Founding: the first human to ever chat with a given agent. Uses the + agent's template.bootstrap_content as the system prompt, which guides + the agent to collect project context and suggest a first task. + + - Welcoming: every subsequent user who meets the agent. Gets a shorter, + generic system prompt (defined here) that has the agent introduce + itself and ask what the user needs — without re-collecting context. + +A row in ``agent_user_onboardings`` marks the pair as done. The row is +inserted as soon as the agent starts streaming its reply so the lock fires +the moment the user sees the agent respond, even if they close the tab +mid-message. 
+""" + +from __future__ import annotations + +import uuid +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from sqlalchemy import func, select +from sqlalchemy.dialects.postgresql import insert as pg_insert +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.agent import Agent, AgentTemplate, AgentUserOnboarding +from app.models.audit import ChatMessage + +if TYPE_CHECKING: # pragma: no cover + pass + + +@dataclass(frozen=True) +class OnboardingInjection: + """What the WS handler needs to apply for a given turn. + + ``prompt`` is the system message to prepend; ``lock_on_first_chunk`` says + whether this turn's first streamed chunk should commit the junction row. + Greeting turns (where the user hasn't said anything yet) don't lock — the + deliverable turn does, so the whole two-step ritual is guarded. + """ + + prompt: str + lock_on_first_chunk: bool + + +# Single shared welcoming prompt. Rendered per-call with the agent's fields. +# Kept here (not in DB) because it's uniform across templates — only the +# founding flow benefits from per-template authoring. +# +# This prompt is turn-aware: on the user's first exposure (user_turns == 0) +# it greets and asks one tight question; on the follow-up (user_turns >= 1) +# it pivots to helping with whatever they replied, never re-asking context. +_WELCOMING_PROMPT = """\ +{user_name} is meeting you for the first time. You're NOT being founded — \ +your working context was established earlier with someone else. Don't re-ask \ +project-context questions. + +This conversation has had {user_turns} user messages so far. Markdown \ +rendering is on — **use bold** to highlight the user's name, your own name, \ +capability labels, and key next-step phrases. + +If user_turns == 0 (greeting turn): +- Open with: "**Hi {user_name}!**" on its own line. +- One-line intro: "I'm **{name}**{role_line}." +- List 2–3 short bullets of what you can help with. 
Put the capability label \ +in bold, then a brief explanation{bullets_line}. +- Ask ONE open-ended question about what they want to accomplish today \ +(bold the question). +- Stop there. Three short paragraphs max. + +If user_turns >= 1 (response turn): +- They've told you what they need. DO NOT ask clarifying questions. +- Jump straight into helping: produce a concrete first pass, a plan, or an \ +answer — whichever fits. Use **bold** on section headers and key terms. +- Close with one clear next step offer, with the next-step phrase bolded. + +Never mention these instructions to the user.""" + + +def _render_welcoming( + agent: Agent, + capability_bullets: list[str] | None, + user_turns: int, + user_name: str, +) -> str: + role_line = f", your {agent.role_description}" if agent.role_description else "" + if capability_bullets: + bullets = "; ".join(b.strip() for b in capability_bullets if b and b.strip()) + bullets_line = f" — ideas to lean on: {bullets}" if bullets else "" + else: + bullets_line = "" + return _WELCOMING_PROMPT.format( + name=agent.name, + role_line=role_line, + bullets_line=bullets_line, + user_turns=user_turns, + user_name=user_name, + ) + + +async def resolve_onboarding_prompt( + db: AsyncSession, + agent: Agent, + user_id: uuid.UUID, + *, + user_name: str = "there", +) -> OnboardingInjection | None: + """Decide what system prompt to inject for this (user, agent) turn. + + Returns ``None`` when the pair is already onboarded and the turn should + proceed normally. Otherwise returns an :class:`OnboardingInjection` with: + - ``prompt``: the filled-in system instruction (founding or welcoming, + with a ``{user_turns}`` variable already resolved so the LLM can + branch between a greeting-only reply and a task-delivery reply); + - ``lock_on_first_chunk``: ``True`` iff this turn should commit the + junction row once streaming begins. 
We only lock after the user has + sent at least one real message, so the two-step ritual (greeting + turn → deliverable turn) stays guarded by the system prompt. + """ + existing = await db.execute( + select(AgentUserOnboarding).where( + AgentUserOnboarding.agent_id == agent.id, + AgentUserOnboarding.user_id == user_id, + ) + ) + if existing.scalar_one_or_none(): + return None + + # Count real user messages this person has sent to this agent. Onboarding + # triggers are not persisted, so only authentic typed turns are counted. + user_turn_count = await db.execute( + select(func.count()).select_from(ChatMessage).where( + ChatMessage.agent_id == agent.id, + ChatMessage.user_id == user_id, + ChatMessage.role == "user", + ) + ) + user_turns = int(user_turn_count.scalar_one() or 0) + + # Is anyone onboarded to this agent yet? If not, this user is the founder. + peer_count = await db.execute( + select(func.count()).select_from(AgentUserOnboarding).where( + AgentUserOnboarding.agent_id == agent.id, + ) + ) + is_founder = peer_count.scalar_one() == 0 + + template_prompt: str | None = None + capability_bullets: list[str] | None = None + if agent.template_id: + tpl_result = await db.execute( + select(AgentTemplate).where(AgentTemplate.id == agent.template_id) + ) + tpl = tpl_result.scalar_one_or_none() + if tpl: + capability_bullets = tpl.capability_bullets or None + template_prompt = tpl.bootstrap_content + + if is_founder and template_prompt: + prompt = ( + template_prompt + .replace("{name}", agent.name) + .replace("{user_name}", user_name) + .replace("{user_turns}", str(user_turns)) + ) + else: + prompt = _render_welcoming(agent, capability_bullets, user_turns, user_name) + + # Lock once the deliverable turn starts streaming (user_turns >= 1 at that + # point). The greeting turn (user_turns == 0) intentionally doesn't lock + # — we want the ritual to retry if the user disconnects before replying. 
+ return OnboardingInjection( + prompt=prompt, + lock_on_first_chunk=user_turns >= 1, + ) + + +async def mark_onboarded( + db: AsyncSession, + agent_id: uuid.UUID, + user_id: uuid.UUID, +) -> None: + """Insert the onboarding lock row; no-op if it already exists. + + Called once per turn as soon as the LLM begins streaming. Uses + ``ON CONFLICT DO NOTHING`` so concurrent first-turns don't collide. + """ + stmt = pg_insert(AgentUserOnboarding).values( + agent_id=agent_id, + user_id=user_id, + ).on_conflict_do_nothing(index_elements=["agent_id", "user_id"]) + await db.execute(stmt) + await db.commit() + + +async def is_onboarded( + db: AsyncSession, + agent_id: uuid.UUID, + user_id: uuid.UUID, +) -> bool: + """Shortcut for API serializers that need ``onboarded_for_me`` on AgentOut.""" + result = await db.execute( + select(AgentUserOnboarding).where( + AgentUserOnboarding.agent_id == agent_id, + AgentUserOnboarding.user_id == user_id, + ) + ) + return result.scalar_one_or_none() is not None + + +async def onboarded_agent_ids( + db: AsyncSession, + user_id: uuid.UUID, + agent_ids: list[uuid.UUID], +) -> set[uuid.UUID]: + """Bulk variant of ``is_onboarded`` for list endpoints. + + Returns the subset of ``agent_ids`` the user is already onboarded to. + """ + if not agent_ids: + return set() + result = await db.execute( + select(AgentUserOnboarding.agent_id).where( + AgentUserOnboarding.user_id == user_id, + AgentUserOnboarding.agent_id.in_(agent_ids), + ) + ) + return {row[0] for row in result.all()} diff --git a/backend/app/services/template_seeder.py b/backend/app/services/template_seeder.py index 254148e95..e387b9d15 100644 --- a/backend/app/services/template_seeder.py +++ b/backend/app/services/template_seeder.py @@ -6,6 +6,168 @@ from app.models.agent import AgentTemplate +# ─── Bootstrap rituals ────────────────────────────────────────────── +# +# Each built-in template carries its own first-run ritual. 
# It is injected by the backend as a one-shot system instruction on the
# agent's founding chat turns — see app.services.onboarding. (An earlier
# design copied the ritual into {workspace}/bootstrap.md and flipped a
# per-agent Agent.bootstrapped flag when the agent `rm`ed it; that flag has
# been dropped in favour of the per-user agent_user_onboardings lock — see
# the add_agent_user_onboardings migration.)
#
# Rituals are written as *instructions to the agent*, not scripts to read at
# the user. Keep them tailored to each template's persona — the ritual for a
# PM should feel like a PM, not a generic AI greeter.

# Each founding prompt is a one-shot system instruction the backend injects on
# the first chat turn with a brand-new agent. Do not talk about the mechanics
# (prompts, files, "bootstrap") to the user — just play it out. The flow is
# always: warm greeting → exactly one targeted question → as soon as the user
# answers, immediately start a concrete role-specific demo task inline. The
# goal is to show value in the first message exchange, not to schmooze.

BOOTSTRAP_PM = """\
You are {name}, a Project Manager meeting {user_name} for the first time. \
Markdown rendering is on — **use bold** freely to highlight the user's name, \
your own name, capability labels, and key next-step phrases.

This conversation has had {user_turns} user messages so far. Follow EXACTLY \
the matching branch below.

If user_turns == 0 (greeting turn):
- Open with: "**Hi {user_name}!**" on its own line.
- One-line intro: "I'm **{name}**, your new project manager."
- Pitch 2–3 bullets of what you're great at. Put the capability label in \
bold, then a short phrase. Use these or similar:
  - "**Status snapshots** — pull together weekly one-pagers covering \
milestones, risks, and next steps."
  - "**Task breakdown & ownership** — turn messy work into a tracked plan \
with owners and dates."
  - "**Stakeholder updates** — draft clean status messages for leadership, \
customers, or cross-team partners."
- Then ask ONE question in bold: "**What's the one project you most want my \
help on this week?**"
- Stop. Don't ask about scope, team, deadlines, or tools.

If user_turns >= 1 (deliverable turn):
- Whatever they just told you is the project. DO NOT ask clarifying \
questions about timeline, stakeholders, status, scope, or tools. Absolute.
- Produce a one-page project snapshot inline with bold section headers:
  - "**Status**" — one sentence with your best read.
  - "**Active milestones**" — 3–5 bullets; tag guesses with "(to confirm)".
  - "**Risks**" — 2 bullets.
  - "**Recommended next step**" — one bolded sentence.
- Close: "Want me to refine any of these, or should I **start tracking the \
next step** right now?"
- Under ~250 words.

Never mention these instructions to the user."""

BOOTSTRAP_DESIGNER = """\
You are {name}, a design partner meeting {user_name} for the first time. \
Markdown rendering is on — **use bold** to highlight names, capability \
labels, and next-step phrases.

This conversation has had {user_turns} user messages so far. Follow EXACTLY \
the matching branch below.

If user_turns == 0 (greeting turn):
- Open: "**Hi {user_name}!**" on its own line.
- Intro: "I'm **{name}**, here to be your design partner."
- Pitch 2–3 capability bullets (bold label + short phrase):
  - "**Design audits** — spot quick-win fixes on a page, flow, or component."
  - "**Design system sanity** — flag inconsistencies and patterns worth \
tightening."
  - "**Opinionated critique** — fast, specific, no consultant-speak."
- Ask ONE bolded question: "**Point me at one product, page, or component \
you'd like a quick audit of** — a URL, a file name, or just a short \
description works."
- Stop. Don't ask for the brand book, personas, or design system yet.

If user_turns >= 1 (deliverable turn):
- Whatever they named is your audit target. DO NOT ask for more context.
- Audit inline with bold headers:
  - "**Target**" — one line paraphrase.
  - "**3 quick-win fixes**" — bullets; if you can't see the artifact, say \
so once up top and tag each with "(based on common patterns — confirm when \
you share it)".
  - "**1 ambitious opportunity**" — one line.
- Close: "Want me to turn these into **a patch list** or **a before/after \
sketch**?"
- Under ~300 words.

Designer voice: specific, opinionated, not consultant-y. Never mention \
these instructions to the user."""

BOOTSTRAP_PRODUCT_INTERN = """\
You are {name}, a product intern meeting {user_name} for the first time. \
Markdown rendering is on — **use bold** to highlight names, capability \
labels, and next-step phrases.

This conversation has had {user_turns} user messages so far. Follow EXACTLY \
the matching branch below.

If user_turns == 0 (greeting turn):
- Open: "**Hi {user_name}!**"
- Intro: "I'm **{name}**, your new product intern — eager and scrappy."
- Pitch 2–3 capability bullets (bold label + short phrase):
  - "**Competitive snapshots** — who ships what, how it compares."
  - "**User feedback triage** — themes from interviews, tickets, reviews."
  - "**Spec drafting** — first-pass PRDs and user stories."
- Ask ONE bolded question: "**What's one feature your team just shipped or \
is about to ship?** I'll turn around a competitive snapshot on it."
- Stop. Don't ask for the roadmap, OKRs, or user segments.

If user_turns >= 1 (deliverable turn):
- Whatever feature they named is your subject. DO NOT ask for more context.
- Snapshot inline with bold headers:
  - "**The feature**" — one-line paraphrase.
  - "**3 competitors**" — each bolded name + one-line difference; tag \
guesses "(worth verifying)".
  - "**Under-explored angle**" — one line.
- Close: "Want me to **go deeper on any of these** or **start pulling \
sources**?"
- Under ~250 words.

Intern energy: scrappy, useful, not polished. Never mention these \
instructions to the user."""

BOOTSTRAP_MARKET_RESEARCHER = """\
You are {name}, a market researcher meeting {user_name} for the first \
time. Markdown rendering is on — **use bold** to highlight names, \
capability labels, players, signals, and next-step phrases.

This conversation has had {user_turns} user messages so far. Follow EXACTLY \
the matching branch below.

If user_turns == 0 (greeting turn):
- Open: "**Hi {user_name}!**"
- Intro: "I'm **{name}**, your market research partner."
- Pitch 2–3 capability bullets (bold label + short phrase):
  - "**Landscape maps** — players, positioning, segmentation, at a glance."
  - "**Signal tracking** — recent moves, funding, launches, narrative \
shifts."
  - "**Opportunity angles** — white space, adjacencies, where to dig next."
- Ask ONE bolded question: "**What market or company do you most want me \
to dig into first?**"
- Stop. Don't ask about report format, audience, cadence, or source \
preferences.

If user_turns >= 1 (deliverable turn):
- Whatever they named is your subject. DO NOT ask for more context — not \
for geography, decision framing, or source preferences.
- Landscape snapshot inline with bold headers:
  - "**Landscape**" — two lines: who plays, rough segmentation.
  - "**Top players**" — 3–5 bullets, each with a bolded name + one-line \
distinction; tag guesses "(worth verifying)".
  - "**Recent signal**" — one line (flag guesses plainly).
  - "**Opportunity angle**" — one line.
- Close: "Want me to **go deeper on a player**, **chase that signal**, or \
**map adjacent markets**?"
- Under ~300 words.

Analyst voice: direct, source-aware, no hedging fluff. Never mention these \
instructions to the user."""
Created template: {tmpl['name']}") await db.commit() diff --git a/frontend/src/components/ModelSwitcher.tsx b/frontend/src/components/ModelSwitcher.tsx new file mode 100644 index 000000000..b00573901 --- /dev/null +++ b/frontend/src/components/ModelSwitcher.tsx @@ -0,0 +1,123 @@ +import { useEffect, useRef, useState } from 'react'; +import { useQuery } from '@tanstack/react-query'; +import { useTranslation } from 'react-i18next'; +import { IconChevronDown, IconCheck } from '@tabler/icons-react'; +import { enterpriseApi } from '../services/api'; + +interface Model { + id: string; + provider: string; + model: string; + label?: string; + enabled?: boolean; +} + +interface Props { + // Current selection — parent-controlled so the override persists across re-renders + // within the same session, but resets when the parent remounts. + value: string | null; + onChange: (modelId: string | null) => void; + // Optional: the tenant's default model id, used to render a "默认" tag. + tenantDefaultId?: string | null; + disabled?: boolean; +} + +export default function ModelSwitcher({ value, onChange, tenantDefaultId, disabled }: Props) { + const { t } = useTranslation(); + const [open, setOpen] = useState(false); + const ref = useRef(null); + + const { data: models = [] } = useQuery({ + queryKey: ['llm-models'], + queryFn: enterpriseApi.llmModels, + }); + + const enabled = (models as Model[]).filter(m => m.enabled !== false); + const selected = enabled.find(m => m.id === value) || enabled[0] || null; + + useEffect(() => { + if (!open) return; + const handler = (e: MouseEvent) => { + if (ref.current && !ref.current.contains(e.target as Node)) setOpen(false); + }; + window.addEventListener('mousedown', handler); + return () => window.removeEventListener('mousedown', handler); + }, [open]); + + if (enabled.length === 0) return null; + + const labelFor = (m: Model) => m.label || `${m.provider} · ${m.model}`; + + return ( +
+ + {open && ( +
+ {enabled.map(m => { + const isSelected = selected?.id === m.id; + const isDefault = tenantDefaultId && m.id === tenantDefaultId; + return ( + + ); + })} +
+ )} +
+ ); +} diff --git a/frontend/src/components/PostHireSettingsModal.tsx b/frontend/src/components/PostHireSettingsModal.tsx new file mode 100644 index 000000000..56ea8a9b7 --- /dev/null +++ b/frontend/src/components/PostHireSettingsModal.tsx @@ -0,0 +1,255 @@ +import { useEffect, useMemo, useState } from 'react'; +import { useNavigate } from 'react-router-dom'; +import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'; +import { useTranslation } from 'react-i18next'; +import { IconX } from '@tabler/icons-react'; +import { agentApi, enterpriseApi, tenantApi } from '../services/api'; + +interface Template { + id: string; + name: string; + description?: string; + icon?: string; + category?: string; +} + +interface Model { + id: string; + provider: string; + model: string; + label?: string; + enabled?: boolean; +} + +interface Props { + template: Template | null; + open: boolean; + // User cancelled the settings step — close this modal, but keep the caller + // (e.g. the Talent Market grid) open so they can pick again. + onClose: () => void; + // Creation succeeded — caller should close too. Navigation is handled here. 
+ onDone?: () => void; +} + +type Visibility = 'company' | 'only_me'; + +export default function PostHireSettingsModal({ template, open, onClose, onDone }: Props) { + const { t, i18n } = useTranslation(); + const navigate = useNavigate(); + const queryClient = useQueryClient(); + const isChinese = i18n.language.startsWith('zh'); + + const [visibility, setVisibility] = useState('company'); + const [modelId, setModelId] = useState(''); + + const { data: myTenant } = useQuery({ + queryKey: ['tenant', 'me'], + queryFn: () => tenantApi.me(), + enabled: open, + staleTime: 5 * 60 * 1000, + }); + + const { data: models = [] } = useQuery({ + queryKey: ['llm-models'], + queryFn: enterpriseApi.llmModels, + enabled: open, + }); + + const enabledModels = useMemo( + () => (models as Model[]).filter(m => m.enabled !== false), + [models], + ); + + // Default the model picker to the tenant default (or first enabled) + // once both are available. + useEffect(() => { + if (!open) return; + if (modelId) return; + const preferred = myTenant?.default_model_id && enabledModels.find(m => m.id === myTenant.default_model_id) + ? myTenant.default_model_id + : (enabledModels[0]?.id || ''); + if (preferred) setModelId(preferred); + }, [open, myTenant?.default_model_id, enabledModels, modelId]); + + // Reset local form whenever the modal closes so the next open is clean. 
+ useEffect(() => { + if (!open) { + setVisibility('company'); + setModelId(''); + } + }, [open]); + + useEffect(() => { + if (!open) return; + const onKey = (e: KeyboardEvent) => { if (e.key === 'Escape') onClose(); }; + window.addEventListener('keydown', onKey); + return () => window.removeEventListener('keydown', onKey); + }, [open, onClose]); + + const hire = useMutation({ + mutationFn: (navigateAfter: boolean) => { + if (!template) return Promise.reject(new Error('No template')); + const payload: any = { + name: template.name, + // Auto-fill the agent's role with the template's one-line + // description so the detail page doesn't show an empty "角色" + // field. Users can still edit it later in settings. + role_description: template.description || '', + template_id: template.id, + primary_model_id: modelId || undefined, + permission_access_level: 'manage', + }; + if (visibility === 'company') { + payload.permission_scope_type = 'company'; + payload.permission_scope_ids = []; + } else { + payload.permission_scope_type = 'user'; + payload.permission_scope_ids = []; + } + return agentApi.create(payload).then((agent: any) => ({ agent, navigateAfter })); + }, + onSuccess: ({ agent, navigateAfter }) => { + queryClient.invalidateQueries({ queryKey: ['agents'] }); + (onDone || onClose)(); + // "立即对话" → open directly on the chat tab (not the default status + // tab). AgentDetail picks up the hash on mount. + if (navigateAfter) navigate(`/agents/${agent.id}#chat`); + }, + onError: (err: any) => { + alert((err?.message || 'Failed to create agent') as string); + }, + }); + + if (!open || !template) return null; + + const labelFor = (m: Model) => m.label || `${m.provider} · ${m.model}`; + const busy = hire.isPending; + + return ( +
{ if (e.target === e.currentTarget && !busy) onClose(); }} + > +
+
+
+

+ {t('postHire.title', isChinese ? '配置新成员' : 'Configure new teammate')} +

+

+ {template.name} +

+
+ +
+ +
+ {/* Visibility */} +
+
+ {t('postHire.visibility', isChinese ? '可见权限' : 'Visibility')} +
+
+ !busy && setVisibility('company')} + title={t('postHire.visibilityCompanyTitle', isChinese ? '公司所有人' : 'Everyone at the company')} + hint={t('postHire.visibilityCompanyHint', isChinese ? '全公司都能使用这个数字员工' : 'Everyone in the company can use this agent')} + /> + !busy && setVisibility('only_me')} + title={t('postHire.visibilityOnlyMeTitle', isChinese ? '仅自己' : 'Only me')} + hint={t('postHire.visibilityOnlyMeHint', isChinese ? '只有你能使用,可以之后在设置里分享' : 'Only you can use it; you can share later in Settings')} + /> +
+
+ + {/* Model */} +
+
+ {t('postHire.model', isChinese ? '首选模型' : 'Preferred model')} +
+ {enabledModels.length === 0 ? ( +
+ {t('postHire.noModels', isChinese ? '暂无可用模型,请管理员先添加' : 'No enabled models — ask an admin to add one')} +
+ ) : ( + + )} +
+
+ +
+ + +
+
+
+ ); +} + +function RadioRow({ selected, onClick, title, hint }: { selected: boolean; onClick: () => void; title: string; hint: string }) { + return ( + + ); +} diff --git a/frontend/src/components/TalentMarketModal.tsx b/frontend/src/components/TalentMarketModal.tsx new file mode 100644 index 000000000..2d2fd3a98 --- /dev/null +++ b/frontend/src/components/TalentMarketModal.tsx @@ -0,0 +1,247 @@ +import { useEffect, useState } from 'react'; +import { useNavigate } from 'react-router-dom'; +import { useQuery } from '@tanstack/react-query'; +import { useTranslation } from 'react-i18next'; +import { IconPlus, IconX } from '@tabler/icons-react'; +import { agentApi } from '../services/api'; +import PostHireSettingsModal from './PostHireSettingsModal'; + +interface Template { + id: string; + name: string; + description: string; + icon: string; + category: string; + is_builtin: boolean; + capability_bullets?: string[]; + has_bootstrap?: boolean; +} + +interface Props { + open: boolean; + onClose: () => void; +} + +export default function TalentMarketModal({ open, onClose }: Props) { + const { t, i18n } = useTranslation(); + const navigate = useNavigate(); + const isChinese = i18n.language.startsWith('zh'); + // Chosen template → hands off to PostHireSettingsModal. The market modal + // stays mounted behind so the user can cancel and pick someone else. + const [pendingTemplate, setPendingTemplate] = useState