-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcode_assistant.py
More file actions
74 lines (57 loc) · 2.3 KB
/
code_assistant.py
File metadata and controls
74 lines (57 loc) · 2.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from __future__ import annotations
import os
import uuid
from langchain.agents import create_agent
from langchain.chat_models import init_chat_model
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import HumanMessage
from langchain_mcp_adapters.tools import load_mcp_tools
from langgraph.checkpoint.memory import MemorySaver
from conversation_loop import ConversationLoop
from llm.prompt_loader import PromptLoader
from mcp_components.github_mcp import GitHubMCP
from mcp_components.stdio_mcp_client import StdioMCPClient
class CodeAssistant:
    """GitHub-focused coding assistant backed by an MCP tool server.

    Wires a LangChain agent to the tools exposed by a GitHub MCP server
    over stdio, with checkpointer-backed per-thread conversation memory.
    Instances must be built via the async factory :meth:`create`.
    """

    def __init__(self) -> None:
        # Client for the GitHub MCP server; connected later in _initialize().
        self.mcp_client = StdioMCPClient(GitHubMCP.get_params())
        # Built asynchronously in _initialize(); None until then.
        self.agent = None
        # Stable per-instance thread id so the checkpointer accumulates
        # memory for a single conversation.
        self.thread_id = str(uuid.uuid4())

    @classmethod
    async def create(cls) -> CodeAssistant:
        """Async factory: construct and fully initialize an assistant."""
        assistant = cls()
        await assistant._initialize()
        return assistant

    async def _initialize(self) -> None:
        """Connect to the MCP server, load its tools, and build the agent."""
        await self.mcp_client.connect()
        tools = await load_mcp_tools(self.mcp_client.session)
        print(f"✅ {len(tools)} tools loaded from MCP")
        llm = _build_chat_model()
        system_prompt = _load_system_prompt()
        memory = MemorySaver()
        # Surface tool errors back to the model instead of raising, so the
        # agent can observe the failure and retry or recover.
        for tool in tools:
            tool.handle_tool_error = True
        self.agent = create_agent(
            model=llm,
            tools=tools,
            system_prompt=system_prompt,
            checkpointer=memory,
        )

    async def start_conversation(self) -> None:
        """Run the interactive loop; clean up MCP resources when it exits."""
        loop = ConversationLoop()
        await loop.run(self.ask, self.mcp_client.cleanup)

    async def ask(self, question: str) -> str:
        """Send *question* to the agent and return its final text answer.

        Raises:
            RuntimeError: if the assistant was not initialized (i.e. it was
                constructed directly instead of via :meth:`create`).
        """
        if self.agent is None:
            # Guard against use before async initialization: the bare
            # constructor leaves ``agent`` as None, which would otherwise
            # fail with an opaque AttributeError on ``ainvoke``.
            raise RuntimeError(
                "CodeAssistant is not initialized; build it with CodeAssistant.create()"
            )
        config = {"configurable": {"thread_id": self.thread_id}}
        result = await self.agent.ainvoke(
            {"messages": [HumanMessage(content=question)]},
            config=config,
        )
        return result["messages"][-1].content
def _build_chat_model() -> BaseChatModel:
    """Build the chat model from the LLM_MODEL / LLM_PROVIDER env variables.

    Falls back to ``gemini-2.0-flash-exp`` when LLM_MODEL is unset; the
    provider is passed through only when LLM_PROVIDER is set and non-empty.
    """
    model_name = os.getenv("LLM_MODEL", "gemini-2.0-flash-exp")
    provider_name = os.getenv("LLM_PROVIDER")
    # Collect keyword arguments so there is a single call site.
    kwargs = {"temperature": 0}
    if provider_name:
        kwargs["model_provider"] = provider_name
    return init_chat_model(model_name, **kwargs)
def _load_system_prompt() -> str:
    """Load the ReAct GitHub prompt template and fill in the user's login.

    The login is taken from the GITHUB_LOGIN environment variable,
    defaulting to ``"unknown"`` when unset.
    """
    login = os.getenv("GITHUB_LOGIN", "unknown")
    raw_template = PromptLoader.load_prompt("react-github.txt")
    return raw_template.format(github_login=login)