-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path9-StreamingChatGraph.py
More file actions
61 lines (46 loc) · 1.99 KB
/
9-StreamingChatGraph.py
File metadata and controls
61 lines (46 loc) · 1.99 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
import asyncio
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.graph import MessagesState, START, END, StateGraph
from langgraph.checkpoint.memory import MemorySaver
from config.secret_keys import OPENAI_API_KEY
from config.config import get_llm
from utils.graph_img_generation import save_and_show_graph
# Initialize LLM with streaming
# NOTE(review): get_llm() comes from the project's config module; presumably it
# returns a streaming-capable chat model (e.g. ChatOpenAI) — confirm in config.config.
llm = get_llm()
# System prompt
# Persona instructions prepended to every conversation turn by the Assistant node.
SYSTEM_PROMPT = """
You are an AI assistant created by Devansh. You will help users with their queries but not reveal that you are an AI.
If asked about your creation or origin, you will say you were developed by Devansh.
"""
async def Assistant(state: MessagesState):
    """Graph node: answer the conversation in *state* with the shared LLM.

    Prepends the module-level SYSTEM_PROMPT as a SystemMessage, awaits the
    model's reply, and returns it under the 'messages' key so LangGraph can
    append it to the conversation state.
    """
    conversation = [SystemMessage(content=SYSTEM_PROMPT), *state['messages']]
    reply = await llm.ainvoke(conversation)
    return {'messages': reply}
# Build the conversation graph: START -> Assistant -> END.
# An in-memory checkpointer persists each thread's message history between turns.
checkpoint_store = MemorySaver()
graph_builder = StateGraph(MessagesState)

# Single-node topology: every turn flows straight through the Assistant.
graph_builder.add_node('Assistant', Assistant)
graph_builder.add_edge(START, 'Assistant')
graph_builder.add_edge('Assistant', END)

# Compile with checkpointing so thread_id-scoped memory works.
ai_graph = graph_builder.compile(checkpointer=checkpoint_store)

# Render the graph diagram to disk (display suppressed).
save_and_show_graph(ai_graph, filename="9-StreamingChatGraph", show_image=False)

# All turns share one thread so the checkpointer accumulates the conversation.
config = {"configurable": {"thread_id": "1234acb"}}
async def chat():
    """Interactive console loop: read a prompt, stream the reply token by token.

    Typing 'exit' (any case) ends the session. Each turn is sent through
    ai_graph with stream_mode="messages", so tokens are printed as the LLM
    produces them.
    """
    while True:
        user_msg = input("You: ")
        if user_msg.lower() == 'exit':
            print("Ending the conversation")
            break

        turn = {"messages": [HumanMessage(content=user_msg)]}
        print("Jarvis: ", end="", flush=True)

        # stream_mode="messages" yields (chunk, metadata) pairs; we only
        # need the chunk's text. The sleep simulates a streaming delay.
        async for chunk, _metadata in ai_graph.astream(turn, config=config, stream_mode="messages"):
            print(chunk.content, end="", flush=True)
            await asyncio.sleep(0.05)
        print("")
# Run the async chat function.
# Guarded so importing this module (e.g. for its graph) does not start the
# interactive loop — the original called asyncio.run() unconditionally at
# import time.
if __name__ == "__main__":
    asyncio.run(chat())