diff --git a/examples/start-agents/aws_strands_agent_starter/.env.example b/examples/start-agents/aws_strands_agent_starter/.env.example new file mode 100644 index 00000000..2cd20ee6 --- /dev/null +++ b/examples/start-agents/aws_strands_agent_starter/.env.example @@ -0,0 +1 @@ +OPENAI_API_KEY="your_openai_api_key_here" \ No newline at end of file diff --git a/examples/start-agents/aws_strands_agent_starter/Dockerfile b/examples/start-agents/aws_strands_agent_starter/Dockerfile new file mode 100644 index 00000000..e2db9ca5 --- /dev/null +++ b/examples/start-agents/aws_strands_agent_starter/Dockerfile @@ -0,0 +1,22 @@ +# Use the official lightweight Python image +FROM python:3.11-slim + +# Prevent Python from writing .pyc files and enable unbuffered logging +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 + +# Set the working directory +WORKDIR /usr/src/app + +# Install dependencies first for Docker caching +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy the application code +COPY app/ ./app/ + +# Expose the API port +EXPOSE 8000 + +# Start the FastAPI server using Uvicorn +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/examples/start-agents/aws_strands_agent_starter/README.md b/examples/start-agents/aws_strands_agent_starter/README.md new file mode 100644 index 00000000..bdd6e0f0 --- /dev/null +++ b/examples/start-agents/aws_strands_agent_starter/README.md @@ -0,0 +1,143 @@ +# 🌀️ AWS Strands Agent Starter (Dual-Entrypoint) + +*Deploy this production-ready AI agent on [Saturn Cloud](https://saturncloud.io/).* + +**Hardware:** CPU/GPU | **Resource:** Python Project & API | **Tech Stack:** AWS Strands SDK, FastAPI, OpenAI, Docker + +

+ Saturn Cloud + FastAPI + AWS Strands + OpenAI + Open-Meteo +

+ +## πŸ“– Overview + +This template provides a dual-entrypoint implementation of a model-driven AI Agent utilizing the open-source **AWS Strands SDK**. + +It features a shared core architecture (`app/agent.py`) that can be executed in two ways: +1. **Interactive CLI (`weather_agent.py`):** For rapid local prototyping and terminal-based debugging. +2. **Production Microservice (`app/main.py`):** A high-performance FastAPI backend that allows external applications to query the agent asynchronously via standard HTTP REST endpoints. + +--- + +## πŸ—οΈ Setup & Installation + +**1. Create Virtual Environment & Install Dependencies** +```bash +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt + +``` + +**2. Configure Environment Variables** +Create a `.env` file in the root directory. + +```bash +cp .env.example .env +nano .env +# Define OPENAI_API_KEY. Save and exit. + +``` + +--- + +## πŸ’» Method 1: Interactive CLI (Prototyping) + +Use the CLI script to test new prompts, verify tool execution, and chat with the agent directly in your terminal. + +```bash +python weather_agent.py + +``` + +**Example Prompts:** + +* *"What is the weather like in Tokyo right now?"* +* *"Should I wear a jacket in London today?"* + +To terminate the interactive loop, input `exit`. + +--- + +## 🌐 Method 2: FastAPI Microservice (Production) + +Serve the agent as a RESTful web API for integration with frontends, mobile apps, or other microservices. + +**Run the Server:** + +```bash +uvicorn app.main:app --reload + +``` + +**Test the API:** +Once the server is running, FastAPI automatically generates an interactive Swagger UI documentation page at `http://127.0.0.1:8000/docs`, allowing you to test the agent visually. 
+Alternatively, send a standard POST request: + +```bash +curl -X 'POST' \ + 'http://127.0.0.1:8000/api/chat' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "query": "What is the weather like in Tokyo right now?" +}' + +``` + +--- + +### Install Docker and Build the Container + +Run these commands one by one in your terminal to install and start the Docker engine: + +**1. Update your package manager:** + +```bash +sudo apt update + +``` + +**2. Install Docker:** + +```bash +sudo apt install -y docker.io + +``` + +**3. Start the Docker service:** + +```bash +sudo systemctl start docker +sudo systemctl enable docker + +``` + +**4. Add your user to the Docker group (so you don't have to type `sudo` every time):** + +```bash +sudo usermod -aG docker $USER +newgrp docker +``` + +## 🐳 Docker Deployment + +To deploy this application to production environments (like AWS ECS or cloud container registries), build and run the included Dockerfile. + +```bash +docker build -t strands-weather-agent . 
+docker run -p 8000:8000 --env-file .env strands-weather-agent + +``` + +--- + +## πŸ“š Official Documentation & References + +* **Deployment Platform:** [Saturn Cloud Documentation](https://saturncloud.io/docs/) +* **AI Agent Framework:** [AWS Strands Agents Documentation](https://strandsagents.com/latest/) +* **API Framework:** [FastAPI Documentation](https://fastapi.tiangolo.com/) +* **Weather API Routing:** [Open-Meteo API Reference](https://open-meteo.com/en/docs) diff --git a/examples/start-agents/aws_strands_agent_starter/app/__init__.py b/examples/start-agents/aws_strands_agent_starter/app/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/start-agents/aws_strands_agent_starter/app/agent.py b/examples/start-agents/aws_strands_agent_starter/app/agent.py new file mode 100644 index 00000000..1a10a08e --- /dev/null +++ b/examples/start-agents/aws_strands_agent_starter/app/agent.py @@ -0,0 +1,86 @@ +import os +import json +import urllib.request +import urllib.parse +from dotenv import load_dotenv + +from strands import Agent, tool +from strands.models.openai import OpenAIModel + +# Load environment variables +load_dotenv() + +@tool +def get_realtime_weather(location: str) -> str: + """Fetches current weather data for a specified city or location.""" + print(f" [System] Strands SDK executing tool: Geocoding '{location}'...") + headers = {"User-Agent": "Strands-Agent-API/1.0"} + + try: + # Step A: Convert city name to coordinates safely + safe_location = urllib.parse.quote(location) + geocode_url = f"https://geocoding-api.open-meteo.com/v1/search?name={safe_location}&count=1&format=json" + + req = urllib.request.Request(geocode_url, headers=headers) + with urllib.request.urlopen(req, timeout=15.0) as response: + geo_data = json.loads(response.read().decode()) + + if not geo_data.get("results"): + return f"System Error: Could not find geographical coordinates for '{location}'." 
+ + lat = geo_data["results"][0]["latitude"] + lon = geo_data["results"][0]["longitude"] + country = geo_data["results"][0].get("country", "Unknown Region") + + print(f" [System] Strands SDK executing tool: Fetching weather for Lat: {lat}, Lon: {lon}...") + + # Step B: Fetch weather using coordinates + weather_url = f"https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lon}&current=temperature_2m,wind_speed_10m&timezone=auto" + + req2 = urllib.request.Request(weather_url, headers=headers) + with urllib.request.urlopen(req2, timeout=15.0) as response2: + weather_data = json.loads(response2.read().decode()) + + current = weather_data.get("current", {}) + temp = current.get("temperature_2m", "Unknown") + wind = current.get("wind_speed_10m", "Unknown") + + return f"Location: {location}, {country}. Temperature: {temp}Β°C, Wind Speed: {wind} km/h." + + except Exception as e: + print(f" [Tool Error] API connection failed: {str(e)}") + return f"System Error: Network failure inside the weather tool - {str(e)}" + +def initialize_agent() -> Agent: + """Initializes and returns the Strands SDK Agent.""" + if not os.getenv("OPENAI_API_KEY"): + raise ValueError("Environment Error: OPENAI_API_KEY is not defined.") + + llm_provider = OpenAIModel( + client_args={ + "api_key": os.getenv("OPENAI_API_KEY"), + "timeout": 60.0, + "max_retries": 3 + }, + model_id="gpt-4o-mini" + ) + + return Agent( + model=llm_provider, + tools=[get_realtime_weather], + system_prompt=( + "You are a concise, highly accurate weather assistant. " + "Use the provided tool to fetch real-time weather data for the user's requested location. " + "Extract the location from the prompt, fetch the data, and present the findings clearly." 
+ ) + ) + +# Singleton instance of the agent to avoid re-initializing on every API call +strands_agent = initialize_agent() + +def invoke_agent(user_query: str) -> str: + """Passes the prompt to the Strands framework and returns the string response.""" + result = strands_agent(user_query) + + # Convert the Strands AgentResult object into a standard Python string + return str(result) \ No newline at end of file diff --git a/examples/start-agents/aws_strands_agent_starter/app/main.py b/examples/start-agents/aws_strands_agent_starter/app/main.py new file mode 100644 index 00000000..a4f3fe75 --- /dev/null +++ b/examples/start-agents/aws_strands_agent_starter/app/main.py @@ -0,0 +1,33 @@ +from fastapi import FastAPI, HTTPException +from pydantic import BaseModel +from app.agent import invoke_agent + +# Initialize the FastAPI application +app = FastAPI( + title="AWS Strands Weather Agent API", + description="A production-ready microservice utilizing the AWS Strands SDK.", + version="1.0.0" +) + +# Define the expected JSON payload schema +class ChatRequest(BaseModel): + query: str + +class ChatResponse(BaseModel): + response: str + +# Health check endpoint for load balancers and container orchestrators +@app.get("/health") +def health_check(): + return {"status": "healthy"} + +# The primary AI interaction endpoint +@app.post("/api/chat", response_model=ChatResponse) +async def chat_endpoint(request: ChatRequest): + try: + # Route the query to the Strands agent logic + agent_reply = invoke_agent(request.query) + return ChatResponse(response=agent_reply) + except Exception as e: + # Gracefully handle framework or network errors + raise HTTPException(status_code=500, detail=f"Agent Execution Error: {str(e)}") \ No newline at end of file diff --git a/examples/start-agents/aws_strands_agent_starter/requirements.txt b/examples/start-agents/aws_strands_agent_starter/requirements.txt new file mode 100644 index 00000000..d37e978b --- /dev/null +++ 
b/examples/start-agents/aws_strands_agent_starter/requirements.txt @@ -0,0 +1,5 @@ +strands-agents[openai]>=0.1.0 +fastapi>=0.110.0 +uvicorn>=0.29.0 +python-dotenv>=1.1.0 +pydantic>=2.6.0 \ No newline at end of file diff --git a/examples/start-agents/aws_strands_agent_starter/weather_agent.py b/examples/start-agents/aws_strands_agent_starter/weather_agent.py new file mode 100644 index 00000000..b89e120e --- /dev/null +++ b/examples/start-agents/aws_strands_agent_starter/weather_agent.py @@ -0,0 +1,32 @@ +import os +from dotenv import load_dotenv + +# Import the shared agent logic from your app package +from app.agent import invoke_agent + +# Initialize environment variables +load_dotenv() + +if __name__ == "__main__": + print("--- AWS Strands Agent Starter (CLI Mode) ---") + print("Framework: Strands SDK | Provider: OpenAI") + print("Agent is ready. (Type 'exit' to quit)") + + if not os.getenv("OPENAI_API_KEY"): + print("❌ Error: OPENAI_API_KEY is missing from your .env file.") + exit(1) + + while True: + user_query = input("\nAsk for the weather: ") + + if user_query.lower() in ['exit', 'quit']: + print("Terminating CLI process.") + break + + if user_query.strip(): + try: + # Execute the Strands agent loop via the shared application logic + response = invoke_agent(user_query) + print(f"\nAgent: {response}") + except Exception as e: + print(f"\nExecution Error: {e}") \ No newline at end of file diff --git a/examples/start-agents/camel_ai_benchmarker/.env.example b/examples/start-agents/camel_ai_benchmarker/.env.example new file mode 100644 index 00000000..a25f8ffb --- /dev/null +++ b/examples/start-agents/camel_ai_benchmarker/.env.example @@ -0,0 +1,9 @@ +# 1. OpenAI Credentials +OPENAI_API_KEY="sk-your-openai-key-here" + +# 2. Nebius Studio Credentials +NEBIUS_API_KEY="your-nebius-api-key-here" + +# 3. 
Crusoe Inference Credentials +CRUSOE_API_KEY="your-crusoe-api-key-here" +CRUSOE_API_BASE="https://managed-inference-api-proxy.crusoecloud.com/v1" \ No newline at end of file diff --git a/examples/start-agents/camel_ai_benchmarker/README.md b/examples/start-agents/camel_ai_benchmarker/README.md new file mode 100644 index 00000000..52537f33 --- /dev/null +++ b/examples/start-agents/camel_ai_benchmarker/README.md @@ -0,0 +1,97 @@ +# 🐫 CAMEL-AI Model Benchmarker + +*Deploy this AI Agent instantly on [Saturn Cloud](https://saturncloud.io/) β€” The premier platform for scalable Python workspaces and AI deployment.* + +**Hardware:** CPU/GPU | **Resource:** Python Script & Web App | **Tech Stack:** CAMEL-AI, Streamlit, Pandas, Python + +

+ Python Script + Streamlit + CAMEL-AI + Benchmarking +

+ +## πŸ“– Overview + +This template provides a robust, dual-entrypoint architecture for benchmarking Large Language Models (LLMs) using the **CAMEL-AI** multi-agent framework. + +It is specifically engineered to test execution latency and output generation across multiple distinct cloud infrastructures simultaneously. By leveraging CAMEL-AI's modular `ModelFactory`, developers can test an unlimited number of foundation models without having to rewrite the core agent orchestration logic. + +### ✨ Key Capabilities +* **Dynamic Multi-Model Queue:** Queue up multiple models from entirely different cloud providers (OpenAI, Nebius, Crusoe) and benchmark them in a single, automated execution loop. +* **Per-Model Infrastructure Routing:** Easily benchmark custom or local infrastructure. The UI allows you to inject unique API keys and custom Base URLs for *each individual model* in your queue. +* **Smart Credential Fallbacks:** Leave the Key and URL fields blank in the UI to intelligently inherit default credentials securely from your `.env` file. +* **Privacy-First UI:** API keys entered into the dashboard are masked and stored strictly in temporary browser session state. +* **Dual-Entrypoint:** Execute benchmarks visually via the Streamlit web dashboard or headlessly via the terminal CLI. + +--- + +## πŸ—οΈ Setup & Installation + +**1. Create Virtual Environment & Install Dependencies** +```bash +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt + +``` + +**2. Configure Environment Variables** +Create an environment file to securely store your default credentials. The application will use these as fallbacks if you don't manually specify keys in the UI. + +```bash +cp .env.example .env +nano .env +# Input your OpenAI, Nebius, and Crusoe keys. Save and exit. 
+ +``` + +--- + +## πŸ’» Method 1: Interactive Web Dashboard (Streamlit) + +Spin up a local web server to visually build a queue of models, configure custom infrastructure endpoints, and visualize execution latency through automated charts. + +```bash +streamlit run app.py + +``` + +*The dashboard will automatically open in your default web browser (usually at `http://localhost:8501`).* + +**How to use the UI:** + +1. Select your target framework integration (e.g., Nebius Studio, OpenAI-Compatible). +2. Enter the Model ID (e.g., `deepseek-ai/DeepSeek-V3.2`). +3. *(Optional)* Enter a custom API key and Base URL. If testing Crusoe Inference, enter their proxy endpoint here. If left blank, the app will securely pull your default credentials from the `.env` file. +4. Click **Run Benchmark** to generate latency metrics and a visual comparison chart. + +--- + +## πŸ–₯️ Method 2: Headless CLI Script (Terminal) + +For rapid execution, automated CI/CD pipelines, or remote SSH environments, run the terminal-based benchmarker. This script completely bypasses the UI, pulling credentials directly from your `.env` file and outputting a formatted tracking table. + +```bash +python benchmark.py + +``` + +--- + +## βš™οΈ Supported Providers + +This template natively routes requests to: + +1. **OpenAI:** Uses the standard `ModelPlatformType.OPENAI` framework. +2. **Nebius AI Studio:** Uses CAMEL's native `ModelPlatformType.NEBIUS` integration. +3. **Crusoe Inference & Custom Clouds:** Uses `ModelPlatformType.OPENAI` but overrides the routing mechanism with custom Base URLs. Ensure your proxy endpoints stop at `/v1` (do not append `/chat/completions`). 
+ +--- + +## πŸ“š Official Documentation & References + +* **Saturn Cloud Platform:** [Start building for free](https://saturncloud.io/) +* **Framework:** [CAMEL-AI Documentation](https://docs.camel-ai.org/) +* **Model Integration:** [CAMEL Models Guide](https://docs.camel-ai.org/key_modules/models) +* **UI Framework:** [Streamlit Documentation](https://docs.streamlit.io/) diff --git a/examples/start-agents/camel_ai_benchmarker/app.py b/examples/start-agents/camel_ai_benchmarker/app.py new file mode 100644 index 00000000..4ae492bf --- /dev/null +++ b/examples/start-agents/camel_ai_benchmarker/app.py @@ -0,0 +1,179 @@ +import os +import time +import pandas as pd +import streamlit as st +from dotenv import load_dotenv + +# Core CAMEL-AI imports +from camel.models import ModelFactory +from camel.types import ModelPlatformType +from camel.agents import ChatAgent + +# Load environment variables for the fallbacks +load_dotenv() + +# Configure the Streamlit page layout +st.set_page_config(page_title="CAMEL Benchmark", page_icon="🐫", layout="wide") + +# Initialize session state to store our dynamic list of models +if "models_to_test" not in st.session_state: + st.session_state.models_to_test = [] + +def resolve_credentials(provider, custom_key, custom_url): + """Smartly resolves keys and URLs, falling back to .env if UI fields are blank.""" + # Resolve API Key + if custom_key: + key = custom_key + else: + if provider == "OpenAI": + key = os.getenv("OPENAI_API_KEY") + elif provider == "Nebius Studio": + key = os.getenv("NEBIUS_API_KEY") + elif provider == "OpenAI-Compatible (Custom)": + # Defaults to Crusoe if you are primarily testing that infrastructure + key = os.getenv("CRUSOE_API_KEY") + else: + key = None + + # Resolve Base URL + if custom_url: + url = custom_url + elif provider == "OpenAI-Compatible (Custom)" and not custom_url: + url = os.getenv("CRUSOE_API_BASE") + else: + url = None + + return key, url + +# --- UI: SIDEBAR --- +with st.sidebar: + st.header("βš™οΈ 
Dashboard Controls") + st.markdown("Use the main form to add models. Keys are stored securely in session state.") + st.divider() + if st.button("πŸ—‘οΈ Clear Model Queue", use_container_width=True): + st.session_state.models_to_test = [] + st.rerun() + +# --- UI: MAIN DASHBOARD --- +st.title("🐫 CAMEL-AI Multi-Infrastructure Benchmark") +st.markdown("Dynamically add models across different cloud providers. Specify unique API keys and Base URLs per model, or leave them blank to fall back to your `.env` variables.") + +# Section 1: Add Models to the Queue +st.subheader("1. Add Models to Benchmark") +with st.form("add_model_form"): + col1, col2 = st.columns(2) + + with col1: + provider_choice = st.selectbox( + "Platform Integration Framework", + ["OpenAI", "Nebius Studio", "OpenAI-Compatible (Custom)"], + help="Tells CAMEL how to format the API request." + ) + model_name = st.text_input("Model ID", placeholder="e.g., gpt-4o-mini, meta-llama/Llama-3-8b") + + with col2: + custom_key = st.text_input("API Key (Optional)", type="password", placeholder="Leave blank to use .env fallback") + custom_endpoint = st.text_input("Base URL (Optional)", placeholder="e.g., https://managed-inference-api-proxy.crusoecloud.com/v1") + + submit_model = st.form_submit_button("βž• Add to Queue") + + if submit_model and model_name: + resolved_key, resolved_url = resolve_credentials(provider_choice, custom_key, custom_endpoint) + + st.session_state.models_to_test.append({ + "Provider": provider_choice, + "Model ID": model_name, + "API Key": resolved_key, + "Base URL": resolved_url + }) + st.success(f"Added {model_name} to queue!") + +# Display current queue +if st.session_state.models_to_test: + st.write("**Current Testing Queue:**") + + # We create a display copy of the dataframe to securely mask the API keys in the UI + display_df = pd.DataFrame(st.session_state.models_to_test) + display_df["API Key"] = display_df["API Key"].apply(lambda x: "πŸ”‘ Loaded" if x else "❌ Missing") + 
display_df["Base URL"] = display_df["Base URL"].fillna("Default") + + st.dataframe(display_df, use_container_width=True) + +# Section 2: Prompt Configuration +st.subheader("2. Configure Benchmark") +test_prompt = st.text_area( + "Benchmark Prompt", + value="Write a concise, high-level overview of the history of artificial intelligence, highlighting the major winters and breakthroughs. Keep it strictly under 150 words.", + height=100 +) + +# Section 3: Execution +if st.button("πŸš€ Run Benchmark", type="primary"): + if not st.session_state.models_to_test: + st.warning("Please add at least one model to the queue first.") + st.stop() + + results_data = [] + + # Map UI selections to CAMEL Platform Enums + platform_map = { + "OpenAI": ModelPlatformType.OPENAI, + "Nebius Studio": ModelPlatformType.NEBIUS, + "OpenAI-Compatible (Custom)": ModelPlatformType.OPENAI + } + + # Create a visual progress bar + progress_text = "Benchmarking in progress. Please wait..." + my_bar = st.progress(0, text=progress_text) + + for idx, m in enumerate(st.session_state.models_to_test): + platform_enum = platform_map[m["Provider"]] + api_key = m["API Key"] + + if not api_key: + results_data.append({"Model": m["Model ID"], "Status": "⚠️ Skipped (No Key)", "Exec Time (s)": None, "Length": None}) + continue + + try: + # Build the dynamic arguments for the ModelFactory + factory_kwargs = { + "model_platform": platform_enum, + "model_type": m["Model ID"], + "model_config_dict": {"temperature": 0.0}, + "api_key": api_key + } + if m["Base URL"]: + factory_kwargs["url"] = m["Base URL"] + + # Initialize CAMEL agent + camel_model = ModelFactory.create(**factory_kwargs) + agent = ChatAgent(system_message="You are a highly efficient, objective technical writer.", model=camel_model) + + # Measure execution + start_time = time.time() + response = agent.step(test_prompt) + end_time = time.time() + + exec_time = round(end_time - start_time, 2) + content = response.msgs[0].content if hasattr(response, 'msgs') 
else response.msg.content + + results_data.append({"Model": m["Model ID"], "Status": "βœ… Success", "Exec Time (s)": exec_time, "Length": len(content)}) + + except Exception as e: + results_data.append({"Model": m["Model ID"], "Status": f"❌ Error: {str(e)[:40]}", "Exec Time (s)": None, "Length": None}) + + # Update progress bar + my_bar.progress((idx + 1) / len(st.session_state.models_to_test), text=f"Processed {m['Model ID']}...") + + # Clear progress bar and display results + my_bar.empty() + st.subheader("πŸ“Š Benchmark Results") + + df_results = pd.DataFrame(results_data) + st.dataframe(df_results, use_container_width=True) + + # Automatically generate a bar chart for successful runs + success_df = df_results[df_results["Status"] == "βœ… Success"] + if not success_df.empty: + st.write("**Execution Latency Comparison (Seconds)**") + st.bar_chart(data=success_df, x="Model", y="Exec Time (s)", color="#ff9900") \ No newline at end of file diff --git a/examples/start-agents/camel_ai_benchmarker/benchmark.py b/examples/start-agents/camel_ai_benchmarker/benchmark.py new file mode 100644 index 00000000..2ed69f39 --- /dev/null +++ b/examples/start-agents/camel_ai_benchmarker/benchmark.py @@ -0,0 +1,93 @@ +import os +import time +from dotenv import load_dotenv +from prettytable import PrettyTable + +# Core CAMEL-AI imports +from camel.models import ModelFactory +from camel.types import ModelPlatformType +from camel.agents import ChatAgent + +# Initialize environment variables +load_dotenv() + +def run_benchmark(): + print("--- 🐫 CAMEL-AI Benchmarking Tool (CLI Mode) ---") + print("Providers: OpenAI | Nebius AI | Crusoe Inference\n") + + # Define the models based on your exact API keys + models_to_test = [ + { + "name": "OpenAI (GPT-4o-Mini)", + "platform": ModelPlatformType.OPENAI, + "type": "gpt-4o-mini", + "api_key": os.getenv("OPENAI_API_KEY"), + "url": None + }, + { + "name": "Nebius Studio (DeepSeek-V3.2)", + "platform": ModelPlatformType.NEBIUS, + "type": 
"deepseek-ai/DeepSeek-V3.2", + "api_key": os.getenv("NEBIUS_API_KEY"), + "url": None + }, + { + "name": "Crusoe Inference (Qwen-3-235B)", + "platform": ModelPlatformType.OPENAI, + "type": "Qwen/Qwen3-235B-A22B-Instruct-2507", + "api_key": os.getenv("CRUSOE_API_KEY"), + "url": os.getenv("CRUSOE_API_BASE") + } + ] + + test_prompt = ( + "Write a concise, high-level overview of the history of artificial intelligence, " + "highlighting the major winters and breakthroughs. Keep it strictly under 150 words." + ) + + results = PrettyTable() + results.field_names = ["Provider / Model Name", "Status", "Exec Time (s)", "Response Length (chars)"] + results.align = "l" + + print(f"Executing benchmark prompt across {len(models_to_test)} endpoints...") + print(f"Prompt: '{test_prompt}'\n") + + for m in models_to_test: + if not m["api_key"]: + print(f" [Skipping] {m['name']} - No API Key found in .env") + results.add_row([m['name'], "⚠️ Skipped (No Key)", "-", "-"]) + continue + + print(f"Testing {m['name']}...") + try: + # Build the dynamic arguments + factory_kwargs = { + "model_platform": m['platform'], + "model_type": m['type'], + "model_config_dict": {"temperature": 0.0}, + "api_key": m['api_key'] + } + if m['url']: + factory_kwargs["url"] = m['url'] + + camel_model = ModelFactory.create(**factory_kwargs) + agent = ChatAgent(system_message="You are a highly efficient, objective technical writer.", model=camel_model) + + start_time = time.time() + response = agent.step(test_prompt) + end_time = time.time() + + exec_time = round(end_time - start_time, 2) + content = response.msgs[0].content if hasattr(response, 'msgs') else response.msg.content + + results.add_row([m['name'], "βœ… Success", exec_time, len(content)]) + + except Exception as e: + print(f" [Error] Failed to execute {m['name']}: {str(e)[:100]}...") + results.add_row([m['name'], "❌ Failed", "-", "-"]) + + print("\n--- Benchmark Results ---") + print(results) + +if __name__ == "__main__": + run_benchmark() \ No 
newline at end of file diff --git a/examples/start-agents/camel_ai_benchmarker/requirements.txt b/examples/start-agents/camel_ai_benchmarker/requirements.txt new file mode 100644 index 00000000..f62f4ac5 --- /dev/null +++ b/examples/start-agents/camel_ai_benchmarker/requirements.txt @@ -0,0 +1,6 @@ +camel-ai>=0.2.89 +openai>=1.10.0 +python-dotenv>=1.1.0 +prettytable>=3.10.0 +streamlit>=1.32.0 +pandas>=2.2.0 \ No newline at end of file diff --git a/examples/start-agents/cpu-agno-hackernews/.env b/examples/start-agents/cpu-agno-hackernews/.env new file mode 100644 index 00000000..e69de29b diff --git a/examples/start-agents/cpu-agno-hackernews/.env.example b/examples/start-agents/cpu-agno-hackernews/.env.example new file mode 100644 index 00000000..b02df153 --- /dev/null +++ b/examples/start-agents/cpu-agno-hackernews/.env.example @@ -0,0 +1 @@ +NEBIUS_API_KEY="your_nebius_api_key_here" \ No newline at end of file diff --git a/examples/start-agents/cpu-agno-hackernews/README.md b/examples/start-agents/cpu-agno-hackernews/README.md new file mode 100644 index 00000000..62e8c012 --- /dev/null +++ b/examples/start-agents/cpu-agno-hackernews/README.md @@ -0,0 +1,115 @@ +# Template: HackerNews Analysis Agent + +*Deploy this AI agent on [Saturn Cloud](https://saturncloud.io/).* + +**Hardware:** CPU | **Resource:** Terminal & Streamlit | **Tech Stack:** Python, Agno, Nebius AI, SQLite + +![Hackernew Analyst Agent](image.png) + +## πŸ“– Overview + +This template provides a Streamlit web interface for a HackerNews Analyst Agent. Built using the [Agno](https://github.com/agno-agi/agno) framework and the **Nebius AI** model (`Qwen/Qwen3-30B-A3B-Instruct-2507`), this agent executes data retrieval and analysis of tech news. + +The application tracks trending topics and analyzes user engagement. It utilizes a local **SQLite** database to maintain session state and conversation history across interactions. 
+ +### Infrastructure Deployment (Saturn Cloud) + +Deploying this architecture on [Saturn Cloud](https://saturncloud.io/) provides several environment benefits: + +* **Environment Isolation:** Provisions dedicated compute resources for Python package execution without local dependency conflicts. +* **Persistent Compute:** Maintains the Streamlit server process in the background. +* **Secrets Management:** Secures API keys and environment variables via isolated `.env` configurations. + +--- + +## βœ… Prerequisites + +1. **Saturn Cloud Workspace:** Provision a CPU workspace via [Saturn Cloud](https://saturncloud.io/). +2. **Nebius API Key:** Generate an LLM API token via the [Nebius Token Factory](https://studio.nebius.ai/). + +--- + +## πŸ—οΈ Phase 1: Environment Setup + +Open a terminal in your Saturn Cloud workspace and execute the following commands. + +**1. Create and Activate the Virtual Environment** + +```bash +# Create the virtual environment named 'venv' +python -m venv venv + +# Activate it +source venv/bin/activate + +``` + +**2. Install Dependencies** + +```bash +pip install -r requirements.txt + +``` + +**3. Configure Environment Variables** + +Create your `.env` file and define your API key. + +```bash +cp .env.example .env +nano .env # Define NEBIUS_API_KEY. Save and exit. + +``` +--- + +## πŸš€ Phase 2: Execution (Streamlit UI) + +The application uses Agno's `HackerNewsTools` to query live data and a local **SQLite Database** (`agent_memory.db`) to persist conversation history. + +1. Ensure your virtual environment is activated, then initialize the Streamlit server: + +```bash +streamlit run app.py + +``` + +2. Navigate to the **Local URL** provided in the terminal output (default: `http://localhost:8501`) to access the web interface. +3. Input natural language commands in the main chat interface. 
+ +**Example Prompts:** + +* *"What are the most discussed topics on HackerNews today?"* +* *"Can you compare that to the trends from last week?"* + +--- + +## 🐘 Production Scaling (PostgreSQL) + +By default, this template uses a local **SQLite** database as it requires no initial configuration. For multi-user deployments, the architecture supports migrating to **PostgreSQL**. + +**Migration Steps (No Application Logic Changes Required):** + +1. Provision a Postgres Database. +2. Install the Postgres driver in your terminal: `pip install psycopg2-binary` +3. In `app.py`, modify the Agno storage backend from SQLite to Postgres: + +```python +from agno.db.postgres import PostgresDb + +# Replace the SQLite configuration in get_agent() with: +db=PostgresDb( + table_name="hn_agent_sessions", + db_url="postgresql+psycopg2://user:password@host:5432/dbname" +) + +``` +--- + +## πŸ“š Official Documentation & References + +For further customization, refer to the official documentation for the stack components used in this project: + +* **Deployment Platform:** [Saturn Cloud Documentation](https://saturncloud.io/docs/) +* **AI Agent Framework:** [Agno Framework](https://github.com/agno-agi/agno) +* **LLM Provider:** [Nebius AI Studio](https://docs.nebius.com/studio/) +* **Web UI Framework:** [Streamlit Documentation](https://docs.streamlit.io/) diff --git a/examples/start-agents/cpu-agno-hackernews/app.py b/examples/start-agents/cpu-agno-hackernews/app.py new file mode 100644 index 00000000..8a609f46 --- /dev/null +++ b/examples/start-agents/cpu-agno-hackernews/app.py @@ -0,0 +1,57 @@ +import streamlit as st +from agno.agent import Agent +from agno.tools.hackernews import HackerNewsTools +from agno.models.nebius import Nebius +from agno.db.sqlite import SqliteDb +import os +from dotenv import load_dotenv + +# Load keys from the .env file +load_dotenv() + +# Set up the Streamlit UI +st.set_page_config(page_title="Tech News Analyst", page_icon="πŸ€–", layout="wide") 
+st.title("πŸ€– Tech News Analyst") +st.markdown("Ask me anything about trending topics, engagement patterns, or tech news on HackerNews!") + +# Initialize the Agent with SQLite Memory +@st.cache_resource +def get_agent(): + return Agent( + name="Tech News Analyst", + tools=[HackerNewsTools()], + model=Nebius( + id="Qwen/Qwen3-235B-A22B-Thinking-2507", + api_key=os.getenv("NEBIUS_API_KEY") + ), + # Attach SQLite storage to give the agent a persistent memory using v2 syntax + db=SqliteDb(db_file="agent_memory.db"), + # Use the updated v2 syntax for adding history context + add_history_to_context=True, + # (Optional) You can limit how many previous runs to remember to save API costs + num_history_runs=3, + markdown=True + ) + +agent = get_agent() + +# Initialize UI chat history +if "messages" not in st.session_state: + st.session_state.messages = [] + +# Display previous chat messages +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +# React to user input +if prompt := st.chat_input("What are the top stories on HackerNews right now?"): + st.chat_message("user").markdown(prompt) + st.session_state.messages.append({"role": "user", "content": prompt}) + + with st.chat_message("assistant"): + with st.spinner("Analyzing HackerNews..."): + response = agent.run(prompt) + st.markdown(response.content) + + st.session_state.messages.append({"role": "assistant", "content": response.content}) \ No newline at end of file diff --git a/examples/start-agents/cpu-agno-hackernews/image.png b/examples/start-agents/cpu-agno-hackernews/image.png new file mode 100644 index 00000000..cf67e9df Binary files /dev/null and b/examples/start-agents/cpu-agno-hackernews/image.png differ diff --git a/examples/start-agents/cpu-agno-hackernews/pyproject.toml b/examples/start-agents/cpu-agno-hackernews/pyproject.toml new file mode 100644 index 00000000..4b24aacd --- /dev/null +++ 
b/examples/start-agents/cpu-agno-hackernews/pyproject.toml @@ -0,0 +1,13 @@ +[project] +name = "hackernews-analyst-agent" +version = "0.1.0" +description = "AI Agent for HackerNews trend analysis using Agno and Nebius AI" +readme = "README.md" +requires-python = ">=3.10" +dependencies = [ + "agno>=1.5.1", + "openai>=1.78.1", + "python-dotenv>=1.1.0", + "streamlit", + "sqlalchemy" +] \ No newline at end of file diff --git a/examples/start-agents/cpu-agno-hackernews/requirements.txt b/examples/start-agents/cpu-agno-hackernews/requirements.txt new file mode 100644 index 00000000..cc03502c --- /dev/null +++ b/examples/start-agents/cpu-agno-hackernews/requirements.txt @@ -0,0 +1,5 @@ +agno>=1.5.1 +openai>=1.78.1 +python-dotenv>=1.1.0 +streamlit +sqlalchemy \ No newline at end of file diff --git a/examples/start-agents/crewai-research-crew/.env.example b/examples/start-agents/crewai-research-crew/.env.example new file mode 100644 index 00000000..9e14915e --- /dev/null +++ b/examples/start-agents/crewai-research-crew/.env.example @@ -0,0 +1,9 @@ +# Add your API key here. +# For GPU/OpenAI: +OPENAI_API_KEY=sk-proj-**** # Enter your API here +OTEL_SDK_DISABLED=true +CREWAI_TELEMETRY_OPT_OUT=true + +# For Local CPU (Ollama): +# OPENAI_API_BASE='http://localhost:11434/v1' +# OPENAI_MODEL_NAME='llama3' diff --git a/examples/start-agents/crewai-research-crew/README.md b/examples/start-agents/crewai-research-crew/README.md new file mode 100644 index 00000000..36d15fc2 --- /dev/null +++ b/examples/start-agents/crewai-research-crew/README.md @@ -0,0 +1,101 @@ +## CrewAI Research Crew (CPU/GPU) + +![template logo](image.png) + +This template provides a technical implementation of a **multi-agent research architecture** using the CrewAI framework. It demonstrates an automated pipeline where specialized agents coordinate to perform information retrieval, data synthesis, and technical reporting. 
+ +**Infrastructure:** [Saturn Cloud](https://saturncloud.io/) +**Resource:** Python Project +**Hardware:** CPU/GPU +**Tech Stack:** CrewAI, Streamlit, Python + +--- + +## πŸ“– Overview + +This template implements a **Multi-agent research team** designed to handle complex information-gathering tasks. By delegating roles to autonomous agents, the system reduces the context window limitations and hallucination risks associated with single-prompt LLM interactions. + +The implementation includes: + +1. **Researcher Agent:** Configured for information gathering and technical data extraction. +2. **Writer Agent:** Optimized for synthesizing raw research into structured Markdown reports. +3. **Orchestration:** A sequential process flow managed via the CrewAI project container. +4. **Interactive Dashboard:** A Streamlit interface for real-time monitoring of agent "thinking" and process execution. + +--- + +## πŸš€ Execution Guide + +### 1. Environment Initialization + +The workflow requires a virtual environment to ensure dependency isolation and a stable execution context. + +1. **Create and Activate Virtual Environment:** +```bash +python -m venv venv +source venv/bin/activate # Windows: .\venv\Scripts\activate + +``` + + +2. **Install Dependencies:** +```bash +pip install -r requirements.txt + +``` + + + +### 2. Configuration + +The application requires an `OPENAI_API_KEY` to facilitate LLM-based reasoning. + +1. Create a `.env` file in the root directory. +2. Define the following variables: +```bash +OPENAI_API_KEY=sk-your-key-here +OTEL_SDK_DISABLED=true # Disables telemetry for thread safety in Streamlit +CREWAI_TELEMETRY_OPT_OUT=true # Opts out of telemetry signals + +``` + + + +### 3. 
Launching the Dashboard + +To trigger the multi-agent workflow via the web interface: + +```bash +streamlit run src/app.py --server.port 8000 --server.address 0.0.0.0 + +``` + +--- + +## 🧠 Technical Architecture + +The system is structured to separate agent logic from the presentation layer. + +### 1. Configuration Layer (`/config`) + +* **`agents.yaml`**: Defines the role, goal, and backstory for each agent. +* **`tasks.yaml`**: Defines specific task descriptions, expected outputs, and agent assignments. + +### 2. Core Logic (`src/crew.py`) + +This file uses the `@CrewBase` decorator to assemble the crew. It utilizes absolute path resolution to ensure configuration files are located correctly regardless of the execution entry point. + +### 3. Presentation Layer (`src/app.py`) + +The Streamlit dashboard handles: + +* **State Management:** Checks for API keys in `.env` before prompting the user. +* **Process Visualization:** Implements `st.status` and `st.expander` to display the intermediate "thinking" steps of the agents during execution. + +--- + +## 🏁 Conclusion + +This implementation demonstrates a production-ready approach to multi-agent systems on Saturn Cloud. It provides a modular framework that can be scaled by adding custom tools or expanding the agent pool. + +To scale this deploymentβ€”such as integrating with a persistent database or exposing a REST APIβ€”consider utilizing the Saturn Cloud Deployment resources to host the Streamlit dashboard as a persistent service. 
\ No newline at end of file diff --git a/examples/start-agents/crewai-research-crew/config/agents.yaml b/examples/start-agents/crewai-research-crew/config/agents.yaml new file mode 100644 index 00000000..9c49778d --- /dev/null +++ b/examples/start-agents/crewai-research-crew/config/agents.yaml @@ -0,0 +1,19 @@ +researcher: + role: > + Senior Technical Researcher + goal: > + Uncover cutting-edge developments and technical specifications for {topic} + backstory: > + You are an expert at navigating technical documentation, research papers, + and industry news. You excel at finding factual, high-density information + and identifying key trends. + +writer: + role: > + Technical Content Strategist + goal: > + Synthesize complex research into a structured, professional report on {topic} + backstory: > + You are a specialist in technical communication. You take raw research data + and transform it into clear, executive-level summaries that are easy to + read but maintain technical depth. \ No newline at end of file diff --git a/examples/start-agents/crewai-research-crew/config/tasks.yaml b/examples/start-agents/crewai-research-crew/config/tasks.yaml new file mode 100644 index 00000000..03bb5f15 --- /dev/null +++ b/examples/start-agents/crewai-research-crew/config/tasks.yaml @@ -0,0 +1,18 @@ +research_task: + description: > + Conduct a comprehensive investigation into {topic}. + Focus on recent breakthroughs, technical architecture, and market impact. + expected_output: > + A structured list of at least 10 key findings, including technical specs + and verified sources where possible. + agent: researcher + +writing_task: + description: > + Review the findings from the research task and compile them into a + technical report. Ensure the tone is professional and the structure + includes an Overview, Key Developments, and Future Outlook. + expected_output: > + A formatted Markdown report (.md) containing the synthesized research. 
+ agent: writer + output_file: research_report.md \ No newline at end of file diff --git a/examples/start-agents/crewai-research-crew/image.png b/examples/start-agents/crewai-research-crew/image.png new file mode 100644 index 00000000..d6e3d9ab Binary files /dev/null and b/examples/start-agents/crewai-research-crew/image.png differ diff --git a/examples/start-agents/crewai-research-crew/requirements.txt b/examples/start-agents/crewai-research-crew/requirements.txt new file mode 100644 index 00000000..680fbc06 --- /dev/null +++ b/examples/start-agents/crewai-research-crew/requirements.txt @@ -0,0 +1,3 @@ +crewai>=0.100.0 +streamlit +python-dotenv \ No newline at end of file diff --git a/examples/start-agents/crewai-research-crew/src/app.py b/examples/start-agents/crewai-research-crew/src/app.py new file mode 100644 index 00000000..9f91d0fb --- /dev/null +++ b/examples/start-agents/crewai-research-crew/src/app.py @@ -0,0 +1,73 @@ +import os +# Disable telemetry to avoid the signal/thread error +os.environ["OTEL_SDK_DISABLED"] = "true" +os.environ["CREWAI_TELEMETRY_OPT_OUT"] = "true" + +import streamlit as st +from crew import ResearchCrew +from dotenv import load_dotenv + +# Load .env file at startup +load_dotenv() + +st.set_page_config(page_title="Research Crew Dashboard", layout="wide") + +st.title("🧠 Multi-Agent Research Crew") + +# --- API Configuration Logic --- +# Check if key exists in system environment (.env) +env_key = os.getenv("OPENAI_API_KEY") + +with st.sidebar: + st.header("Settings") + if env_key: + st.success("βœ… API Key loaded from .env") + # Optional: allow override + user_key = st.text_input("Override API Key (Optional)", type="password") + if user_key: + os.environ["OPENAI_API_KEY"] = user_key + else: + st.warning("⚠️ No API Key found in .env") + user_key = st.text_input("Enter OpenAI API Key", type="password") + if user_key: + os.environ["OPENAI_API_KEY"] = user_key + +# --- Main Interface --- +topic = st.text_input("Research Topic", 
placeholder="e.g. Advancements in Room-Temperature Superconductors") + +if st.button("Run Research Crew"): + if not os.environ.get("OPENAI_API_KEY"): + st.error("Please provide an API Key to continue.") + elif not topic: + st.warning("Please enter a topic.") + else: + # 1. Collapsible Thinking Section + with st.expander("πŸ•΅οΈ View Agent Thinking & Process", expanded=True): + thinking_container = st.empty() + with st.status("Agents are working...", expanded=True) as status: + st.write("πŸ” **Researcher:** Accessing technical databases...") + # In a basic setup, we use these write statements to track progress + # For real-time "internal logs," CrewAI requires a custom callback, + # but these status updates serve the same visual purpose. + + try: + crew_obj = ResearchCrew().crew() + result = crew_obj.kickoff(inputs={'topic': topic}) + + st.write("✍️ **Writer:** Synthesizing findings into report...") + status.update(label="βœ… Research Complete!", state="complete", expanded=False) + except Exception as e: + st.error(f"Execution Error: {e}") + st.stop() + + # 2. 
Results Section + st.divider() + st.subheader("πŸ“„ Final Research Report") + st.markdown(result) + + st.download_button( + label="Download Report (.md)", + data=str(result), + file_name=f"research_report.md", + mime="text/markdown" + ) \ No newline at end of file diff --git a/examples/start-agents/crewai-research-crew/src/crew.py b/examples/start-agents/crewai-research-crew/src/crew.py new file mode 100644 index 00000000..f74dfe29 --- /dev/null +++ b/examples/start-agents/crewai-research-crew/src/crew.py @@ -0,0 +1,49 @@ +import os +from pathlib import Path +from crewai import Agent, Crew, Process, Task +from crewai.project import CrewBase, agent, crew, task + +# Get the directory where crew.py is located +curr_dir = Path(__file__).parent + +@CrewBase +class ResearchCrew(): + # Use absolute paths relative to the 'src' folder's parent + agents_config = str(curr_dir.parent / 'config/agents.yaml') + tasks_config = str(curr_dir.parent / 'config/tasks.yaml') + + @agent + def researcher(self) -> Agent: + return Agent( + config=self.agents_config['researcher'], + verbose=True, + memory=True + ) + + @agent + def writer(self) -> Agent: + return Agent( + config=self.agents_config['writer'], + verbose=True + ) + + @task + def research_task(self) -> Task: + return Task( + config=self.tasks_config['research_task'], + ) + + @task + def writing_task(self) -> Task: + return Task( + config=self.tasks_config['writing_task'], + ) + + @crew + def crew(self) -> Crew: + return Crew( + agents=self.agents, + tasks=self.tasks, + process=Process.sequential, + verbose=True, + ) \ No newline at end of file diff --git a/examples/start-agents/crewai-research-crew/test_logic.py b/examples/start-agents/crewai-research-crew/test_logic.py new file mode 100644 index 00000000..9c0e3293 --- /dev/null +++ b/examples/start-agents/crewai-research-crew/test_logic.py @@ -0,0 +1,14 @@ +from src.crew import ResearchCrew +from dotenv import load_dotenv +import os + +load_dotenv() # Loads keys from .env + 
+def test(): + print("πŸš€ Starting Logic Test...") + crew = ResearchCrew().crew() + result = crew.kickoff(inputs={'topic': 'Standardization of AI Agent Protocols 2026'}) + print("\nβœ… Test Result:\n", result) + +if __name__ == "__main__": + test() \ No newline at end of file diff --git a/examples/start-agents/dspy_starter/.env.example b/examples/start-agents/dspy_starter/.env.example new file mode 100644 index 00000000..55af96a5 --- /dev/null +++ b/examples/start-agents/dspy_starter/.env.example @@ -0,0 +1 @@ +OPENAI_API_KEY="sk-your-openai-api-key-here" \ No newline at end of file diff --git a/examples/start-agents/dspy_starter/README.md b/examples/start-agents/dspy_starter/README.md new file mode 100644 index 00000000..8c8c071e --- /dev/null +++ b/examples/start-agents/dspy_starter/README.md @@ -0,0 +1,86 @@ +# 🧠 DSPy Starter: AI System Optimizer & UI + +**Hardware:** CPU/GPU | **Resource:** Jupyter Notebook & Web App | **Tech Stack:** DSPy, Streamlit, Python, OpenAI + +

+ Jupyter Notebook + Streamlit + DSPy +

+ +## πŸ“– Overview + +This template provides a robust introduction to **DSPy** (Declarative Self-Improving Language Programs), a framework that replaces manual "prompt engineering" with algorithmic optimization. + +It features a **Dual-Entrypoint Architecture**: +1. **Interactive Jupyter Notebook (`dspy_starter.ipynb`):** For defining your signatures, providing training datasets, and compiling/optimizing your agent using the `BootstrapFewShot` optimizer. +2. **Streamlit Testing Dashboard (`app.py`):** A production-grade web interface to interactively test your agent, explicitly visualizing the AI's hidden "Chain of Thought" reasoning alongside its final output. + + + +--- + +## πŸ—οΈ Setup & Installation + +**1. Create Virtual Environment & Install Dependencies** +```bash +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt + +``` + +**2. Configure Environment Variables** +Create an environment file to securely store your OpenAI credentials. + +```bash +cp .env.example .env +nano .env +# Input your OpenAI API key. Save and exit. + +``` + +--- + +## πŸ’» Method 1: Interactive Dashboard (Testing) + +Use the Streamlit UI to quickly test the capacity of your DSPy module. This dashboard explicitly separates the internal reasoning monologue from the formatted output. + +```bash +streamlit run app.py + +``` + +*The dashboard will automatically open in your browser (usually at `http://localhost:8501`).* + +--- + +## πŸ”¬ Method 2: Jupyter Notebook (Compilation & Optimization) + +Use the notebook to learn how DSPy compiles algorithms and optimizes prompts under the hood. 
+
+```bash
+jupyter notebook
+
+```
+
+*Open `dspy_starter.ipynb` and execute the cells sequentially to watch DSPy mathematically rewrite your prompts to maximize accuracy.*
+
+---
+
+## πŸ“š Official Documentation & References
+
+* **DSPy Repository:** [Stanford NLP DSPy GitHub](https://github.com/stanfordnlp/dspy)
+* **DSPy Documentation:** [DSPy Official Docs](https://dspy-docs.vercel.app/)
+* **UI Framework:** [Streamlit Documentation](https://docs.streamlit.io/)
+
+---
+
+### How to test your new UI:
+1. Run `pip install -r requirements.txt` to grab Streamlit.
+2. Save the `app.py` file in your project folder.
+3. In your terminal, run `streamlit run app.py`.
+
+Type in your question and watch how the UI beautifully isolates the reasoning trace from the final answer!
+
+
\ No newline at end of file
diff --git a/examples/start-agents/dspy_starter/app.py b/examples/start-agents/dspy_starter/app.py
new file mode 100644
index 00000000..4093e551
--- /dev/null
+++ b/examples/start-agents/dspy_starter/app.py
@@ -0,0 +1,67 @@
+import os
+import dspy
+import streamlit as st
+from dotenv import load_dotenv
+
+# 1. Page Configuration
+st.set_page_config(page_title="DSPy Agent UI", page_icon="🧠", layout="centered")
+
+# 2. Load Environment Variables
+load_dotenv()
+if not os.getenv("OPENAI_API_KEY"):
+    st.error("⚠️ OPENAI_API_KEY is missing from your .env file.")
+    st.stop()
+
+# 3. Configure DSPy (Cached so it doesn't reload on every button click)
+@st.cache_resource
+def setup_dspy():
+    lm = dspy.LM('openai/gpt-4o-mini', max_tokens=1000)
+    dspy.configure(lm=lm)
+    return lm
+
+setup_dspy()
+
+# 4. 
Define the DSPy Signature & Module (Mirroring the Notebook) +class BasicQA(dspy.Signature): + """Answer questions with short, precise, and accurate fact-based answers.""" + question = dspy.InputField(desc="The user's query.") + answer = dspy.OutputField(desc="A concise, factual answer.") + +class CoTQA(dspy.Module): + def __init__(self): + super().__init__() + self.prog = dspy.ChainOfThought(BasicQA) + + def forward(self, question): + return self.prog(question=question) + +# Instantiate the agent +agent = CoTQA() + +# 5. Streamlit UI Build +st.title("🧠 DSPy Agent Dashboard") +st.markdown("Test the reasoning and output capacity of your DSPy Chain-of-Thought agent.") + +# Input Field +user_question = st.text_input("Ask a question:", placeholder="e.g., What is the heaviest naturally occurring element?") + +# Execution Button +if st.button("πŸš€ Generate Answer", type="primary"): + if user_question: + with st.spinner("Agent is thinking..."): + try: + # Execute the DSPy agent + response = agent(question=user_question) + + # Display Final Answer clearly at the top + st.subheader("🎯 Final Answer") + st.success(response.answer) + + # Display the hidden reasoning in an expandable box or text area + st.subheader("🧠 Internal Reasoning Trace") + st.info(response.reasoning) + + except Exception as e: + st.error(f"Execution Error: {str(e)}") + else: + st.warning("Please enter a question first.") \ No newline at end of file diff --git a/examples/start-agents/dspy_starter/dspy_starter.ipynb b/examples/start-agents/dspy_starter/dspy_starter.ipynb new file mode 100644 index 00000000..1015548b --- /dev/null +++ b/examples/start-agents/dspy_starter/dspy_starter.ipynb @@ -0,0 +1,341 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "93c068b5", + "metadata": {}, + "source": [ + "# πŸš€ DSPy Starter: Building & Optimizing AI Systems\n", + "\n", + "Unlike traditional prompt engineering, **DSPy** allows you to define declarative modules (like building neural networks) and uses 
optimizers to automatically compile and rewrite your prompts to maximize accuracy. \n", + "\n", + "In this starter, we will:\n", + "1. Configure an OpenAI Language Model.\n", + "2. Define a custom Question-Answering task using `dspy.Signature`.\n", + "3. Provide a small training dataset.\n", + "4. Use a DSPy **Optimizer** to automatically write the best prompt for us!" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "b57f55c3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "βœ… DSPy configured successfully!\n" + ] + } + ], + "source": [ + "import os\n", + "import dspy\n", + "from dotenv import load_dotenv\n", + "\n", + "# Load environment variables\n", + "load_dotenv()\n", + "\n", + "if not os.getenv(\"OPENAI_API_KEY\"):\n", + " print(\"⚠️ WARNING: OPENAI_API_KEY not found in .env file.\")\n", + "\n", + "# Configure DSPy to use GPT-4o-Mini as the default language model\n", + "lm = dspy.LM('openai/gpt-4o-mini', max_tokens=1000)\n", + "dspy.configure(lm=lm)\n", + "\n", + "print(\"βœ… DSPy configured successfully!\")" + ] + }, + { + "cell_type": "markdown", + "id": "56736ac7", + "metadata": {}, + "source": [ + "## 1. Define the Signature & Module\n", + "A **Signature** defines the inputs and outputs of your task (the *what*).\n", + "A **Module** defines the execution pipeline (the *how*). We will use DSPy's built-in `ChainOfThought` module to give the AI reasoning capabilities." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "c1d8d930", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- Unoptimized Zero-Shot Response ---\n", + "Answer: Paris\n" + ] + } + ], + "source": [ + "# Define the I/O behavior for our AI system\n", + "class BasicQA(dspy.Signature):\n", + " \"\"\"Answer questions with short, precise, and accurate fact-based answers.\"\"\"\n", + " \n", + " question = dspy.InputField(desc=\"The user's query.\")\n", + " answer = dspy.OutputField(desc=\"A concise, factual answer.\")\n", + "\n", + "# Build the execution module\n", + "class CoTQA(dspy.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " # Wrap our signature in a Chain of Thought reasoning module\n", + " self.prog = dspy.ChainOfThought(BasicQA)\n", + " \n", + " def forward(self, question):\n", + " return self.prog(question=question)\n", + "\n", + "# Let's test the UN-OPTIMIZED system\n", + "unoptimized_agent = CoTQA()\n", + "response = unoptimized_agent(question=\"What is the capital of France?\")\n", + "\n", + "print(\"--- Unoptimized Zero-Shot Response ---\")\n", + "print(f\"Answer: {response.answer}\")" + ] + }, + { + "cell_type": "markdown", + "id": "59a8d354", + "metadata": {}, + "source": [ + "## 2. Optimizing the System\n", + "Now for the magic. We will create a small dataset of 5 examples. Then, we will use DSPy's `BootstrapFewShot` optimizer. It will automatically run the module, evaluate the reasoning, and **rewrite the prompt** to include the best few-shot examples that maximize accuracy." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "735dd0d8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Compiling and optimizing the AI system. 
Please wait...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 80%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 4/5 [00:00<00:00, 11.49it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Bootstrapped 3 full traces after 4 examples for up to 1 rounds, amounting to 4 attempts.\n", + "βœ… System successfully optimized!\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "from dspy.teleprompt import BootstrapFewShot\n", + "\n", + "# 1. Create a tiny training dataset (Inputs must be marked as .with_inputs())\n", + "trainset = [\n", + " dspy.Example(question=\"What is the freezing point of water in Celsius?\", answer=\"0 degrees Celsius\").with_inputs(\"question\"),\n", + " dspy.Example(question=\"Who wrote 'To Kill a Mockingbird'?\", answer=\"Harper Lee\").with_inputs(\"question\"),\n", + " dspy.Example(question=\"What is the chemical symbol for Gold?\", answer=\"Au\").with_inputs(\"question\"),\n", + " dspy.Example(question=\"How many planets are in the Solar System?\", answer=\"8\").with_inputs(\"question\"),\n", + " dspy.Example(question=\"What year did the Apollo 11 moon landing occur?\", answer=\"1969\").with_inputs(\"question\"),\n", + "]\n", + "\n", + "# 2. Define a simple exact-match evaluation metric\n", + "def exact_match_metric(example, pred, trace=None):\n", + " return example.answer.lower() in pred.answer.lower()\n", + "\n", + "# 3. Setup the Optimizer (Teleprompter)\n", + "optimizer = BootstrapFewShot(\n", + " metric=exact_match_metric,\n", + " max_bootstrapped_demos=3, # The number of optimized examples to inject into the final prompt\n", + " max_labeled_demos=3\n", + ")\n", + "\n", + "print(\"Compiling and optimizing the AI system. Please wait...\")\n", + "\n", + "# 4. Compile! 
DSPy is now writing and optimizing the prompt for you.\n", + "optimized_agent = optimizer.compile(student=CoTQA(), trainset=trainset)\n", + "\n", + "print(\"βœ… System successfully optimized!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "a4a5f513", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Question: Who painted the Mona Lisa?\n", + "Reasoning: The Mona Lisa is a renowned painting created during the Renaissance period. It was painted by the Italian artist Leonardo da Vinci, and it is famous for its exquisite detail and the subject's enigmatic expression.\n", + "Optimized Answer: Leonardo da Vinci\n", + "\n", + "--- The Prompt DSPy Wrote Under The Hood ---\n", + "\n", + "\n", + "\n", + "\n", + "\u001b[34m[2026-03-10T18:06:02.084233]\u001b[0m\n", + "\n", + "\u001b[31mSystem message:\u001b[0m\n", + "\n", + "Your input fields are:\n", + "1. `question` (str): The user's query.\n", + "Your output fields are:\n", + "1. `reasoning` (str): \n", + "2. `answer` (str): A concise, factual answer.\n", + "All interactions will be structured in the following way, with the appropriate values filled in.\n", + "\n", + "[[ ## question ## ]]\n", + "{question}\n", + "\n", + "[[ ## reasoning ## ]]\n", + "{reasoning}\n", + "\n", + "[[ ## answer ## ]]\n", + "{answer}\n", + "\n", + "[[ ## completed ## ]]\n", + "In adhering to this structure, your objective is: \n", + " Answer questions with short, precise, and accurate fact-based answers.\n", + "\n", + "\n", + "\u001b[31mUser message:\u001b[0m\n", + "\n", + "[[ ## question ## ]]\n", + "Who wrote 'To Kill a Mockingbird'?\n", + "\n", + "\n", + "\u001b[31mAssistant message:\u001b[0m\n", + "\n", + "[[ ## reasoning ## ]]\n", + "'Tow Kill a Mockingbird' is a classic novel published in 1960, which addresses serious themes such as racial injustice and moral growth. 
It was written by Harper Lee, an American author who received the Pulitzer Prize for this work.\n", + "\n", + "[[ ## answer ## ]]\n", + "Harper Lee\n", + "\n", + "[[ ## completed ## ]]\n", + "\n", + "\n", + "\u001b[31mUser message:\u001b[0m\n", + "\n", + "[[ ## question ## ]]\n", + "What is the chemical symbol for Gold?\n", + "\n", + "\n", + "\u001b[31mAssistant message:\u001b[0m\n", + "\n", + "[[ ## reasoning ## ]]\n", + "The chemical symbol for Gold is derived from its Latin name \"aurum.\" It is a one or two-letter abbreviation used in chemical formulas and notation.\n", + "\n", + "[[ ## answer ## ]]\n", + "Au\n", + "\n", + "[[ ## completed ## ]]\n", + "\n", + "\n", + "\u001b[31mUser message:\u001b[0m\n", + "\n", + "[[ ## question ## ]]\n", + "How many planets are in the Solar System?\n", + "\n", + "\n", + "\u001b[31mAssistant message:\u001b[0m\n", + "\n", + "[[ ## reasoning ## ]]\n", + "The Solar System is known to have eight recognized planets, which are Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, and Neptune.\n", + "\n", + "[[ ## answer ## ]]\n", + "8\n", + "\n", + "[[ ## completed ## ]]\n", + "\n", + "\n", + "\u001b[31mUser message:\u001b[0m\n", + "\n", + "[[ ## question ## ]]\n", + "Who painted the Mona Lisa?\n", + "\n", + "Respond with the corresponding output fields, starting with the field `[[ ## reasoning ## ]]`, then `[[ ## answer ## ]]`, and then ending with the marker for `[[ ## completed ## ]]`.\n", + "\n", + "\n", + "\u001b[31mResponse:\u001b[0m\n", + "\n", + "\u001b[32m[[ ## reasoning ## ]]\n", + "The Mona Lisa is a renowned painting created during the Renaissance period. 
It was painted by the Italian artist Leonardo da Vinci, and it is famous for its exquisite detail and the subject's enigmatic expression.\n", + "\n", + "[[ ## answer ## ]]\n", + "Leonardo da Vinci\n", + "\n", + "[[ ## completed ## ]]\u001b[0m\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ] + } + ], + "source": [ + "# Test the optimized agent on a brand new question\n", + "test_question = \"Who painted the Mona Lisa?\"\n", + "optimized_response = optimized_agent(question=test_question)\n", + "\n", + "print(f\"Question: {test_question}\")\n", + "# πŸ‘‡ Add this line right here to reveal the hidden Chain of Thought!\n", + "print(f\"Reasoning: {optimized_response.reasoning}\") \n", + "print(f\"Optimized Answer: {optimized_response.answer}\\n\")\n", + "\n", + "# THE REVEAL: Let's look at the exact prompt DSPy wrote for us!\n", + "print(\"--- The Prompt DSPy Wrote Under The Hood ---\")\n", + "lm.inspect_history(n=1)" + ] + }, + { + "cell_type": "markdown", + "id": "5de47f99", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/start-agents/dspy_starter/requirements.txt b/examples/start-agents/dspy_starter/requirements.txt new file mode 100644 index 00000000..c78e77d6 --- /dev/null +++ b/examples/start-agents/dspy_starter/requirements.txt @@ -0,0 +1,4 @@ +dspy>=2.5.0 +jupyter>=1.0.0 +python-dotenv>=1.1.0 +streamlit>=1.32.0 \ No newline at end of file diff --git a/examples/start-agents/google_adk_starter/.env.example b/examples/start-agents/google_adk_starter/.env.example new file mode 100644 index 00000000..f88b2323 --- /dev/null +++ 
b/examples/start-agents/google_adk_starter/.env.example @@ -0,0 +1,8 @@ +# 1. To use Google AI Studio (Recommended for local dev) +GOOGLE_GENAI_USE_VERTEXAI=FALSE +GOOGLE_API_KEY="your-gemini-api-key-here" + +# 2. To use Google Cloud Vertex AI (For production) +# GOOGLE_GENAI_USE_VERTEXAI=TRUE +# GOOGLE_CLOUD_PROJECT="your-gcp-project-id" +# GOOGLE_CLOUD_LOCATION="us-central1" \ No newline at end of file diff --git a/examples/start-agents/google_adk_starter/README.md b/examples/start-agents/google_adk_starter/README.md new file mode 100644 index 00000000..7be020fa --- /dev/null +++ b/examples/start-agents/google_adk_starter/README.md @@ -0,0 +1,100 @@ +# πŸ€– Google ADK Production Starter + +*Cloud deployment architecture verified for [Saturn Cloud](https://saturncloud.io/).* + +**Hardware:** CPU/GPU | **Resource:** Python Project & Web App | **Tech Stack:** Google ADK, Streamlit, Gemini 2.5, Open-Meteo API + +

+ Saturn Cloud + Python Script + Streamlit + Google ADK +

+ +## πŸ“– Overview + +This template provides an enterprise-grade implementation of an AI agent using the open-source **Google Agent Development Kit (ADK)**. + +While Google ADK ships with a built-in development CLI (`adk web`), Google explicitly states it is not meant for production deployment. This template bridges that gap by demonstrating how to **programmatically execute ADK agents** inside a custom **Streamlit** dashboard using ADK's native `Runner` and `SessionService` classes. + +### ✨ Key Capabilities +* **Programmatic ADK Execution:** Bypasses the ADK command line and embeds the agent directly into a custom Python application. +* **Asynchronous Session Management:** Uses Python's `asyncio` to securely mount ADK's `InMemorySessionService` inside Streamlit's synchronous execution loop. +* **Live API Tools:** Swaps out mocked data for real-world API connections, allowing the agent to fetch live, geocoded weather data from the open web using the Open-Meteo API. +* **Model Agnostic Routing:** Easily toggle between the free Google AI Studio endpoint (for local dev) and enterprise Google Cloud Vertex AI (for production). + +--- + +## πŸ—οΈ Local Setup & Installation + +**1. Create Virtual Environment & Install Dependencies** +```bash +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt + +``` + +**2. Configure Environment Variables** +Create an environment file to securely store your Gemini API key. + +```bash +cp .env.example .env +nano .env + +``` + +Inside your `.env` file, ensure you have the following configured: + +```env +GOOGLE_API_KEY="your-gemini-api-key-here" +GOOGLE_GENAI_USE_VERTEXAI=FALSE + +``` + +*(Note: Setting `USE_VERTEXAI=FALSE` instructs ADK to route via the Google AI Studio endpoint. Set this to `TRUE` if executing within a Google Cloud project).* + +--- + +## πŸš€ Execution & UI + +Because this architecture utilizes a custom frontend, do **not** use the `adk web` or `adk run` terminal commands. 
+ +Launch the interactive Streamlit application directly: + +```bash +streamlit run app.py + +``` + +*The local server will initialize and bind to `http://localhost:8501`.* + +--- + +## ☁️ Cloud Deployment + +This repository is structured for containerized web hosting. To deploy this ADK application to a production environment, you can provision a resource on [Saturn Cloud](https://saturncloud.io/). + +**Deployment Specifications:** + +1. **Resource Type:** Streamlit Deployment / Python Server. +2. **Environment Variables:** Inject `GOOGLE_API_KEY` directly into the Saturn Cloud secrets manager. Do not commit your `.env` file. +3. **Start Command:** `streamlit run app.py --server.port 8000 --server.address 0.0.0.0` +4. **Network Routing:** Ensure the deployment's exposed port matches the Streamlit configuration. + +--- + +## πŸ“ Project Architecture + +* `my_agent/agent.py`: Contains the core ADK `root_agent` definition and the Python functions that act as its toolset. +* `my_agent/__init__.py`: Required module initialization for the ADK framework. +* `app.py`: The custom Streamlit frontend that initializes the asynchronous ADK `Runner` and manages UI state. 
+ +--- + +## πŸ“š Official Documentation & References + +* **Deployment Infrastructure:** [Saturn Cloud Documentation](https://saturncloud.io/docs/) +* **ADK GitHub Repository:** [Google ADK Python](https://github.com/google/adk-python) +* **ADK Official Docs:** [Google ADK Documentation](https://google.github.io/adk-docs/) +* **UI Framework:** [Streamlit Documentation](https://docs.streamlit.io/) diff --git a/examples/start-agents/google_adk_starter/app.py b/examples/start-agents/google_adk_starter/app.py new file mode 100644 index 00000000..31942380 --- /dev/null +++ b/examples/start-agents/google_adk_starter/app.py @@ -0,0 +1,86 @@ +import uuid +import asyncio # <-- NEW: Import asyncio +import streamlit as st +from dotenv import load_dotenv + +# ADK Programmatic Imports +from google.adk.runners import Runner +from google.adk.sessions import InMemorySessionService +from google.genai import types + +# Import our custom ADK agent package +from my_agent.agent import root_agent + +# 1. Page Configuration +st.set_page_config(page_title="Google ADK Production UI", page_icon="☁️", layout="centered") +load_dotenv() + +st.title("☁️ Google ADK Production Dashboard") +st.markdown("Interact with an enterprise-grade Google ADK agent connected to live APIs.") + +# 2. Initialize ADK Session State +if "session_id" not in st.session_state: + st.session_state.session_id = str(uuid.uuid4()) + st.session_state.messages = [] + + # Create the ADK session manager + st.session_state.session_service = InMemorySessionService() + + # <-- FIX: Wrap the async creation step in asyncio.run() --> + asyncio.run( + st.session_state.session_service.create_session( + app_name="streamlit_dashboard", + user_id="web_user", + session_id=st.session_state.session_id + ) + ) + + # Attach the agent to the runner + st.session_state.runner = Runner( + agent=root_agent, + app_name="streamlit_dashboard", + session_service=st.session_state.session_service + ) + +# 3. 
Render previous chat history +for msg in st.session_state.messages: + with st.chat_message(msg["role"]): + st.write(msg["content"]) + +# 4. Handle new user input +user_input = st.chat_input("Ask about the live weather anywhere in the world...") + +if user_input: + # Display user prompt + st.session_state.messages.append({"role": "user", "content": user_input}) + with st.chat_message("user"): + st.write(user_input) + + with st.chat_message("assistant"): + with st.spinner("Agent is calling external APIs..."): + try: + # Format the message for Google GenAI + content = types.Content( + role="user", + parts=[types.Part(text=user_input)] + ) + + # Execute the ADK Runner asynchronously + events = st.session_state.runner.run( + user_id="web_user", + session_id=st.session_state.session_id, + new_message=content + ) + + # ADK streams back multiple events; we want to catch the final generated text + final_response = "Error: Agent returned no content." + for event in events: + if event.is_final_response(): + final_response = event.content.parts[0].text + + # Render to UI and save to memory + st.write(final_response) + st.session_state.messages.append({"role": "assistant", "content": final_response}) + + except Exception as e: + st.error(f"ADK Execution Error: {str(e)}") \ No newline at end of file diff --git a/examples/start-agents/google_adk_starter/my_agent/__init__.py b/examples/start-agents/google_adk_starter/my_agent/__init__.py new file mode 100644 index 00000000..63bd45e6 --- /dev/null +++ b/examples/start-agents/google_adk_starter/my_agent/__init__.py @@ -0,0 +1 @@ +from . 
import agent \ No newline at end of file diff --git a/examples/start-agents/google_adk_starter/my_agent/agent.py b/examples/start-agents/google_adk_starter/my_agent/agent.py new file mode 100644 index 00000000..d788eee9 --- /dev/null +++ b/examples/start-agents/google_adk_starter/my_agent/agent.py @@ -0,0 +1,53 @@ +import os +import json +import urllib.request +import urllib.parse +from dotenv import load_dotenv +from google.adk.agents import Agent + +# Load environment variables securely +load_dotenv() + +# 1. Define a tool with a REAL internet connection +def get_realtime_weather(location: str) -> str: + """Fetches real-time weather data for a specified location.""" + print(f"[System] Executing ADK Tool: Fetching live weather for {location}...") + headers = {"User-Agent": "Google-ADK-Agent/1.0"} + + try: + # Step A: Convert city name to coordinates safely + safe_location = urllib.parse.quote(location) + geocode_url = f"https://geocoding-api.open-meteo.com/v1/search?name={safe_location}&count=1&format=json" + + req = urllib.request.Request(geocode_url, headers=headers) + with urllib.request.urlopen(req, timeout=10.0) as response: + geo_data = json.loads(response.read().decode()) + + if not geo_data.get("results"): + return f"System Error: Could not find geographical coordinates for '{location}'." + + lat = geo_data["results"][0]["latitude"] + lon = geo_data["results"][0]["longitude"] + + # Step B: Fetch real-time weather using coordinates + weather_url = f"https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lon}&current=temperature_2m,wind_speed_10m" + + req2 = urllib.request.Request(weather_url, headers=headers) + with urllib.request.urlopen(req2, timeout=10.0) as response2: + weather_data = json.loads(response2.read().decode()) + + temp = weather_data.get("current", {}).get("temperature_2m", "Unknown") + wind = weather_data.get("current", {}).get("wind_speed_10m", "Unknown") + + return f"Live Data -> Temperature: {temp}Β°C, Wind Speed: {wind} km/h." 
+ except Exception as e: + return f"System Error: Network failure - {str(e)}" + +# 2. Initialize the primary ADK agent +root_agent = Agent( + name="production_weather_agent", + model="gemini-2.5-flash", + description="A helpful assistant agent capable of checking real-time weather.", + instruction="You are an expert, helpful assistant. Use the get_realtime_weather tool to answer user questions factually.", + tools=[get_realtime_weather] +) \ No newline at end of file diff --git a/examples/start-agents/google_adk_starter/requirements.txt b/examples/start-agents/google_adk_starter/requirements.txt new file mode 100644 index 00000000..9ec419ec --- /dev/null +++ b/examples/start-agents/google_adk_starter/requirements.txt @@ -0,0 +1,4 @@ +google-adk>=0.8.0 +google-genai>=0.1.0 +python-dotenv>=1.1.0 +streamlit>=1.32.0 \ No newline at end of file diff --git a/examples/start-agents/langchain_langgraph_starter/.env.example b/examples/start-agents/langchain_langgraph_starter/.env.example new file mode 100644 index 00000000..e4b4c46f --- /dev/null +++ b/examples/start-agents/langchain_langgraph_starter/.env.example @@ -0,0 +1 @@ +OPENAI_API_KEY="place your key here" \ No newline at end of file diff --git a/examples/start-agents/langchain_langgraph_starter/README.md b/examples/start-agents/langchain_langgraph_starter/README.md new file mode 100644 index 00000000..cdba5d44 --- /dev/null +++ b/examples/start-agents/langchain_langgraph_starter/README.md @@ -0,0 +1,108 @@ +# πŸ•ΈοΈ LangChain-LangGraph Starter + +*Deploy this AI workflow on [Saturn Cloud](https://saturncloud.io/).* + +**Hardware:** CPU | **Resource:** Jupyter Notebook, Streamlit Web App | **Tech Stack:** LangChain, LangGraph, Python, Streamlit + +

+ Saturn Cloud + Streamlit + LangChain + LangGraph +

+ +## πŸ“– Overview + +This template provides a foundational implementation of a stateful, tool-calling agent workflow utilizing **LangChain** and **LangGraph**. It demonstrates the transition from linear LLM execution to cyclic, graph-based orchestration with conditional edge routing. + +By defining a `StateGraph`, the architecture supports complex agentic capabilities. The system utilizes LangGraph's conditional routing to autonomously determine when to answer directly and when to route execution to an external computational tool (`calculate_multiply`) before returning the final response. + +### Infrastructure Deployment (Saturn Cloud) + +Deploying this architecture on [Saturn Cloud](https://saturncloud.io/) provides several environment benefits: +* **Dual Interfaces:** Provisions robust CPU instances capable of simultaneously running JupyterLab for backend graph prototyping and Streamlit for frontend client access. +* **State Management:** Maintains the Streamlit server process in the background, isolating client session states. +* **Environment Isolation:** Secures API keys within `.env` configurations and manages package dependencies via virtual environments. + +--- + +## βœ… Prerequisites + +1. **Saturn Cloud Workspace:** Provision a CPU workspace via [Saturn Cloud](https://saturncloud.io/). +2. **OpenAI API Key:** Generate an API token via the [OpenAI Developer Platform](https://platform.openai.com/). + +--- + +## πŸ—οΈ Setup & Deployment + +Open a terminal in your Saturn Cloud workspace and execute the following commands. + +**1. Create Virtual Environment & Install Dependencies** +```bash +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt + +``` + +**2. Configure Environment Variables** +Create your `.env` file and define your API key. + +```bash +cp .env.example .env +nano .env +# Define OPENAI_API_KEY. Save and exit (Ctrl+O, Enter, Ctrl+X). + +``` + +**3. 
Register Jupyter Kernel (Optional for Notebook Development)** + +```bash +python -m ipykernel install --user --name=venv --display-name="Python (venv)" + +``` + +--- + +## πŸ’‘ Execution Methods + +### Method A: Production Web Interface (Streamlit) + +To launch the production-grade graphical interface with live tool calling: + +```bash +streamlit run app.py + +``` + +* Navigate to the **Local URL** output in the terminal (default: `http://localhost:8501`). +* **Test Prompts:** * *"Explain LangGraph in one sentence."* (Executes direct LLM response path) +* *"What is 456 multiplied by 789?"* (Triggers the conditional tool-calling edge) + + + +### Method B: Interactive Backend Prototyping (Jupyter) + +1. Open `workflow_starter.ipynb` in the Jupyter interface. +2. Ensure the kernel is set to **Python (venv)**. +3. Execute the cells sequentially to test the graph compilation and internal state manipulation prior to UI deployment. + +--- + +## πŸ§ͺ Testing + +This template includes automated testing capabilities utilizing `pytest` and `nbmake` to validate the Jupyter Notebook's graph schema execution. 
+ +```bash +pytest --nbmake workflow_starter.ipynb + +``` + +--- + +## πŸ“š Official Documentation & References + +* **Deployment Platform:** [Saturn Cloud Documentation](https://saturncloud.io/docs/) +* **AI Framework:** [LangChain Documentation](https://python.langchain.com/docs/get_started/introduction) +* **Graph Orchestration:** [LangGraph Documentation](https://langchain-ai.github.io/langgraph/) +* **Web UI Framework:** [Streamlit Documentation](https://docs.streamlit.io/) diff --git a/examples/start-agents/langchain_langgraph_starter/app.py b/examples/start-agents/langchain_langgraph_starter/app.py new file mode 100644 index 00000000..e1d44604 --- /dev/null +++ b/examples/start-agents/langchain_langgraph_starter/app.py @@ -0,0 +1,105 @@ +import streamlit as st +import os +from dotenv import load_dotenv +from typing import Annotated +from langchain_openai import ChatOpenAI +from langchain_core.tools import tool +from langchain_core.messages import HumanMessage, AIMessage, ToolMessage +from langgraph.graph import StateGraph, START, END, MessagesState +from langgraph.prebuilt import ToolNode, tools_condition + +# Initialize environment variables +load_dotenv() + +# --- UI CONFIGURATION --- +st.set_page_config( + page_title="LangGraph Agent Dashboard", + page_icon="πŸ•ΈοΈ", + layout="centered", +) + +st.title("LangGraph Tool-Calling Agent") +st.markdown("Stateful agentic workflow with conditional tool routing.") + +# Verify Environment +if not os.getenv("OPENAI_API_KEY"): + st.error("Environment Error: OPENAI_API_KEY is not defined.") + st.stop() + +# --- TOOL DEFINITION --- +@tool +def calculate_multiply(a: int, b: int) -> int: + """Multiply two integers together. 
Use this tool whenever you need to multiply numbers.""" + return a * b + +# --- GRAPH INITIALIZATION --- +@st.cache_resource(show_spinner=False) +def initialize_graph(): + """Compiles the LangGraph workflow and caches the executable object.""" + tools = [calculate_multiply] + + # Initialize the LLM and bind the external tools to its execution context + llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.1) + llm_with_tools = llm.bind_tools(tools) + + def chatbot_node(state: MessagesState): + """Passes the current conversation state to the LLM.""" + return {"messages": [llm_with_tools.invoke(state["messages"])]} + + # Build the StateGraph utilizing the built-in MessagesState schema + graph_builder = StateGraph(MessagesState) + + # Add the primary chatbot node and the prebuilt Tool execution node + graph_builder.add_node("chatbot", chatbot_node) + graph_builder.add_node("tools", ToolNode(tools=tools)) + + # Define the graph edges and conditional routing + graph_builder.add_edge(START, "chatbot") + # Route to 'tools' if the LLM decides to call a tool, otherwise route to END + graph_builder.add_conditional_edges("chatbot", tools_condition) + # Return to the chatbot after the tool executes to formulate the final answer + graph_builder.add_edge("tools", "chatbot") + + return graph_builder.compile() + +app_graph = initialize_graph() + +# --- STATE MANAGEMENT --- +if "messages" not in st.session_state: + st.session_state.messages = [ + AIMessage(content="System online. I am equipped with a multiplication tool. 
How can I assist you?") + ] + +# Render chat history (filtering out hidden ToolMessages for clean UI) +for msg in st.session_state.messages: + if isinstance(msg, AIMessage) and not msg.tool_calls: + with st.chat_message("assistant"): + st.markdown(msg.content) + elif isinstance(msg, HumanMessage): + with st.chat_message("user"): + st.markdown(msg.content) + +# --- EXECUTION LOGIC --- +if prompt := st.chat_input("Ask a question or request a calculation..."): + + # Render user prompt + st.session_state.messages.append(HumanMessage(content=prompt)) + with st.chat_message("user"): + st.markdown(prompt) + + with st.chat_message("assistant"): + with st.spinner("Agent computing path..."): + try: + # Execute the graph with the full conversational history + result = app_graph.invoke({"messages": st.session_state.messages}) + + # Update session state with the new messages generated by the graph + # (This includes the AI's tool request, the tool execution result, and the final AI response) + st.session_state.messages = result["messages"] + + # Render the final output + final_response = result["messages"][-1].content + st.markdown(final_response) + + except Exception as e: + st.error(f"Execution Error: {str(e)}") \ No newline at end of file diff --git a/examples/start-agents/langchain_langgraph_starter/requirements.txt b/examples/start-agents/langchain_langgraph_starter/requirements.txt new file mode 100644 index 00000000..d87bcdc1 --- /dev/null +++ b/examples/start-agents/langchain_langgraph_starter/requirements.txt @@ -0,0 +1,8 @@ +langchain>=0.1.0 +langchain-openai>=0.1.0 +langgraph>=0.0.20 +jupyterlab>=4.0.0 +python-dotenv>=1.1.0 +pytest>=8.0.0 +nbmake>=1.5.0 +streamlit>=1.32.0 \ No newline at end of file diff --git a/examples/start-agents/langchain_langgraph_starter/workflow_starter.ipynb b/examples/start-agents/langchain_langgraph_starter/workflow_starter.ipynb new file mode 100644 index 00000000..90cf5eac --- /dev/null +++ 
b/examples/start-agents/langchain_langgraph_starter/workflow_starter.ipynb @@ -0,0 +1,193 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15581d2", + "metadata": {}, + "source": [ + "## Environment & Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "3a0eef92", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "βœ… Environment initialized.\n" + ] + } + ], + "source": [ + "import os\n", + "from dotenv import load_dotenv\n", + "from typing import Annotated\n", + "from typing_extensions import TypedDict\n", + "from langgraph.graph import StateGraph, START, END\n", + "from langgraph.graph.message import add_messages\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "# Initialize environment variables\n", + "load_dotenv()\n", + "\n", + "if not os.getenv(\"OPENAI_API_KEY\"):\n", + " raise ValueError(\"Environment Error: OPENAI_API_KEY is not defined.\")\n", + " \n", + "print(\"βœ… Environment initialized.\")" + ] + }, + { + "cell_type": "markdown", + "id": "4183607b", + "metadata": {}, + "source": [ + "## Define State & Nodes" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "4e247f56", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "βœ… State and Nodes defined.\n" + ] + } + ], + "source": [ + "# 1. Define the State Schema (TypedDict)\n", + "# 'add_messages' appends new messages to the existing list rather than overwriting\n", + "class State(TypedDict):\n", + " messages: Annotated[list, add_messages]\n", + "\n", + "# 2. Initialize the LLM\n", + "llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0.1)\n", + "\n", + "# 3. 
Define the core node function\n", + "def chatbot_node(state: State):\n", + " \"\"\"Processes the current state messages and returns the LLM response.\"\"\"\n", + " response = llm.invoke(state[\"messages\"])\n", + " # Return a dictionary updating the 'messages' key\n", + " return {\"messages\": [response]}\n", + "\n", + "print(\"βœ… State and Nodes defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "870ca855", + "metadata": {}, + "source": [ + "## Compile and Execute the Graph" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "78f0dc14", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- Execution Output ---\n", + "\n", + "LangChain is a framework designed for building applications that utilize language models, providing tools for managing prompts, chains, and agents to enhance interaction with these models. In contrast, LangGraph focuses on representing and manipulating knowledge graphs, enabling users to integrate and query structured data alongside language model capabilities for more complex applications.\n" + ] + } + ], + "source": [ + "# 1. Build the StateGraph\n", + "graph_builder = StateGraph(State)\n", + "\n", + "# 2. Add nodes and define routing edges\n", + "graph_builder.add_node(\"chatbot\", chatbot_node)\n", + "graph_builder.add_edge(START, \"chatbot\")\n", + "graph_builder.add_edge(\"chatbot\", END)\n", + "\n", + "# 3. Compile the graph into an executable Runnable\n", + "app = graph_builder.compile()\n", + "\n", + "# 4. 
Execute the workflow\n", + "initial_state = {\"messages\": [\"Explain the difference between LangChain and LangGraph in two sentences.\"]}\n", + "result = app.invoke(initial_state)\n", + "\n", + "print(\"--- Execution Output ---\\n\")\n", + "print(result[\"messages\"][-1].content)" + ] + }, + { + "cell_type": "markdown", + "id": "ad296c03", + "metadata": {}, + "source": [ + "## Automated State Validation" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5e803866", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- Running Graph Validations ---\n", + "βœ… All graph validations passed. The LangGraph architecture is fully functional.\n" + ] + } + ], + "source": [ + "# Validate the execution output structure\n", + "print(\"--- Running Graph Validations ---\")\n", + "\n", + "try:\n", + " # Check if the state dictionary contains the required keys\n", + " assert \"messages\" in result, \"Failure: State dictionary is missing the 'messages' key.\"\n", + " \n", + " # Check if the LLM actually appended a response to the initial prompt\n", + " assert len(result[\"messages\"]) > 1, \"Failure: The graph did not append an AI response to the state.\"\n", + " \n", + " # Check if the final message contains actual text\n", + " assert len(result[\"messages\"][-1].content) > 0, \"Failure: The AI returned an empty string.\"\n", + " \n", + " print(\"βœ… All graph validations passed. 
The LangGraph architecture is fully functional.\")\n", + " \n", + "except AssertionError as e:\n", + " print(f\"❌ Test Failed: {e}\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/start-agents/llamaindex-tasks/.env.example b/examples/start-agents/llamaindex-tasks/.env.example new file mode 100644 index 00000000..2cd20ee6 --- /dev/null +++ b/examples/start-agents/llamaindex-tasks/.env.example @@ -0,0 +1 @@ +OPENAI_API_KEY="your_openai_api_key_here" \ No newline at end of file diff --git a/examples/start-agents/llamaindex-tasks/README.md b/examples/start-agents/llamaindex-tasks/README.md new file mode 100644 index 00000000..2df9fffa --- /dev/null +++ b/examples/start-agents/llamaindex-tasks/README.md @@ -0,0 +1,123 @@ +# LlamaIndex Task Manager Agent + +*Deployment Architecture: [Saturn Cloud](https://saturncloud.io/)* + +**Hardware Requirements:** CPU | **Execution Interface:** CLI Script, Streamlit Web App | **Stack:** Python, LlamaIndex, Streamlit + +

+ Saturn Cloud + LlamaIndex + Streamlit + Python +

+ +## Overview + +This template provides a reference implementation for a Retrieval-Augmented Generation (RAG) pipeline utilizing **LlamaIndex**. It demonstrates context ingestion, vector indexing, and stateful natural language querying against localized data. + + + +The architecture reads from a local `./data` directory to ingest context files. To facilitate both backend prototyping and production deployment, this repository provides two execution interfaces: + +1. **Command-Line Interface (`task_manager.py`):** A lightweight, interactive terminal loop for testing vector indexing and raw query logic. +2. **Web Interface (`app.py`):** A Streamlit application utilizing session state management and resource caching (`@st.cache_resource`) to maintain the vector index in active memory, preventing redundant disk I/O operations. + +In both implementations, the LLM temperature is strictly modulated (`0.1`) to prioritize deterministic data extraction over generative variance. + +### Deployment Infrastructure (Saturn Cloud) + +Deploying this repository on [Saturn Cloud](https://saturncloud.io/) provides the following environment characteristics: +* **Compute Isolation:** Provisions dedicated instances for Python environment execution and localized vector indexing. +* **Process Persistence:** Maintains the Streamlit web server and background terminal processes. +* **File System Access:** Grants the LlamaIndex `SimpleDirectoryReader` direct read access to the persistent `/data` directory. + +--- + +## Prerequisites + +1. **Saturn Cloud Workspace:** Provision a CPU workspace via [Saturn Cloud](https://saturncloud.io/). +2. **OpenAI API Key:** Generate an API key via the [OpenAI Developer Platform](https://platform.openai.com/). *Note: LlamaIndex requires an active OpenAI API key for embedding generation and text completion functions.* + +--- + +## Environment Initialization + +Execute the following commands in the workspace terminal to configure the environment. + +**1. 
Create and Activate Virtual Environment** +```bash +python -m venv venv +source venv/bin/activate + +``` + +**2. Upgrade Build Tools** +To prevent dependency resolution failures with modern micro-packages, upgrade the core Python build tools prior to installation: + +```bash +python -m pip install --upgrade pip setuptools wheel + +``` + +**3. Install Package Dependencies** +Install the modular LlamaIndex integrations and UI framework utilizing the `--no-cache-dir` flag to ensure clean dependency resolution: + +```bash +pip install --no-cache-dir -r requirements.txt + +``` + +**4. Configure Environment Variables** +Initialize the `.env` file and define the required API key. + +```bash +cp .env.example .env +nano .env +# Define OPENAI_API_KEY. Save and exit the editor. + +``` + +**5. Data Provisioning** +Verify that target context files (e.g., `tasks.txt`) are present within the local `data/` directory prior to server initialization. + +--- + +## Execution Methods + +### Method A: Command-Line Interface (Backend Testing) + +To test the raw RAG logic without initializing the web server, run the terminal script: + +```bash +python task_manager.py + +``` + +* Use standard input to query the indexed data. +* Input `exit` or `quit` to terminate the process. + +### Method B: Web Application (Streamlit UI) + +To launch the production-grade graphical interface, execute the Streamlit process: + +```bash +streamlit run app.py + +``` + +* Navigate to the **Local URL** output in the terminal (default: `http://localhost:8501`). +* The application will cache the vector index upon initialization to optimize subsequent conversational turns. + +**Query Execution Examples:** + +* "What are the pending items for Project Alpha?" +* "List all administrative tasks requiring attention before the 15th." +* "Are there any meetings scheduled with the design team?" 
+ +--- + +## Official Documentation & References + +* **Deployment Platform:** [Saturn Cloud Documentation](https://saturncloud.io/docs/) +* **Framework Reference:** [LlamaIndex Documentation](https://docs.llamaindex.ai/) +* **UI Framework:** [Streamlit Documentation](https://docs.streamlit.io/) diff --git a/examples/start-agents/llamaindex-tasks/app.py b/examples/start-agents/llamaindex-tasks/app.py new file mode 100644 index 00000000..fa8aecda --- /dev/null +++ b/examples/start-agents/llamaindex-tasks/app.py @@ -0,0 +1,78 @@ +import streamlit as st +import os +from dotenv import load_dotenv +from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings +from llama_index.llms.openai import OpenAI + +# Initialize environment variables +load_dotenv() + +# --- UI CONFIGURATION --- +st.set_page_config( + page_title="Task Assistant | LlamaIndex RAG", + page_icon="βš™οΈ", + layout="centered", +) + +st.title("LlamaIndex Task Assistant") +st.markdown("Retrieval-Augmented Generation interface for local task data.") + +# Verify Environment +api_key = os.getenv("OPENAI_API_KEY") +if not api_key: + st.error("Environment Error: OPENAI_API_KEY is not defined.") + st.stop() + +# --- CORE RAG PIPELINE --- +@st.cache_resource(show_spinner=False) +def initialize_query_engine(): + """Reads local data and initializes the vector index. 
Caches the output.""" + data_dir = "./data" + + if not os.path.exists(data_dir) or not os.listdir(data_dir): + st.error(f"Initialization Error: The '{data_dir}' directory is missing or empty.") + st.stop() + + # Configure deterministic LLM settings + Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1) + + # Ingest and Index + documents = SimpleDirectoryReader(data_dir).load_data() + index = VectorStoreIndex.from_documents(documents) + + return index.as_query_engine() + +# Initialize the engine +with st.spinner("Building vector index from local data..."): + query_engine = initialize_query_engine() + +# --- STATE MANAGEMENT --- +if "messages" not in st.session_state: + st.session_state.messages = [ + {"role": "assistant", "content": "Vector index initialized. What information do you need regarding your tasks?"} + ] + +# Render chat history +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +# --- EXECUTION LOGIC --- +if prompt := st.chat_input("Query your localized data..."): + + # Append and render user query + st.session_state.messages.append({"role": "user", "content": prompt}) + with st.chat_message("user"): + st.markdown(prompt) + + # Execute RAG query and render response + with st.chat_message("assistant"): + try: + response = query_engine.query(prompt) + st.markdown(response.response) + + # Append final output to state + st.session_state.messages.append({"role": "assistant", "content": response.response}) + + except Exception as e: + st.error(f"API Execution Error: {str(e)}") \ No newline at end of file diff --git a/examples/start-agents/llamaindex-tasks/data/tasks.txt b/examples/start-agents/llamaindex-tasks/data/tasks.txt new file mode 100644 index 00000000..1c52985f --- /dev/null +++ b/examples/start-agents/llamaindex-tasks/data/tasks.txt @@ -0,0 +1,11 @@ +Project Alpha: +- Finalize the LlamaIndex template deployment by Tuesday EOD. 
+- Email the infrastructure team regarding the CPU resource allocation. + +Project Beta: +- Review the pull requests for the frontend dashboard. +- Schedule a sync with the design team for Thursday at 2 PM. + +General Administrative: +- Renew the cloud hosting subscription by the 15th. +- Update the system architecture diagrams. \ No newline at end of file diff --git a/examples/start-agents/llamaindex-tasks/requirements.txt b/examples/start-agents/llamaindex-tasks/requirements.txt new file mode 100644 index 00000000..d8c4a43e --- /dev/null +++ b/examples/start-agents/llamaindex-tasks/requirements.txt @@ -0,0 +1,13 @@ +# Core Framework +llama-index-core>=0.10.0 + +# Explicit Integrations +llama-index-llms-openai +llama-index-embeddings-openai +llama-index-readers-file + +# Environment Management +python-dotenv>=1.1.0 + +# streamlit for dashboard +streamlit \ No newline at end of file diff --git a/examples/start-agents/llamaindex-tasks/task_manager.py b/examples/start-agents/llamaindex-tasks/task_manager.py new file mode 100644 index 00000000..df48d4d3 --- /dev/null +++ b/examples/start-agents/llamaindex-tasks/task_manager.py @@ -0,0 +1,56 @@ +import os +from dotenv import load_dotenv +from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings +from llama_index.llms.openai import OpenAI + +# 1. Initialize environment variables +load_dotenv() + +api_key = os.getenv("OPENAI_API_KEY") +if not api_key: + raise ValueError("Environment Error: OPENAI_API_KEY is not defined.") + +def initialize_task_assistant(data_dir="./data"): + """Loads local documents and builds the LlamaIndex vector store.""" + print(f"Initializing LlamaIndex RAG pipeline. 
Reading from {data_dir}...") + + # Configure the LLM globally (Temperature 0.1 for strict, factual retrieval) + Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1) + + try: + # Ingest data from the target directory + documents = SimpleDirectoryReader(data_dir).load_data() + + # Build the vector index + index = VectorStoreIndex.from_documents(documents) + + # Return the query engine interface + return index.as_query_engine() + + except ValueError as e: + print(f"Initialization Error: {e}. Please ensure the '{data_dir}' directory exists and contains text files.") + exit(1) + +if __name__ == "__main__": + print("--- LlamaIndex Task Manager ---") + + # Initialize the query engine + query_engine = initialize_task_assistant() + + print("\nTask Assistant is ready. (Type 'exit' to quit)") + + # Standard interactive execution loop + while True: + user_query = input("\nQuery your tasks: ") + + if user_query.lower() in ['exit', 'quit']: + print("Terminating process.") + break + + if user_query.strip(): + try: + # Query the vector index + response = query_engine.query(user_query) + print(f"\nAssistant: {response}") + except Exception as e: + print(f"\nAPI Execution Error: {e}") \ No newline at end of file diff --git a/examples/start-agents/openai-sdk-starter/.env.example b/examples/start-agents/openai-sdk-starter/.env.example new file mode 100644 index 00000000..2cd20ee6 --- /dev/null +++ b/examples/start-agents/openai-sdk-starter/.env.example @@ -0,0 +1 @@ +OPENAI_API_KEY="your_openai_api_key_here" \ No newline at end of file diff --git a/examples/start-agents/openai-sdk-starter/README.md b/examples/start-agents/openai-sdk-starter/README.md new file mode 100644 index 00000000..afbaab4c --- /dev/null +++ b/examples/start-agents/openai-sdk-starter/README.md @@ -0,0 +1,105 @@ +# Template: OpenAI SDK Starter + +*Deployment Architecture: [Saturn Cloud](https://saturncloud.io/)* + +**Hardware Requirements:** CPU | **Execution Interface:** Jupyter Notebook, Streamlit | 
**Stack:** Python, OpenAI SDK + +

+ Saturn Cloud + Jupyter + Streamlit + OpenAI + Python +

+ +## Overview + +This template provides a reference implementation for the **OpenAI Python SDK**. It demonstrates system prompt configuration, conversational state management, and chat completion API execution. + +The repository includes two execution interfaces: +1. **Interactive Prototyping (`openai_agents.ipynb`):** A Jupyter environment for executing raw SDK functions, formatting message arrays, and evaluating prompt structures. +2. **Web Interface (`app.py`):** A Streamlit application demonstrating state management and chunked streaming responses via a graphical interface. + +Both implementations utilize two distinct system prompts: an **Email Helper** (professional drafting) and a **Haiku Writer** (structured constraint adherence). + +### Deployment Infrastructure (Saturn Cloud) + +Deploying this repository on [Saturn Cloud](https://saturncloud.io/) provides the following environment characteristics: +* **Compute Isolation:** Provisions dedicated instances for Python environment execution. +* **Process Persistence:** Maintains the Streamlit web server and Jupyter kernels in the background. +* **Configuration Management:** Isolates API keys and environment variables via `.env` configuration files. + +--- + +## Prerequisites + +1. **Saturn Cloud Workspace:** Provision a CPU workspace via [Saturn Cloud](https://saturncloud.io/). +2. **OpenAI API Key:** Generate an API key via the [OpenAI Developer Platform](https://platform.openai.com/). + +--- + +## Environment Initialization + +Execute the following commands in the Saturn Cloud workspace terminal to configure the environment. + +**1. Create and Activate Virtual Environment** +```bash +python -m venv venv +source venv/bin/activate + +``` + +**2. Install Package Dependencies** + +```bash +pip install -r requirements.txt + +``` + +**3. Configure Environment Variables** +Initialize the `.env` file and define the required API key. + +```bash +cp .env.example .env +nano .env +# Define OPENAI_API_KEY. 
Save and exit the editor. + +``` + +--- + +## Execution Methods + +### Method A: Interactive Prototyping (Jupyter) + +1. Initialize Jupyter Lab within the workspace. +2. Open `openai_agents.ipynb`. +3. Set the active kernel to the `venv` environment. +4. Execute the cells sequentially to evaluate the SDK parameters and prompt outputs. + +### Method B: Web Interface (Streamlit) + +1. Ensure the virtual environment is activated in the terminal. +2. Initialize the Streamlit server process: + +```bash +streamlit run app.py + +``` + +3. Navigate to the **Local URL** output in the terminal (default: `http://localhost:8501`). +4. **Interface Controls:** +* Select the target model (`gpt-3.5-turbo`, `gpt-4o-mini`, `gpt-4o`). +* Toggle between the predefined agent system prompts. +* Modulate the **Temperature** parameter (0.0 for deterministic output, >1.0 for randomized output). + +--- + +## Official Documentation & References + +For further architectural specification, refer to the official documentation: + +* **Deployment Platform:** [Saturn Cloud Documentation](https://saturncloud.io/docs/) +* **SDK Reference:** [OpenAI Python SDK Repository](https://github.com/openai/openai-python) +* **Prompt Architecture:** [OpenAI Prompt Engineering Guide](https://platform.openai.com/docs/guides/prompt-engineering) +* **UI Framework:** [Streamlit Documentation](https://docs.streamlit.io/) diff --git a/examples/start-agents/openai-sdk-starter/app.py b/examples/start-agents/openai-sdk-starter/app.py new file mode 100644 index 00000000..c615833a --- /dev/null +++ b/examples/start-agents/openai-sdk-starter/app.py @@ -0,0 +1,99 @@ +import streamlit as st +from openai import OpenAI +import openai +import os +from dotenv import load_dotenv + +# Initialize environment variables +load_dotenv() + +# --- UI CONFIGURATION --- +st.set_page_config( + page_title="OpenAI SDK Agent Application", + page_icon="βš™οΈ", # Changed to a standard gear icon for a more technical look + layout="wide", + 
initial_sidebar_state="expanded", +) + +st.title("OpenAI SDK Agent") +st.markdown("Interface for executing system prompts via the OpenAI Python SDK.") + +# --- INITIALIZATION --- +api_key = os.getenv("OPENAI_API_KEY") +if not api_key: + st.error("Environment Error: OPENAI_API_KEY is not defined in the environment variables.") + st.stop() + +# Instantiate the OpenAI Client +client = OpenAI(api_key=api_key) + +# --- SIDEBAR CONFIGURATION --- +st.sidebar.header("Agent Configuration") + +model_choice = st.sidebar.selectbox( + "Target Model", + ["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4o"], + help="Select the OpenAI model to process the completions." +) + +agent_type = st.sidebar.radio( + "Agent Persona", + ["Email Helper", "Haiku Writer"] +) + +temperature = st.sidebar.slider( + "Temperature", + min_value=0.0, + max_value=1.0, + value=0.7, + step=0.1, + help="Higher values produce more randomized output. Lower values produce deterministic output." +) + +# Define system instruction logic based on persona +if agent_type == "Email Helper": + system_prompt = "You are a professional executive assistant. Your primary function is to draft, refine, and summarize professional emails based on user context. Maintain a formal, concise tone." +else: + system_prompt = "You are a master poet. You must respond to all user inputs strictly in the form of a haiku (5-7-5 syllable structure). Do not provide any conversational filler." 
+ +# --- STATE MANAGEMENT --- +# Initialize or reset chat history if the user switches the agent persona +if "current_agent" not in st.session_state or st.session_state.current_agent != agent_type: + st.session_state.messages = [{"role": "system", "content": system_prompt}] + st.session_state.current_agent = agent_type + +# Render chat history (excluding the hidden system prompt) +for message in st.session_state.messages: + if message["role"] != "system": + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +# --- EXECUTION LOGIC --- +if prompt := st.chat_input(f"Send a message to the {agent_type}..."): + + st.session_state.messages.append({"role": "user", "content": prompt}) + with st.chat_message("user"): + st.markdown(prompt) + + with st.chat_message("assistant"): + try: + # Execute API call with streaming enabled + stream = client.chat.completions.create( + model=model_choice, + messages=st.session_state.messages, + temperature=temperature, + stream=True + ) + + # Stream the response to the UI (Streamlit 1.32+ supports OpenAI streams natively) + response = st.write_stream(stream) + + # Append final output to state + st.session_state.messages.append({"role": "assistant", "content": response}) + + except openai.AuthenticationError: + st.error("API Execution Error: Authentication Failed (401). Verify your OPENAI_API_KEY.") + except openai.RateLimitError: + st.error("API Execution Error: Rate Limit or Quota Exceeded (429). Check your billing dashboard.") + except Exception as e: + st.error(f"API Execution Error: {str(e)}") \ No newline at end of file diff --git a/examples/start-agents/openai-sdk-starter/openai-agents.ipynb b/examples/start-agents/openai-sdk-starter/openai-agents.ipynb new file mode 100644 index 00000000..2cb592fe --- /dev/null +++ b/examples/start-agents/openai-sdk-starter/openai-agents.ipynb @@ -0,0 +1,207 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "init-header", + "metadata": {}, + "source": [ + "### 1. 
Environment Initialization\n",
+    "This cell loads the environment variables and initializes the standard OpenAI Python SDK client directly against the official OpenAI API (no custom base_url)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "init-code",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Client instantiated. Validating network authentication against OpenAI...\n"
+     ]
+    }
+   ],
+   "source": [
+    "import os\n",
+    "from dotenv import load_dotenv\n",
+    "from openai import OpenAI\n",
+    "\n",
+    "# 1. Load variables from .env\n",
+    "load_dotenv()\n",
+    "\n",
+    "# 2. Initialize the standard OpenAI SDK client\n",
+    "# (Removing the base_url forces the SDK to default to api.openai.com)\n",
+    "client = OpenAI(\n",
+    "    api_key=os.environ.get(\"OPENAI_API_KEY\")\n",
+    ")\n",
+    "\n",
+    "print(\"Client instantiated. Validating network authentication against OpenAI...\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "3395c96e",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "βœ… Authentication Successful!\n",
+      "Verified access to 114 models on the OpenAI endpoint.\n"
+     ]
+    }
+   ],
+   "source": [
+    "\n",
+    "# 3. Validate Authentication\n",
+    "try:\n",
+    "    # Execute a lightweight network request to list available models\n",
+    "    models_response = client.models.list()\n",
+    "    \n",
+    "    # If the request succeeds, the key is valid and authorized\n",
+    "    print(\"βœ… Authentication Successful!\")\n",
+    "    print(f\"Verified access to {len(models_response.data)} models on the OpenAI endpoint.\")\n",
+    "    \n",
+    "except Exception as e:\n",
+    "    # If the key is invalid, expired, or lacks billing setup, it will fail here\n",
+    "    print(\"❌ Authentication Failed.\")\n",
+    "    print(f\"Error Traceback: {e}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "email-header",
+   "metadata": {},
+   "source": [
+    "### 2. 
The Email Helper Persona\n", + "Executes a chat completion configured with a system prompt for professional correspondence." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "email-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- Email Output ---\n", + "\n", + "Subject: Production Deployment Delayed to Tuesday\n", + "\n", + "Dear Engineering Team,\n", + "\n", + "I hope this message finds you well. I wanted to inform you that the production deployment scheduled for today has been delayed until Tuesday at 10 AM. This delay is due to failing CI pipelines that need to be addressed before proceeding with the deployment.\n", + "\n", + "Your prompt attention to resolving these issues is greatly appreciated. Thank you for your understanding and cooperation in ensuring a successful deployment on Tuesday.\n", + "\n", + "Best regards,\n", + "\n", + "[Your Name] \n", + "[Your Position] \n", + "[Company Name]\n" + ] + } + ], + "source": [ + "def generate_email(user_context):\n", + " \"\"\"Executes a chat completion configured for professional correspondence.\"\"\"\n", + " response = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\", # Use an official OpenAI model\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": \"You are a professional executive assistant. Draft, refine, and summarize professional emails based on user context. 
Maintain a formal, concise tone.\"},\n", + " {\"role\": \"user\", \"content\": user_context}\n", + " ],\n", + " temperature=0.7, # Lower temperature for deterministic, professional output\n", + " )\n", + " return response.choices[0].message.content\n", + "\n", + "# Test the function\n", + "prompt = \"Draft a brief email to the engineering team stating that the production deployment is delayed until Tuesday at 10 AM due to failing CI pipelines.\"\n", + "print(\"--- Email Output ---\\n\")\n", + "print(generate_email(prompt))" + ] + }, + { + "cell_type": "markdown", + "id": "haiku-header", + "metadata": {}, + "source": [ + "### 3. The Haiku Writer Persona\n", + "Executes a chat completion with strict structural constraints to demonstrate prompt adherence." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "haiku-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- Haiku Output ---\n", + "\n", + "Late night code whispers, \n", + "Memory leak haunting me, \n", + "Dawn breaks, bug defeated.\n" + ] + } + ], + "source": [ + "def generate_haiku(topic):\n", + " \"\"\"Executes a chat completion with strict structural constraints.\"\"\"\n", + " response = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\", \n", + " messages=[\n", + " {\"role\": \"system\", \"content\": \"You are a master poet. You must respond to all user inputs strictly in the form of a haiku (5-7-5 syllable structure). 
Do not output any conversational filler.\"},\n", + " {\"role\": \"user\", \"content\": topic}\n", + " ],\n", + " temperature=0.9, \n", + " )\n", + " return response.choices[0].message.content\n", + "\n", + "# Test the function\n", + "topic = \"Debugging a memory leak at 2 AM.\"\n", + "print(\"--- Haiku Output ---\\n\")\n", + "print(generate_haiku(topic))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e3797fe2", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/start-agents/openai-sdk-starter/requirements.txt b/examples/start-agents/openai-sdk-starter/requirements.txt new file mode 100644 index 00000000..fe55f3cb --- /dev/null +++ b/examples/start-agents/openai-sdk-starter/requirements.txt @@ -0,0 +1,4 @@ +openai>=1.14.0 +python-dotenv>=1.1.0 +streamlit>=1.32.0 +jupyterlab>=4.0.0 \ No newline at end of file diff --git a/examples/start-agents/pydanticai_weather_bot/.env.example b/examples/start-agents/pydanticai_weather_bot/.env.example new file mode 100644 index 00000000..2cd20ee6 --- /dev/null +++ b/examples/start-agents/pydanticai_weather_bot/.env.example @@ -0,0 +1 @@ +OPENAI_API_KEY="your_openai_api_key_here" \ No newline at end of file diff --git a/examples/start-agents/pydanticai_weather_bot/README.md b/examples/start-agents/pydanticai_weather_bot/README.md new file mode 100644 index 00000000..59563e58 --- /dev/null +++ b/examples/start-agents/pydanticai_weather_bot/README.md @@ -0,0 +1,110 @@ +# 🌀️ PydanticAI Weather Agent + +*Deploy this AI agent on [Saturn Cloud](https://saturncloud.io/).* + 
+**Hardware:** CPU | **Resource:** CLI Script, Streamlit Web App | **Tech Stack:** PydanticAI, Python, Streamlit + +

+ Saturn Cloud + Streamlit + PydanticAI + OpenAI + Open-Meteo +

+ +## πŸ“– Overview + +This template provides a reference implementation for a Real-Time Weather Information Agent utilizing the **PydanticAI** framework. It demonstrates the framework's native capability to enforce strict type validation and schema generation during LLM tool calling. + + + +The agent autonomously extracts location data from natural language queries, executes an external HTTP request to the open-source **Open-Meteo API** to retrieve geographical coordinates, and subsequently fetches real-time meteorological conditions. + +To facilitate both backend prototyping and production deployment, this repository provides two execution interfaces: +1. **Command-Line Interface (`weather_agent.py`):** A lightweight, interactive terminal loop for testing tool execution logic. +2. **Web Interface (`app.py`):** A Streamlit application utilizing session state management to render a production-grade conversational dashboard. + +### Infrastructure Deployment (Saturn Cloud) + +Deploying this architecture on [Saturn Cloud](https://saturncloud.io/) provides several environment benefits: +* **Persistent Compute:** Maintains the Streamlit server process and background terminal processes. +* **Secrets Management:** Secures API keys and environment variables via isolated `.env` configurations. +* **Rapid Provisioning:** Utilizes `uv` for high-speed package resolution and virtual environment creation. + +--- + +## βœ… Prerequisites + +1. **Saturn Cloud Workspace:** Provision a CPU workspace via [Saturn Cloud](https://saturncloud.io/). +2. **OpenAI API Key:** Generate an LLM API token via the [OpenAI Developer Platform](https://platform.openai.com/). *(Note: The Open-Meteo API utilized for weather data is open-source and requires no authentication key).* + +--- + +## πŸ—οΈ Setup & Deployment + +Open a terminal in your Saturn Cloud workspace and execute the following commands. + +**1. 
Create Virtual Environment & Install Dependencies** +Utilize standard Python tools to provision the isolated environment and install the required packages. + +```bash +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt +``` + +**2. Configure Environment Variables** +Create your `.env` file and define your API key. + +```bash +cp .env.example .env +nano .env +# Define OPENAI_API_KEY. Save and exit (Ctrl+O, Enter, Ctrl+X). + +``` + +--- + +## πŸ’‘ Execution Methods + +### Method A: Command-Line Interface (Backend Testing) + +To test the raw tool-calling logic and view system execution traces, initialize the terminal script: + +```bash +python weather_agent.py + +``` + +* Input your query when prompted. The system will log the Geocoding and Weather API calls before returning the final string. +* Input `exit` or `quit` to terminate the process. + +### Method B: Web Application (Streamlit UI) + +To launch the production-grade graphical interface, execute the Streamlit process: + +```bash +streamlit run app.py + +``` + +* Navigate to the **Local URL** output in the terminal (default: `http://localhost:8501`). +* Use the main chat interface to query the agent. 
+ +**Example Prompts:** + +* *"What is the weather like in Tokyo right now?"* +* *"Should I wear a jacket in London today?"* +* *"Compare the current temperatures in New York and Sydney."* + +--- + +## πŸ“š Official Documentation & References + +For further customization, refer to the official documentation for the stack components used in this project: + +* **Deployment Platform:** [Saturn Cloud Documentation](https://saturncloud.io/docs/) +* **AI Agent Framework:** [PydanticAI Documentation](https://ai.pydantic.dev/) +* **LLM Provider:** [OpenAI API Reference](https://platform.openai.com/docs/) +* **Weather API Routing:** [Open-Meteo API Reference](https://open-meteo.com/en/docs) +* **Web UI Framework:** [Streamlit Documentation](https://docs.streamlit.io/) \ No newline at end of file diff --git a/examples/start-agents/pydanticai_weather_bot/app.py b/examples/start-agents/pydanticai_weather_bot/app.py new file mode 100644 index 00000000..b056ff56 --- /dev/null +++ b/examples/start-agents/pydanticai_weather_bot/app.py @@ -0,0 +1,96 @@ +import streamlit as st +import os +import httpx +from dotenv import load_dotenv +from pydantic_ai import Agent + +# Initialize environment variables +load_dotenv() + +# --- UI CONFIGURATION --- +st.set_page_config( + page_title="PydanticAI Weather Agent", + page_icon="🌀️", + layout="centered", +) + +st.title("Weather Information Agent") +st.markdown("Real-time meteorological data retrieval via PydanticAI tool calling.") + +# Verify Environment +if not os.getenv("OPENAI_API_KEY"): + st.error("Environment Error: OPENAI_API_KEY is not defined in the environment variables.") + st.stop() + +# --- AGENT INITIALIZATION --- +@st.cache_resource(show_spinner=False) +def initialize_agent(): + """Initializes the PydanticAI agent and defines its external tools. Cached for performance.""" + agent = Agent( + 'openai:gpt-4o-mini', + system_prompt=( + "You are a concise, highly accurate weather assistant. 
"
+            "Use the provided tool to fetch real-time weather data for the user's requested location. "
+            "Extract the location from the prompt, fetch the data, and present the findings clearly, "
+            "including temperature and wind speed. Do not hallucinate metrics."
+        ),
+    )
+
+    @agent.tool_plain
+    def get_realtime_weather(location: str) -> str:
+        """Fetches current weather data for a specified city or location."""
+        # Geocoding Request
+        geocode_url = f"https://geocoding-api.open-meteo.com/v1/search?name={location}&count=1&format=json"
+        geo_response = httpx.get(geocode_url).json()
+
+        if not geo_response.get("results"):
+            return f"System Error: Could not find geographical coordinates for '{location}'."
+
+        lat = geo_response["results"][0]["latitude"]
+        lon = geo_response["results"][0]["longitude"]
+        country = geo_response["results"][0].get("country", "Unknown Region")
+
+        # Weather Request
+        weather_url = f"https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lon}&current=temperature_2m,wind_speed_10m&timezone=auto"
+        weather_data = httpx.get(weather_url).json()
+
+        current = weather_data.get("current", {})
+        temp = current.get("temperature_2m", "Unknown")
+        wind = current.get("wind_speed_10m", "Unknown")
+
+        return f"Location: {location}, {country}. Temperature: {temp}Β°C, Wind Speed: {wind} km/h."
+
+    return agent
+
+agent = initialize_agent()
+
+# --- STATE MANAGEMENT ---
+if "messages" not in st.session_state:
+    st.session_state.messages = [
+        {"role": "assistant", "content": "System online. 
Which location's weather would you like to check?"}
+    ]
+
+# Render chat history
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+# --- EXECUTION LOGIC ---
+if prompt := st.chat_input("Ask for the weather in a specific city..."):
+
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    with st.chat_message("assistant"):
+        with st.spinner("Executing Open-Meteo tool call sequence..."):
+            try:
+                # Execute the agent synchronously
+                result = agent.run_sync(prompt)
+
+                # Render and store response
+                st.markdown(result.output)
+                st.session_state.messages.append({"role": "assistant", "content": result.output})
+
+            except Exception as e:
+                st.error(f"API Execution Error: {str(e)}")
\ No newline at end of file
diff --git a/examples/start-agents/pydanticai_weather_bot/requirements.txt b/examples/start-agents/pydanticai_weather_bot/requirements.txt
new file mode 100644
index 00000000..e0ff04cb
--- /dev/null
+++ b/examples/start-agents/pydanticai_weather_bot/requirements.txt
@@ -0,0 +1,4 @@
+pydantic-ai>=0.0.14
+httpx>=0.27.0
+python-dotenv>=1.1.0
+streamlit>=1.32.0
\ No newline at end of file
diff --git a/examples/start-agents/pydanticai_weather_bot/weather_agent.py b/examples/start-agents/pydanticai_weather_bot/weather_agent.py
new file mode 100644
index 00000000..440006fd
--- /dev/null
+++ b/examples/start-agents/pydanticai_weather_bot/weather_agent.py
@@ -0,0 +1,73 @@
+import os
+import httpx
+from dotenv import load_dotenv
+from pydantic_ai import Agent
+
+# 1. Initialize environment variables
+load_dotenv()
+
+if not os.getenv("OPENAI_API_KEY"):
+    raise ValueError("Environment Error: OPENAI_API_KEY is not defined.")
+
+# 2. Configure the PydanticAI Agent
+agent = Agent(
+    'openai:gpt-4o-mini',
+    system_prompt=(
+        "You are a concise, highly accurate weather assistant. 
"
+        "Use the provided tool to fetch real-time weather data for the user's requested location. "
+        "Extract the location from the prompt, fetch the data, and present the findings clearly, "
+        "including temperature and wind speed."
+    ),
+)
+
+# 3. Define the Tool utilizing Pydantic validation natively
+@agent.tool_plain
+def get_realtime_weather(location: str) -> str:
+    """Fetches current weather data for a specified city or location."""
+    print(f" [System] Executing tool: Geocoding '{location}'...")
+
+    # Step A: Convert city name to coordinates
+    geocode_url = f"https://geocoding-api.open-meteo.com/v1/search?name={location}&count=1&format=json"
+    geo_response = httpx.get(geocode_url).json()
+
+    if not geo_response.get("results"):
+        return f"System Error: Could not find geographical coordinates for '{location}'."
+
+    lat = geo_response["results"][0]["latitude"]
+    lon = geo_response["results"][0]["longitude"]
+    country = geo_response["results"][0].get("country", "Unknown Region")
+
+    print(f" [System] Executing tool: Fetching weather for Lat: {lat}, Lon: {lon}...")
+
+    # Step B: Fetch weather using coordinates
+    weather_url = f"https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lon}&current=temperature_2m,wind_speed_10m&timezone=auto"
+    weather_data = httpx.get(weather_url).json()
+
+    current = weather_data.get("current", {})
+    temp = current.get("temperature_2m", "Unknown")
+    wind = current.get("wind_speed_10m", "Unknown")
+
+    return f"Location: {location}, {country}. Temperature: {temp}Β°C, Wind Speed: {wind} km/h."
+
+# 4. Execution Logic
+if __name__ == "__main__":
+    print("--- PydanticAI Weather Bot ---")
+    print("Agent is ready. 
(Type 'exit' to quit)") + + while True: + user_query = input("\nAsk for the weather: ") + + if user_query.lower() in ['exit', 'quit']: + print("Terminating process.") + break + + if user_query.strip(): + try: + # Execute the agent synchronously + result = agent.run_sync(user_query) + + # FIXED: Access the text payload via .output instead of .data + print(f"\nAgent: {result.output}") + + except Exception as e: + print(f"\nAPI Execution Error: {e}") \ No newline at end of file diff --git a/examples/start-agents/scheduling_agent/.env.example b/examples/start-agents/scheduling_agent/.env.example new file mode 100644 index 00000000..e610f9d5 --- /dev/null +++ b/examples/start-agents/scheduling_agent/.env.example @@ -0,0 +1,2 @@ +NEBIUS_API_KEY="your_nebius_api_key_here" +CALCOM_API_KEY="your_calcom_api_key_here" \ No newline at end of file diff --git a/examples/start-agents/scheduling_agent/README.md b/examples/start-agents/scheduling_agent/README.md new file mode 100644 index 00000000..cbb51444 --- /dev/null +++ b/examples/start-agents/scheduling_agent/README.md @@ -0,0 +1,100 @@ +# πŸ“… Scheduling Agent Dashboard + +*Deploy this AI agent on [Saturn Cloud](https://saturncloud.io/).* + +**Hardware:** CPU | **Resource:** Streamlit Web App | **Tech Stack:** Python, Agno, Nebius AI, Cal.com API, Streamlit + +

+ Saturn Cloud + Streamlit + Agno + Nebius AI + Cal.com +

+ +## πŸ“– Overview + +This template provides a Streamlit web interface for an Autonomous Scheduling Agent. Built using the **Agno** framework, it enables users to interact with an AI agent capable of executing calendar operations via tool calling. + +The agent integrates directly with the **Cal.com** API. Through the chat interface, users can query availability, create bookings, or retrieve upcoming events. The agent autonomously handles the routing and API execution required to fulfill these requests. + +### Infrastructure Deployment (Saturn Cloud) + +Deploying this architecture on [Saturn Cloud](https://saturncloud.io/) provides several environment benefits: +* **Persistent Compute:** Maintains the Streamlit server process in the background. +* **Secrets Management:** Secures API keys and environment variables via isolated `.env` configurations. +* **Environment Isolation:** Provisions dedicated compute resources for Python package execution without local dependency conflicts. + +--- + +## βœ… Prerequisites + +1. **Saturn Cloud Workspace:** Provision a CPU workspace via [Saturn Cloud](https://saturncloud.io/). +2. **Nebius API Key:** Generate an LLM API token via the [Nebius Token Factory](https://studio.nebius.ai/). +3. **Cal.com API Key:** Generate an API key from **Settings -> Developer -> API Keys** in your [Cal.com](https://cal.com/) account. + +--- + +## πŸ—οΈ Setup & Deployment + +Open a terminal in your Saturn Cloud workspace and execute the following commands. + +**1. Create Virtual Environment & Install Dependencies** +```bash +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt + +``` + +**2. Configure Environment Variables** +Create your `.env` file and define your API keys. + +```bash +cp .env.example .env +nano .env +# Define NEBIUS_API_KEY and CALCOM_API_KEY. Save and exit (Ctrl+O, Enter, Ctrl+X). + +``` + +**3. Initialize the Application** +Start the Streamlit server process. 

+
+```bash
+streamlit run app.py
+
+```
+
+Navigate to the **Local URL** provided in the terminal output (default: `http://localhost:8501`) to access the web interface.
+
+---
+
+## πŸ’‘ Usage Guide
+
+1. **Configuration:** Use the left sidebar to set the agent parameters.
+
+* Select your target **Timezone** (IANA format) from the dropdown menu.
+* Select the **Meeting Type** you wish to book (e.g., "15 Min Meeting"). The application fetches these dynamically via the Cal.com API.
+
+2. **Execution:** Input natural language commands in the main chat interface.
+
+**Example Prompts:**
+
+* *"Check my availability for tomorrow between 9am and 12pm."*
+* *"Book a meeting with 'Saturn Test' at test@example.com for tomorrow at 10am."*
+* *"What bookings do I have coming up for test@example.com?"*
+
+The UI will indicate a processing state while the agent interfaces with the LLM and Cal.com APIs to execute the scheduling logic.
+
+---
+
+## πŸ“š Official Documentation & References
+
+For further customization, refer to the official documentation for the stack components used in this project:
+
+* **Deployment Platform:** [Saturn Cloud Documentation](https://saturncloud.io/docs/)
+* **AI Agent Framework:** [Agno (PhiData) Documentation](https://docs.agno.com/)
+* **LLM Provider:** [Nebius AI Studio Documentation](https://docs.nebius.com/studio/)
+* **Scheduling Engine:** [Cal.com API Reference](https://cal.com/docs/introduction/api)
+* **Web UI Framework:** [Streamlit Documentation](https://docs.streamlit.io/)
+
diff --git a/examples/start-agents/scheduling_agent/app.py b/examples/start-agents/scheduling_agent/app.py
new file mode 100644
index 00000000..53b9598c
--- /dev/null
+++ b/examples/start-agents/scheduling_agent/app.py
@@ -0,0 +1,150 @@
+import streamlit as st
+import nest_asyncio
+import os
+import requests
+import pytz
+from datetime import datetime
+from dotenv import load_dotenv
+
+from agno.agent import Agent
+from 
agno.models.nebius import Nebius +from agno.tools.calcom import CalComTools + +# Apply asyncio patch for running agents in Streamlit +nest_asyncio.apply() + +# Load environment variables +load_dotenv() + +# --- PAGE CONFIGURATION --- +st.set_page_config( + page_title="AI Scheduling Assistant", + page_icon="πŸ“…", + layout="wide", + initial_sidebar_state="expanded", +) + +st.title("πŸ“… Autonomous Scheduling Assistant") +st.markdown(""" +This agent connects to your **Cal.com** account to check availability and book meetings autonomously. +Tell it what you need, and it will handle the logistics. +""") + +# --- SIDEBAR CONFIGURATION --- +st.sidebar.header("βš™οΈ Configuration") + +nebius_key = os.getenv("NEBIUS_API_KEY") +calcom_key = os.getenv("CALCOM_API_KEY") + +if not nebius_key or not calcom_key: + st.sidebar.error("⚠️ Missing API Keys in .env file!") + st.stop() + +# 1. Dynamic Timezone Selection (UPGRADED TO DROPDOWN) +all_timezones = pytz.all_timezones +# Default to America/New_York, but fallback to 0 (Africa/Abidjan) if not found +default_tz_index = all_timezones.index("America/New_York") if "America/New_York" in all_timezones else 0 + +user_timezone = st.sidebar.selectbox( + "Your Timezone", + options=all_timezones, + index=default_tz_index, + help="Select your local timezone so meetings are booked correctly." +) + +# 2. 
Dynamic Event Type Fetching +@st.cache_data(ttl=600) # Cache API response for 10 mins +def fetch_calcom_events(api_key): + url = "https://api.cal.com/v2/event-types" + headers = {"Authorization": f"Bearer {api_key}"} + try: + response = requests.get(url, headers=headers) + response.raise_for_status() + data = response.json() + # Create a dictionary mapping "Title" -> "ID" + return {event['title']: event['id'] for event in data['data']['eventTypeGroups'][0]['eventTypes']} + except Exception as e: + st.sidebar.error(f"Failed to fetch Cal.com events: {e}") + return {} + +event_options = fetch_calcom_events(calcom_key) + +if not event_options: + st.sidebar.warning("No Event Types found in your Cal.com account.") + st.stop() + +selected_event_name = st.sidebar.selectbox("Select Meeting Type to Book", options=list(event_options.keys())) +selected_event_id = event_options[selected_event_name] + + +# --- AGENT SETUP (Cached Resource) --- +@st.cache_resource +def get_scheduling_agent(calcom_api_key, event_id, timezone): + + # Define system instructions + instructions = f"""You are a scheduling assistant. Today is {datetime.now().strftime("%Y-%m-%d")}. +Your goal is to autonomously manage calendar bookings using the available tools. + +IMPORTANT REASONING STEPS: +1. **Check Availability First:** Always use `get_available_slots(start_date, end_date)` before attempting to book. +2. **Book the Slot:** If a slot is available, use `create_booking(start_time, name, email)`. +3. **Verify:** After booking, confirm it exists using `get_upcoming_bookings(email)`. + +When asked to book a call, you MUST follow these steps sequentially. Do not skip verification. confirm to the user only after verification succeeds. 
+""" + # Initialize tools with selected settings + tools = CalComTools( + user_timezone=timezone, + api_key=calcom_api_key, + event_type_id=event_id + ) + + # Initialize agent (Removed show_tool_calls for Agno v2 compatibility) + return Agent( + name="Calendar Assistant", + instructions=[instructions], + model=Nebius( + id="Qwen/Qwen3-30B-A3B-Instruct-2507", + api_key=os.getenv("NEBIUS_API_KEY") + ), + tools=[tools], + markdown=True, + ) + +# Load the agent with current sidebar settings +agent = get_scheduling_agent(calcom_key, selected_event_id, user_timezone) + + +# --- CHAT INTERFACE --- + +# Initialize chat history +if "messages" not in st.session_state: + # Add an initial greeting from the assistant + st.session_state.messages = [ + {"role": "assistant", "content": f"Hello! I'm ready to schedule **'{selected_event_name}'** meetings for you in the **{user_timezone}** timezone.\n\nWhat would you like me to do?"} + ] + +# Display chat messages from history on app rerun +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +# Accept user input +if prompt := st.chat_input("e.g., 'Book a meeting with John Doe tomorrow at 10am'"): + # Add user message to chat history + st.session_state.messages.append({"role": "user", "content": prompt}) + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + + # Display assistant response in chat message container + with st.chat_message("assistant"): + with st.spinner("Checking calendar and reasoning..."): + try: + # Run the agent + response = agent.run(prompt) + st.markdown(response.content) + # Add assistant response to chat history + st.session_state.messages.append({"role": "assistant", "content": response.content}) + except Exception as e: + st.error(f"An error occurred: {e}") \ No newline at end of file diff --git a/examples/start-agents/scheduling_agent/requirements.txt 
b/examples/start-agents/scheduling_agent/requirements.txt new file mode 100644 index 00000000..f33e8ce1 --- /dev/null +++ b/examples/start-agents/scheduling_agent/requirements.txt @@ -0,0 +1,8 @@ +agno>=1.5.1 +openai>=1.78.1 +python-dotenv>=1.1.0 +requests +pytz +jupyterlab +nest-asyncio +streamlit \ No newline at end of file