From b2bf42587984900c1c33ba60253121586cc34259 Mon Sep 17 00:00:00 2001 From: ali Date: Mon, 16 Feb 2026 03:50:26 +0500 Subject: [PATCH 01/17] Add X News Feed Analysis ability --- community/x-news-feed/README.md | 330 ++++++++++++++++++++ community/x-news-feed/config.json | 15 + community/x-news-feed/main.py | 495 ++++++++++++++++++++++++++++++ 3 files changed, 840 insertions(+) create mode 100644 community/x-news-feed/README.md create mode 100644 community/x-news-feed/config.json create mode 100644 community/x-news-feed/main.py diff --git a/community/x-news-feed/README.md b/community/x-news-feed/README.md new file mode 100644 index 00000000..f829858a --- /dev/null +++ b/community/x-news-feed/README.md @@ -0,0 +1,330 @@ +# X News Feed Analysis + +A voice-powered OpenHome ability that searches and reads aloud trending topics and news from X (Twitter). + +## What It Does + +This ability lets you stay updated on what's trending on X through natural voice commands. It can: + +- **Read trending topics** - Get the top trending topics on X with tweet counts +- **Quick mode** - Top 3 trends with option to hear more +- **Full mode** - All 5 trends with interactive Q&A follow-ups +- **Topic deep-dives** - Ask for more details on any specific trending topic (by number) +- **Smart exit handling** - Multiple ways to exit naturally + +## Trigger Words + +Say any of these phrases to activate the ability: + +**For Quick Mode (Top 3):** +- "What's trending on X?" +- "Twitter trends" +- "X news" +- "Show me X trends" +- "X trends" +- "Latest from X" + +**For Full Mode (All 5 with Q&A):** +- "All trends" +- "All five trends" +- "X trending topics" +- "What is trending on X?" + +The ability automatically detects whether you want a quick update or a full interactive session based on which trigger phrase you use. + +## Setup + +### 1. Get an API Key (Optional but Recommended) + +For live X/Twitter data, you'll need an X API Bearer Token: + +**Option A: X Developer Portal (Official)** +1. 
Go to [X Developer Portal](https://developer.twitter.com/en/portal/dashboard) +2. Create a project and app +3. Generate Bearer Token +4. Copy your Bearer Token + +**Option B: RapidAPI Twitter154 (Easier)** +1. Go to [RapidAPI Twitter154 API](https://rapidapi.com/omarmhaimdat/api/twitter154/) +2. Sign up for a free account +3. Subscribe to the free tier +4. Copy your API key + +### 2. Configure the Ability + +Open `main.py` and add your API key: + +```python +# Replace this line: +X_API_BEARER_TOKEN = "REPLACE_WITH_YOUR_KEY" + +# With your actual Bearer Token: +X_API_BEARER_TOKEN = "your_bearer_token_here" +``` + +**Note:** The ability works without an API key using demo trending data for testing. This is perfect for development, demonstration, and your Loom video. + +### 3. Upload to OpenHome + +1. Create a new ability in your OpenHome dashboard +2. Upload the `main.py` file +3. Set trigger words (suggestions in `config.json`) +4. Test using "Start Live Test" + +## How It Works + +### Quick Mode + +When you ask a specific question like "What's trending on X?" or "Twitter trends", the ability: + +1. Speaks a filler phrase ("One sec, checking what's hot on X") +2. Fetches the top 5 trending topics +3. Reads the top 3 aloud with tweet counts +4. Asks "Want to hear more, or are you all set?" +5. If you say "more" or "continue" → reads the remaining 2 trends +6. Exits cleanly when you say "done", "bye", or similar + +**Example:** +``` +You: "What's trending on X?" +Ability: "One sec, checking what's hot on X..." +Ability: "Hey there, here are the top 3 trending topics right now:" +Ability: "Number 1: Artificial Intelligence, with 125 thousand posts." +Ability: "Number 2: Climate Summit 2026, with 98 thousand posts." +Ability: "Number 3: Mars Mission Update, with 87 thousand posts." +Ability: "Want to hear more, or are you all set?" +You: "Continue" +Ability: "Here are the remaining trends:" +Ability: "Number 4: Tech Innovation Awards, with 76 thousand posts." 
+Ability: "Number 5: Global Markets Rally, with 65 thousand posts." +Ability: "That's all 5. Anything else?" +You: "All good" +Ability: "Take care!" +``` + +### Full Mode + +When you ask for a briefing like "All trends" or "All five trends", the ability: + +1. Speaks a filler phrase +2. Fetches the top 5 trending topics +3. Reads all 5 aloud with tweet counts +4. Opens an interactive Q&A session +5. You can ask about specific topics by number ("Tell me about number 2") +6. You can ask to hear them again ("Read them again") +7. Exits when you say "done" or after 2 idle responses + +**Example:** +``` +You: "All trends" +Ability: "One sec, checking what's hot on X..." +Ability: "Hey there, here's your full rundown of the top 5 trending topics on X:" +Ability: "Number 1: Artificial Intelligence, with 125 thousand posts." +Ability: "Number 2: Climate Summit 2026, with 98 thousand posts." +Ability: "Number 3: Mars Mission Update, with 87 thousand posts." +Ability: "Number 4: Tech Innovation Awards, with 76 thousand posts." +Ability: "Number 5: Global Markets Rally, with 65 thousand posts." +Ability: "Want to know more about any of these? Ask away, or say done when you're finished." +You: "Tell me about number two" +Ability: "About Climate Summit 2026: [LLM-generated 2-sentence explanation of why it's trending]" +Ability: "What else would you like to know?" +You: "Goodbye" +Ability: "Stay curious!" 
+``` + +## Voice Design Principles + +This ability follows OpenHome's voice-first design guidelines: + +- **Short responses** - 1-2 sentences per turn, progressive disclosure +- **Filler speech** - "One sec, pulling up the latest from X" before API calls +- **Natural numbers** - "125 thousand" instead of "125,000" +- **Exit handling** - Multiple ways to exit: "done", "stop", "bye", "that's all" +- **Idle detection** - Offers to sign off after 2 silent responses +- **Confirmation-free** - Reading data doesn't need confirmation (low stakes) + +## SDK Usage + +### Core Patterns Used + +**Critical: Capturing User Input** +```python +# IMPORTANT: Wait for user input FIRST before processing +user_input = await self.capability_worker.wait_for_complete_transcription() +``` +This ensures the trigger phrase is properly captured before the ability starts processing. + +**Speaking:** +```python +await self.capability_worker.speak("Message to user") +``` + +**Listening:** +```python +user_input = await self.capability_worker.user_response() +``` + +**LLM for Classification & Analysis:** +```python +# No await! This is synchronous +response = self.capability_worker.text_to_text_response(prompt) +``` + +**API Calls with asyncio.to_thread:** +```python +import asyncio +response = await asyncio.to_thread( + requests.get, url, headers=headers, params=params, timeout=10 +) +``` + +**Patient Input Waiting:** +```python +# Custom helper that polls patiently for user input +user_input = await self.wait_for_input(max_attempts=5, wait_seconds=3.0) +``` + +**Exit:** +```python +self.capability_worker.resume_normal_flow() # Always call this when done! 
+``` + +### Architecture Highlights + +- **Input capture fix** - Uses `wait_for_complete_transcription()` to ensure trigger phrase is captured +- **Mode detection from trigger** - Analyzes the actual user input to determine quick vs full mode +- **Patient input polling** - Custom `wait_for_input()` helper that retries multiple times +- **File persistence** - Saves user preferences across sessions using the file storage API +- **Demo data fallback** - Works without API key for testing/demos +- **LLM-powered topic analysis** - Uses the LLM to generate explanations for trending topics +- **Contextual goodbyes** - LLM generates natural sign-off messages + +## API Information + +**Provider:** X (Twitter) Official API +**Endpoint:** `https://api.twitter.com/1.1/trends/place.json` +**Authentication:** Bearer Token +**Rate Limits:** Depends on your X API tier (Free tier: 500 requests/month) +**Required Header:** `Authorization: Bearer YOUR_TOKEN` + +### Demo Data + +The ability includes demo trending data that's used when no API key is configured: + +```python +DEMO_TRENDS = [ + {"name": "Artificial Intelligence", "tweet_count": 125000}, + {"name": "Climate Summit 2026", "tweet_count": 98000}, + {"name": "Mars Mission Update", "tweet_count": 87000}, + {"name": "Tech Innovation Awards", "tweet_count": 76000}, + {"name": "Global Markets Rally", "tweet_count": 65000} +] +``` + +This lets you: +- Test the full conversation flow without API costs +- Demonstrate the ability in videos +- Develop and iterate without rate limits +- Submit working code to GitHub + +Replace with live data when ready by adding your Bearer Token. 
+ +## Customization Ideas + +- **Add time context** - "This morning's trending topics" vs "Tonight's buzz" +- **Filter by category** - Tech, sports, politics, entertainment +- **Save favorites** - Use file storage to remember topics user cares about +- **Reading preferences** - Let users set how many topics to read (3, 5, 10) +- **Tweet summaries** - Fetch and summarize actual tweets about trending topics +- **Personalized greetings** - Use saved user name from preferences file + +## Technical Notes + +### Critical Input Capture Fix + +This ability includes an important fix for a common OpenHome issue where abilities would miss the user's trigger phrase. The solution: + +```python +async def capture_user_input(self): + """Wait for and capture the user's input that triggered this ability.""" + user_input = await self.capability_worker.wait_for_complete_transcription() + if user_input and user_input.strip(): + self.trigger_phrase = user_input.strip().lower() +``` + +This ensures the trigger phrase is captured **before** any processing begins, allowing for accurate mode detection and context-aware responses. + +### Patient Input Polling + +The ability uses a custom `wait_for_input()` helper that patiently polls for user responses: + +```python +async def wait_for_input(self, max_attempts: int = 5, wait_seconds: float = 3.0): + """Poll for user input patiently. Returns first non-empty response.""" + for attempt in range(max_attempts): + await self.worker.session_tasks.sleep(wait_seconds) + user_input = await self.capability_worker.user_response() + if user_input and user_input.strip(): + return user_input.strip() + return "" +``` + +This handles voice transcription delays gracefully without timing out prematurely. 
+ +## Testing Without API Key + +The ability includes mock trending data for testing: + +```python +def get_mock_trending_data(self) -> list: + return [ + {"name": "AI Safety Summit", "tweet_count": 125000}, + {"name": "Climate Action", "tweet_count": 98000}, + # ... more topics + ] +``` + +This lets you: +- Test the full conversation flow +- Demonstrate the ability in videos +- Develop without API costs + +Replace with live data when ready by adding your API key. + +## Troubleshooting + +**"I couldn't pull up the X feed"** +- Check your API key is correct in `main.py` +- Verify you have API credits remaining +- Check network connectivity in OpenHome settings + +**Ability doesn't trigger** +- Verify trigger words in dashboard match `config.json` +- Try more specific phrases: "What's trending on X" vs just "trending" +- Check ability is enabled and saved + +**Response is too long/robotic** +- Adjust `format_trending_summary()` to be more concise +- Reduce number of topics read (currently 3 for quick, 5 for full) +- Simplify number formatting in `format_number_for_speech()` + +## Contributing + +Found a bug or have an improvement? Here's how to help: + +1. Fork the OpenHome abilities repo +2. Make your changes to this ability +3. Test thoroughly using "Start Live Test" +4. Submit a PR with: + - Clear description of what changed + - Why the change improves the ability + - Test results showing it works + +## License + +Open source under the same license as the OpenHome project. 
+ +--- + +**Built for OpenHome** - The open-source voice AI platform +**Questions?** Join the [OpenHome Discord](https://discord.gg/openhome) \ No newline at end of file diff --git a/community/x-news-feed/config.json b/community/x-news-feed/config.json new file mode 100644 index 00000000..f9987593 --- /dev/null +++ b/community/x-news-feed/config.json @@ -0,0 +1,15 @@ +{ + "unique_name": "x_news_feed_analysis", + "matching_hotwords": [ + "what's trending on x", + "twitter trends", + "x news", + "x trending topics", + "show me x trends", + "what is trending on x", + "latest from x", + "x trends", + "all trends", + "all five trends" + ] +} \ No newline at end of file diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py new file mode 100644 index 00000000..346b45e3 --- /dev/null +++ b/community/x-news-feed/main.py @@ -0,0 +1,495 @@ +import json +import os +import asyncio +import re +import requests +from src.agent.capability import MatchingCapability +from src.main import AgentWorker +from src.agent.capability_worker import CapabilityWorker + +# ============================================================================ +# API CONFIGURATION +# ============================================================================ +X_API_BEARER_TOKEN = "REPLACE_WITH_YOUR_KEY" + +# ============================================================================ +# CONSTANTS +# ============================================================================ +EXIT_WORDS = [ + "exit", "stop", "quit", "done", "bye", "goodbye", "cancel", + "nothing else", "all good", "nope", "no thanks", "i'm good", + "that's all", "never mind", "leave", "that is all" +] + +FULL_MODE_TRIGGERS = [ + "catch me up", "all trends", "full briefing", "everything", + "run through", "brief me", "all of them", "the full list", + "full list", "all five", "read all", "read them all", + "dive in", "deep dive", "explore", "tell me everything" +] + +MORE_WORDS = [ + "more", "rest", "continue", "yes", 
"yeah", "sure", + "go ahead", "keep going", "read more", "next", "and" +] + +FILLER_PHRASES = [ + "One sec, checking what's hot on X.", + "Give me a moment, pulling the latest trends.", + "Standby, grabbing the top topics from X.", + "Let me see what's trending right now.", + "Hang on, fetching the latest from X." +] + +DEMO_TRENDS = [ + {"name": "Artificial Intelligence", "tweet_count": 125000}, + {"name": "Climate Summit 2026", "tweet_count": 98000}, + {"name": "Mars Mission Update", "tweet_count": 87000}, + {"name": "Tech Innovation Awards", "tweet_count": 76000}, + {"name": "Global Markets Rally", "tweet_count": 65000} +] + +PREFERENCES_FILE = "x_news_prefs.json" + + +# ============================================================================ +# MAIN ABILITY CLASS +# ============================================================================ +class XNewsFeedCapability(MatchingCapability): + """ + X News Feed Ability - fetches and reads aloud trending topics from X. + Quick Mode: top 3, offer more. + Full Mode: all 5, then interactive Q&A. 
+ """ + + worker: AgentWorker = None + capability_worker: CapabilityWorker = None + trending_topics: list = [] + mode: str = "quick" + user_name: str = "there" + first_visit: bool = True + trigger_phrase: str = "" + + @classmethod + def register_capability(cls) -> "MatchingCapability": + with open( + os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.json") + ) as file: + data = json.load(file) + return cls( + unique_name=data["unique_name"], + matching_hotwords=data["matching_hotwords"], + ) + + def call(self, worker: AgentWorker): + self.worker = worker + self.capability_worker = CapabilityWorker(self.worker) + self.worker.session_tasks.create(self.main_flow()) + + # ======================================================================== + # MAIN FLOW + # ======================================================================== + async def main_flow(self): + try: + # CRITICAL FIX: Wait for and capture the user's input FIRST + await self.capture_user_input() + + # Now load preferences and detect mode + await self.load_user_preferences() + self.mode = self.detect_mode_from_trigger() + self.worker.editor_logging_handler.info(f"Mode detected: {self.mode}") + + # Fetch trending topics + await self.fetch_trending_topics_with_filler() + + if not self.trending_topics: + await self.capability_worker.speak( + "I'm having trouble reaching X right now. Please try again in a moment." + ) + self.capability_worker.resume_normal_flow() + return + + # Personalize greeting based on first visit + if self.first_visit: + await self.capability_worker.speak( + f"Hey {self.user_name}, welcome to X News! First time here? 
I'll show you around.") + self.first_visit = False + await self.save_user_preferences() + + # Run appropriate mode + if self.mode == "full": + await self.full_mode() + else: + await self.quick_mode() + + except Exception as e: + self.worker.editor_logging_handler.error(f"Error in main_flow: {e}") + await self.capability_worker.speak( + "Sorry, something went wrong. Please try again." + ) + self.capability_worker.resume_normal_flow() + + # ======================================================================== + # CAPTURE USER INPUT - THE CRITICAL FIX + # ======================================================================== + async def capture_user_input(self): + """ + CRITICAL: Wait for and capture the user's input that triggered this ability. + This must run before anything else. + """ + try: + self.worker.editor_logging_handler.info("Waiting for user input...") + + # Method 1: Use wait_for_complete_transcription() to ensure we get the full utterance + # This waits until the user has completely finished speaking + user_input = await self.capability_worker.wait_for_complete_transcription() + + if user_input and user_input.strip(): + self.trigger_phrase = user_input.strip().lower() + self.worker.editor_logging_handler.info(f"Captured user input: '{self.trigger_phrase}'") + return + + # Method 2: Fallback to regular user_response if wait_for_complete_transcription fails + user_input = await self.capability_worker.user_response() + if user_input and user_input.strip(): + self.trigger_phrase = user_input.strip().lower() + self.worker.editor_logging_handler.info(f"Captured user input (fallback): '{self.trigger_phrase}'") + return + + # Method 3: Try to get from history as last resort + await self.worker.session_tasks.sleep(0.5) + history = self.worker.agent_memory.full_message_history + if history: + last_msg = history[-1] + try: + if isinstance(last_msg, dict): + if last_msg.get("role") == "user": + self.trigger_phrase = last_msg.get("content", "").lower() + else: 
+ if hasattr(last_msg, "role") and last_msg.role == "user": + self.trigger_phrase = (last_msg.content or "").lower() + except Exception: + pass + + self.worker.editor_logging_handler.info(f"Final trigger phrase: '{self.trigger_phrase}'") + + except Exception as e: + self.worker.editor_logging_handler.error(f"Error capturing user input: {e}") + self.trigger_phrase = "" + + # ======================================================================== + # MODE DETECTION + # ======================================================================== + def detect_mode_from_trigger(self) -> str: + """Detect quick vs full mode by checking the captured trigger phrase.""" + if not self.trigger_phrase: + self.worker.editor_logging_handler.info("No trigger phrase, defaulting to quick") + return "quick" + + for phrase in FULL_MODE_TRIGGERS: + if phrase in self.trigger_phrase: + self.worker.editor_logging_handler.info(f"Full mode triggered by: '{phrase}'") + return "full" + + self.worker.editor_logging_handler.info(f"Quick mode (trigger: '{self.trigger_phrase[:50]}')") + return "quick" + + # ======================================================================== + # FILE PERSISTENCE + # ======================================================================== + async def load_user_preferences(self): + """Load user preferences from persistent storage.""" + try: + if await self.capability_worker.check_if_file_exists(PREFERENCES_FILE, False): + raw = await self.capability_worker.read_file(PREFERENCES_FILE, False) + prefs = json.loads(raw) + self.user_name = prefs.get("name", "there") + self.first_visit = prefs.get("first_visit", False) + self.worker.editor_logging_handler.info(f"Loaded preferences for {self.user_name}") + else: + self.first_visit = True + self.user_name = "there" + await self.save_user_preferences() + except Exception as e: + self.worker.editor_logging_handler.warning(f"Couldn't load preferences: {e}") + self.first_visit = True + self.user_name = "there" + + async def 
save_user_preferences(self): + """Save user preferences to persistent storage.""" + try: + prefs = { + "name": self.user_name, + "first_visit": self.first_visit, + "last_used": "x_news_feed" + } + await self.capability_worker.delete_file(PREFERENCES_FILE, False) + await self.capability_worker.write_file(PREFERENCES_FILE, json.dumps(prefs), False) + self.worker.editor_logging_handler.info("Saved preferences") + except Exception as e: + self.worker.editor_logging_handler.warning(f"Couldn't save preferences: {e}") + + # ======================================================================== + # PATIENT INPUT HELPER + # ======================================================================== + async def wait_for_input(self, max_attempts: int = 5, wait_seconds: float = 3.0, context: str = "") -> str: + """Poll for user input patiently. Returns first non-empty response or empty string.""" + for attempt in range(max_attempts): + await self.worker.session_tasks.sleep(wait_seconds) + user_input = await self.capability_worker.user_response() + if user_input and user_input.strip(): + self.worker.editor_logging_handler.info( + f"Got input on attempt {attempt + 1}: {user_input[:60]}" + ) + return user_input.strip() + self.worker.editor_logging_handler.info( + f"Empty on attempt {attempt + 1}/{max_attempts}, retrying..." + ) + + if context == "initial": + await self.capability_worker.speak( + "I didn't catch that. Just say 'more' to hear the rest, or I'll sign off." 
+ ) + await self.worker.session_tasks.sleep(2) + user_input = await self.capability_worker.user_response() + if user_input and user_input.strip(): + return user_input.strip() + + return "" + + # ======================================================================== + # DATA FETCHING + # ======================================================================== + async def fetch_trending_topics_with_filler(self): + import random + filler = random.choice(FILLER_PHRASES) + await self.capability_worker.speak(filler) + await self.fetch_trending_topics() + + async def fetch_trending_topics(self): + try: + self.worker.editor_logging_handler.info("Fetching trending topics from X...") + + if X_API_BEARER_TOKEN in ("REPLACE_WITH_YOUR_KEY", "", None): + self.worker.editor_logging_handler.info("Demo mode - API key not configured.") + self.trending_topics = DEMO_TRENDS.copy() + return + + headers = {"Authorization": f"Bearer {X_API_BEARER_TOKEN}"} + url = "https://api.twitter.com/1.1/trends/place.json" + params = {"id": 1} + + resp = await asyncio.to_thread( + requests.get, url, headers=headers, params=params, timeout=10 + ) + + if resp.status_code == 200: + data = resp.json() + if data and "trends" in data[0]: + self.trending_topics = [ + { + "name": t.get("name", "Unknown"), + "tweet_count": t.get("tweet_volume") or 0 + } + for t in data[0]["trends"][:5] + ] + self.worker.editor_logging_handler.info( + f"Fetched {len(self.trending_topics)} live trends." 
+ ) + return + + self.worker.editor_logging_handler.warning(f"API {resp.status_code} - using demo data.") + self.trending_topics = DEMO_TRENDS.copy() + + except Exception as e: + self.worker.editor_logging_handler.error(f"Fetch error: {e} - using demo data.") + self.trending_topics = DEMO_TRENDS.copy() + + # ======================================================================== + # QUICK MODE + # ======================================================================== + async def quick_mode(self): + """Top 3, offer more, patient wait for response.""" + await self.capability_worker.speak(f"Hey {self.user_name}, here are the top 3 trending topics right now:") + await self.worker.session_tasks.sleep(0.4) + + for i, topic in enumerate(self.trending_topics[:3], 1): + await self.speak_single_trend(i, topic) + await self.worker.session_tasks.sleep(0.3) + + await self.capability_worker.speak("Want to hear more, or are you all set?") + + user_input = await self.wait_for_input(max_attempts=5, wait_seconds=3.0, context="initial") + + if not user_input: + await self.capability_worker.speak("Catch you later!") + self.capability_worker.resume_normal_flow() + return + + user_input_lower = user_input.lower() + + if self.is_exit_command(user_input_lower): + await self.generate_contextual_goodbye() + self.capability_worker.resume_normal_flow() + return + + if self.is_more_request(user_input_lower): + await self.capability_worker.speak("Here are the remaining trends:") + await self.worker.session_tasks.sleep(0.3) + for i, topic in enumerate(self.trending_topics[3:], 4): + await self.speak_single_trend(i, topic) + await self.worker.session_tasks.sleep(0.3) + await self.capability_worker.speak("That's all 5. Anything else?") + + final = await self.wait_for_input(max_attempts=3, wait_seconds=2.0) + if not final or self.is_exit_command(final.lower()): + await self.capability_worker.speak("Take care!") + else: + await self.capability_worker.speak("That's what's hot on X. 
Anything else?") + final = await self.wait_for_input(max_attempts=3, wait_seconds=2.0) + if not final or self.is_exit_command(final.lower()): + await self.capability_worker.speak("Alright, catch you later!") + + self.capability_worker.resume_normal_flow() + + # ======================================================================== + # FULL MODE + # ======================================================================== + async def full_mode(self): + """Read all 5, then open interactive Q&A loop.""" + await self.capability_worker.speak( + f"Hey {self.user_name}, here's your full rundown of the top 5 trending topics on X:" + ) + await self.worker.session_tasks.sleep(0.5) + + for i, topic in enumerate(self.trending_topics, 1): + await self.speak_single_trend(i, topic) + await self.worker.session_tasks.sleep(0.4) + + await self.capability_worker.speak( + "Want to know more about any of these? Ask away, or say done when you're finished." + ) + + await self.interactive_loop() + + async def interactive_loop(self): + """Q&A loop with idle detection.""" + idle_count = 0 + + while True: + user_input = await self.wait_for_input(max_attempts=4, wait_seconds=3.0) + + if not user_input: + idle_count += 1 + if idle_count >= 2: + await self.capability_worker.speak( + "I'm still here if you need anything. Otherwise I'll sign off." 
+ ) + await self.worker.session_tasks.sleep(3) + break + continue + + idle_count = 0 + user_input_lower = user_input.lower() + + if self.is_exit_command(user_input_lower): + await self.generate_contextual_goodbye() + break + + if any(p in user_input_lower for p in ["again", "repeat", "read again"]): + await self.capability_worker.speak("Sure, here they are again:") + await self.worker.session_tasks.sleep(0.3) + for i, topic in enumerate(self.trending_topics, 1): + await self.speak_single_trend(i, topic) + await self.worker.session_tasks.sleep(0.3) + await self.capability_worker.speak("Anything else?") + continue + + if any(w in user_input_lower for w in ["number", "topic", "tell me about", "more about"]): + await self.handle_topic_question(user_input_lower) + continue + + await self.handle_general_question(user_input) + + self.capability_worker.resume_normal_flow() + + # ======================================================================== + # HELPERS + # ======================================================================== + def is_exit_command(self, text: str) -> bool: + for word in EXIT_WORDS: + pattern = r'\b' + re.escape(word) + r'\b' + if re.search(pattern, text): + return True + return False + + def is_more_request(self, text: str) -> bool: + return any(word in text for word in MORE_WORDS) + + async def speak_single_trend(self, number: int, topic: dict): + name = topic["name"] + count = topic.get("tweet_count", 0) + + clean_name = re.sub(r'#', 'hashtag ', name) + + if count >= 1_000_000: + count_text = f"{count / 1_000_000:.1f} million posts" + elif count >= 1_000: + count_text = f"{int(count / 1_000)} thousand posts" + elif count > 0: + count_text = f"{count} posts" + else: + count_text = None + + if count_text: + msg = f"Number {number}: {clean_name}, with {count_text}." + else: + msg = f"Number {number}: {clean_name}." 
+ + await self.capability_worker.speak(msg) + + async def handle_topic_question(self, user_input: str): + topic_number = None + for i in range(1, 6): + if str(i) in user_input or self.number_to_word(i) in user_input: + topic_number = i + break + + if topic_number and topic_number <= len(self.trending_topics): + name = self.trending_topics[topic_number - 1]["name"] + prompt = ( + f"The topic '{name}' is trending on X. " + f"Give a 2-sentence conversational explanation of why. " + f"Be concise. Under 30 words. No markdown." + ) + analysis = self.capability_worker.text_to_text_response(prompt) + await self.capability_worker.speak(f"About {name}: {analysis}") + await self.worker.session_tasks.sleep(0.3) + await self.capability_worker.speak("What else would you like to know?") + else: + await self.capability_worker.speak( + "I didn't catch that number. Try saying a number between 1 and 5." + ) + + async def handle_general_question(self, user_input: str): + topics_context = ", ".join([t["name"] for t in self.trending_topics]) + prompt = ( + f"You are a helpful X news assistant. Current trending topics: {topics_context}.\n" + f"User: {user_input}\n" + f"Reply in 2 sentences max. Conversational. No markdown." + ) + response = self.capability_worker.text_to_text_response(prompt) + await self.capability_worker.speak(response) + await self.worker.session_tasks.sleep(0.3) + await self.capability_worker.speak("Anything else?") + + async def generate_contextual_goodbye(self): + prompt = ( + "Generate a brief friendly goodbye under 10 words for a news briefing. " + "Casual. 
Examples: 'Catch you later!', 'Stay informed!', 'Take care!'\nOne only:" + ) + goodbye = self.capability_worker.text_to_text_response(prompt).strip() + await self.capability_worker.speak(goodbye) + + def number_to_word(self, num: int) -> str: + return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") \ No newline at end of file From 548843a9e528f809fcfbae2ea498171caa9a005b Mon Sep 17 00:00:00 2001 From: ali Date: Mon, 16 Feb 2026 10:47:07 +0500 Subject: [PATCH 02/17] Add X News Feed ability with lint fixes and __init__.py --- community/x-news-feed/__init__.py | 1 + community/x-news-feed/main.py | 39 ++++++++++++++++++++++++------- 2 files changed, 31 insertions(+), 9 deletions(-) create mode 100644 community/x-news-feed/__init__.py diff --git a/community/x-news-feed/__init__.py b/community/x-news-feed/__init__.py new file mode 100644 index 00000000..277cdbe0 --- /dev/null +++ b/community/x-news-feed/__init__.py @@ -0,0 +1 @@ +# OpenHome X News Feed Analysis Ability \ No newline at end of file diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index 346b45e3..c4153c3e 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -2,7 +2,9 @@ import os import asyncio import re + import requests + from src.agent.capability import MatchingCapability from src.main import AgentWorker from src.agent.capability_worker import CapabilityWorker @@ -112,7 +114,9 @@ async def main_flow(self): # Personalize greeting based on first visit if self.first_visit: await self.capability_worker.speak( - f"Hey {self.user_name}, welcome to X News! First time here? I'll show you around.") + f"Hey {self.user_name}, welcome to X News! " + "First time here? I'll show you around." 
+ ) self.first_visit = False await self.save_user_preferences() @@ -146,14 +150,18 @@ async def capture_user_input(self): if user_input and user_input.strip(): self.trigger_phrase = user_input.strip().lower() - self.worker.editor_logging_handler.info(f"Captured user input: '{self.trigger_phrase}'") + self.worker.editor_logging_handler.info( + f"Captured user input: '{self.trigger_phrase}'" + ) return # Method 2: Fallback to regular user_response if wait_for_complete_transcription fails user_input = await self.capability_worker.user_response() if user_input and user_input.strip(): self.trigger_phrase = user_input.strip().lower() - self.worker.editor_logging_handler.info(f"Captured user input (fallback): '{self.trigger_phrase}'") + self.worker.editor_logging_handler.info( + f"Captured user input (fallback): '{self.trigger_phrase}'" + ) return # Method 3: Try to get from history as last resort @@ -171,7 +179,9 @@ async def capture_user_input(self): except Exception: pass - self.worker.editor_logging_handler.info(f"Final trigger phrase: '{self.trigger_phrase}'") + self.worker.editor_logging_handler.info( + f"Final trigger phrase: '{self.trigger_phrase}'" + ) except Exception as e: self.worker.editor_logging_handler.error(f"Error capturing user input: {e}") @@ -191,7 +201,9 @@ def detect_mode_from_trigger(self) -> str: self.worker.editor_logging_handler.info(f"Full mode triggered by: '{phrase}'") return "full" - self.worker.editor_logging_handler.info(f"Quick mode (trigger: '{self.trigger_phrase[:50]}')") + self.worker.editor_logging_handler.info( + f"Quick mode (trigger: '{self.trigger_phrase[:50]}')" + ) return "quick" # ======================================================================== @@ -232,7 +244,12 @@ async def save_user_preferences(self): # ======================================================================== # PATIENT INPUT HELPER # ======================================================================== - async def wait_for_input(self, max_attempts: 
int = 5, wait_seconds: float = 3.0, context: str = "") -> str: + async def wait_for_input( + self, + max_attempts: int = 5, + wait_seconds: float = 3.0, + context: str = "" + ) -> str: """Poll for user input patiently. Returns first non-empty response or empty string.""" for attempt in range(max_attempts): await self.worker.session_tasks.sleep(wait_seconds) @@ -310,7 +327,9 @@ async def fetch_trending_topics(self): # ======================================================================== async def quick_mode(self): """Top 3, offer more, patient wait for response.""" - await self.capability_worker.speak(f"Hey {self.user_name}, here are the top 3 trending topics right now:") + await self.capability_worker.speak( + f"Hey {self.user_name}, here are the top 3 trending topics right now:" + ) await self.worker.session_tasks.sleep(0.4) for i, topic in enumerate(self.trending_topics[:3], 1): @@ -405,7 +424,9 @@ async def interactive_loop(self): await self.capability_worker.speak("Anything else?") continue - if any(w in user_input_lower for w in ["number", "topic", "tell me about", "more about"]): + if any(w in user_input_lower for w in [ + "number", "topic", "tell me about", "more about" + ]): await self.handle_topic_question(user_input_lower) continue @@ -492,4 +513,4 @@ async def generate_contextual_goodbye(self): await self.capability_worker.speak(goodbye) def number_to_word(self, num: int) -> str: - return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") \ No newline at end of file + return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") From c21988d07940c0faaabb6056511de10e95645bfd Mon Sep 17 00:00:00 2001 From: ali Date: Mon, 16 Feb 2026 10:56:06 +0500 Subject: [PATCH 03/17] make the __init__.py empty --- community/x-news-feed/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/community/x-news-feed/__init__.py b/community/x-news-feed/__init__.py index 277cdbe0..e69de29b 100644 --- a/community/x-news-feed/__init__.py 
+++ b/community/x-news-feed/__init__.py @@ -1 +0,0 @@ -# OpenHome X News Feed Analysis Ability \ No newline at end of file From 3be9fc1ba4dae8dc47d2cf51272ebc5f1be450e6 Mon Sep 17 00:00:00 2001 From: ali Date: Mon, 16 Feb 2026 10:58:48 +0500 Subject: [PATCH 04/17] Fix import sorting --- community/x-news-feed/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index c4153c3e..be25d548 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -6,8 +6,8 @@ import requests from src.agent.capability import MatchingCapability -from src.main import AgentWorker from src.agent.capability_worker import CapabilityWorker +from src.main import AgentWorker # ============================================================================ # API CONFIGURATION From 3fdc5725aa572ff5764a6b689e18efeff7216a73 Mon Sep 17 00:00:00 2001 From: ali Date: Mon, 16 Feb 2026 11:01:45 +0500 Subject: [PATCH 05/17] Fix import sorting --- community/x-news-feed/main.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index be25d548..88a1a506 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -1,10 +1,9 @@ +import asyncio import json import os -import asyncio import re import requests - from src.agent.capability import MatchingCapability from src.agent.capability_worker import CapabilityWorker from src.main import AgentWorker From 491ebe16bf80de3afc320d4a5e232e5bff571063 Mon Sep 17 00:00:00 2001 From: Muhammad Rizwan Date: Mon, 16 Feb 2026 12:20:07 +0500 Subject: [PATCH 06/17] Delete community/x-news-feed/config.json Signed-off-by: Muhammad Rizwan --- community/x-news-feed/config.json | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 community/x-news-feed/config.json diff --git a/community/x-news-feed/config.json b/community/x-news-feed/config.json deleted file mode 
100644 index f9987593..00000000 --- a/community/x-news-feed/config.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "unique_name": "x_news_feed_analysis", - "matching_hotwords": [ - "what's trending on x", - "twitter trends", - "x news", - "x trending topics", - "show me x trends", - "what is trending on x", - "latest from x", - "x trends", - "all trends", - "all five trends" - ] -} \ No newline at end of file From 1fcd3de259840fddd20bfa612d7ed99592274fb6 Mon Sep 17 00:00:00 2001 From: ali Date: Thu, 5 Mar 2026 01:05:11 +0500 Subject: [PATCH 07/17] change top trends to top tweets from different topic --- community/x-news-feed/config.json | 3 +- community/x-news-feed/main.py | 391 ++++++++++++++++++++---------- 2 files changed, 269 insertions(+), 125 deletions(-) diff --git a/community/x-news-feed/config.json b/community/x-news-feed/config.json index f9987593..243d934c 100644 --- a/community/x-news-feed/config.json +++ b/community/x-news-feed/config.json @@ -10,6 +10,7 @@ "latest from x", "x trends", "all trends", - "all five trends" + "all five trends", + "top tweets" ] } \ No newline at end of file diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index 88a1a506..405ac069 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -11,7 +11,18 @@ # ============================================================================ # API CONFIGURATION # ============================================================================ -X_API_BEARER_TOKEN = "REPLACE_WITH_YOUR_KEY" +X_API_BEARER_TOKEN = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + +# ============================================================================ +# TOPIC SEEDS — one API call per topic, best tweet selected per topic +# ============================================================================ +TOPIC_SEEDS = [ + "Artificial Intelligence", + "Crypto", + "Climate", + "Tech Innovation", + "Global Markets", +] # 
============================================================================ # CONSTANTS @@ -26,7 +37,7 @@ "catch me up", "all trends", "full briefing", "everything", "run through", "brief me", "all of them", "the full list", "full list", "all five", "read all", "read them all", - "dive in", "deep dive", "explore", "tell me everything" + "dive in", "deep dive", "explore", "tell me everything",'all tweets' ] MORE_WORDS = [ @@ -36,29 +47,93 @@ FILLER_PHRASES = [ "One sec, checking what's hot on X.", - "Give me a moment, pulling the latest trends.", + "Give me a moment, pulling the latest tweets.", "Standby, grabbing the top topics from X.", "Let me see what's trending right now.", "Hang on, fetching the latest from X." ] +FILLER_INTRO_TEMPLATES = [ + "Let me fetch the top tweets on {topics} — just a moment.", + "Pulling the most popular tweets on {topics} right now.", + "Give me a second, grabbing the top tweets on {topics}.", + "One moment — fetching top tweets on {topics}.", + "Looking up the best tweets on {topics} for you.", +] + +# Demo data — one entry per TOPIC_SEED, mirrors live structure {name, top_tweet, score, summary} DEMO_TRENDS = [ - {"name": "Artificial Intelligence", "tweet_count": 125000}, - {"name": "Climate Summit 2026", "tweet_count": 98000}, - {"name": "Mars Mission Update", "tweet_count": 87000}, - {"name": "Tech Innovation Awards", "tweet_count": 76000}, - {"name": "Global Markets Rally", "tweet_count": 65000} + { + "name": "Artificial Intelligence", + "top_tweet": "2026 is the year of AI. But we use it differently at junior, mid, senior levels. Build foundations, collab with agents, orchestrate teams.", + "score": 42, + "summary": "Developers are debating how AI changes workflows across every seniority level, from building basics to orchestrating full agent teams." + }, + { + "name": "Crypto", + "top_tweet": "I'm Sergey Polonsky, the developer behind Moscow City. 
My new legacy is a global network of 12 luxury eco-hubs combined with the $OAZIS token.", + "score": 12, + "summary": "Real-world asset tokenisation is gaining momentum, with developers blending physical infrastructure and digital tokens into new hybrid ecosystems." + }, + { + "name": "Climate", + "top_tweet": "The Climate Summit 2026 concluded with 47 nations signing binding emissions targets, the most ambitious global agreement since Paris.", + "score": 98, + "summary": "Climate Summit 2026 has produced a landmark multi-nation commitment on emissions, reigniting optimism about coordinated global climate action." + }, + { + "name": "Tech Innovation", + "top_tweet": "Ready to put your GPU to work? YOM Official is bridging the gap between high-end rendering and everyday devices for developers and gamers alike.", + "score": 35, + "summary": "Distributed GPU rendering is turning heads, with new platforms promising to make high-end graphics accessible on everyday consumer hardware." + }, + { + "name": "Global Markets", + "top_tweet": "Global markets rallied sharply today as inflation data came in below forecast, boosting investor confidence across equities and crypto alike.", + "score": 65, + "summary": "Better-than-expected inflation figures have sparked a broad market rally, lifting both traditional equities and digital assets simultaneously." + }, ] PREFERENCES_FILE = "x_news_prefs.json" +# Recent Search API — fetches 10 tweets per query +RECENT_SEARCH_URL = ( + "https://api.twitter.com/2/tweets/search/recent" + "?query={query} -is:retweet -is:reply lang:en" + "&tweet.fields=text,public_metrics" + "&max_results=10" +) + + +# ============================================================================ +# SCORING HELPER +# ============================================================================ +def score_tweet(public_metrics: dict) -> int: + """ + Compute a weighted engagement score from public_metrics. 
+ Weights: likes x3 | retweets x2 | quotes x2 | replies x1 | bookmarks x1 + Impression count excluded — it reflects reach, not engagement quality. + """ + return ( + public_metrics.get("like_count", 0) * 3 + + public_metrics.get("retweet_count", 0) * 2 + + public_metrics.get("quote_count", 0) * 2 + + public_metrics.get("reply_count", 0) * 1 + + public_metrics.get("bookmark_count", 0) * 1 + ) + # ============================================================================ # MAIN ABILITY CLASS # ============================================================================ class XNewsFeedCapability(MatchingCapability): """ - X News Feed Ability - fetches and reads aloud trending topics from X. + X News Feed Ability — for each topic in TOPIC_SEEDS: + 1. Fetch 10 recent tweets via Recent Search API + 2. Score each tweet with weighted public_metrics engagement + 3. Keep the highest-scoring tweet as the topic representative + 4. Send all 5 top tweets to the LLM for trend-style summaries Quick Mode: top 3, offer more. Full Mode: all 5, then interactive Q&A. """ @@ -92,15 +167,12 @@ def call(self, worker: AgentWorker): # ======================================================================== async def main_flow(self): try: - # CRITICAL FIX: Wait for and capture the user's input FIRST await self.capture_user_input() - # Now load preferences and detect mode await self.load_user_preferences() self.mode = self.detect_mode_from_trigger() self.worker.editor_logging_handler.info(f"Mode detected: {self.mode}") - # Fetch trending topics await self.fetch_trending_topics_with_filler() if not self.trending_topics: @@ -110,7 +182,6 @@ async def main_flow(self): self.capability_worker.resume_normal_flow() return - # Personalize greeting based on first visit if self.first_visit: await self.capability_worker.speak( f"Hey {self.user_name}, welcome to X News! 
" @@ -119,7 +190,6 @@ async def main_flow(self): self.first_visit = False await self.save_user_preferences() - # Run appropriate mode if self.mode == "full": await self.full_mode() else: @@ -133,37 +203,22 @@ async def main_flow(self): self.capability_worker.resume_normal_flow() # ======================================================================== - # CAPTURE USER INPUT - THE CRITICAL FIX + # CAPTURE USER INPUT # ======================================================================== async def capture_user_input(self): - """ - CRITICAL: Wait for and capture the user's input that triggered this ability. - This must run before anything else. - """ try: self.worker.editor_logging_handler.info("Waiting for user input...") - # Method 1: Use wait_for_complete_transcription() to ensure we get the full utterance - # This waits until the user has completely finished speaking user_input = await self.capability_worker.wait_for_complete_transcription() - if user_input and user_input.strip(): self.trigger_phrase = user_input.strip().lower() - self.worker.editor_logging_handler.info( - f"Captured user input: '{self.trigger_phrase}'" - ) return - # Method 2: Fallback to regular user_response if wait_for_complete_transcription fails user_input = await self.capability_worker.user_response() if user_input and user_input.strip(): self.trigger_phrase = user_input.strip().lower() - self.worker.editor_logging_handler.info( - f"Captured user input (fallback): '{self.trigger_phrase}'" - ) return - # Method 3: Try to get from history as last resort await self.worker.session_tasks.sleep(0.5) history = self.worker.agent_memory.full_message_history if history: @@ -178,10 +233,6 @@ async def capture_user_input(self): except Exception: pass - self.worker.editor_logging_handler.info( - f"Final trigger phrase: '{self.trigger_phrase}'" - ) - except Exception as e: self.worker.editor_logging_handler.error(f"Error capturing user input: {e}") self.trigger_phrase = "" @@ -190,33 +241,24 @@ async 
def capture_user_input(self): # MODE DETECTION # ======================================================================== def detect_mode_from_trigger(self) -> str: - """Detect quick vs full mode by checking the captured trigger phrase.""" if not self.trigger_phrase: - self.worker.editor_logging_handler.info("No trigger phrase, defaulting to quick") return "quick" - for phrase in FULL_MODE_TRIGGERS: if phrase in self.trigger_phrase: self.worker.editor_logging_handler.info(f"Full mode triggered by: '{phrase}'") return "full" - - self.worker.editor_logging_handler.info( - f"Quick mode (trigger: '{self.trigger_phrase[:50]}')" - ) return "quick" # ======================================================================== # FILE PERSISTENCE # ======================================================================== async def load_user_preferences(self): - """Load user preferences from persistent storage.""" try: if await self.capability_worker.check_if_file_exists(PREFERENCES_FILE, False): raw = await self.capability_worker.read_file(PREFERENCES_FILE, False) prefs = json.loads(raw) self.user_name = prefs.get("name", "there") self.first_visit = prefs.get("first_visit", False) - self.worker.editor_logging_handler.info(f"Loaded preferences for {self.user_name}") else: self.first_visit = True self.user_name = "there" @@ -227,36 +269,21 @@ async def load_user_preferences(self): self.user_name = "there" async def save_user_preferences(self): - """Save user preferences to persistent storage.""" try: - prefs = { - "name": self.user_name, - "first_visit": self.first_visit, - "last_used": "x_news_feed" - } + prefs = {"name": self.user_name, "first_visit": self.first_visit, "last_used": "x_news_feed"} await self.capability_worker.delete_file(PREFERENCES_FILE, False) await self.capability_worker.write_file(PREFERENCES_FILE, json.dumps(prefs), False) - self.worker.editor_logging_handler.info("Saved preferences") except Exception as e: 
self.worker.editor_logging_handler.warning(f"Couldn't save preferences: {e}") # ======================================================================== # PATIENT INPUT HELPER # ======================================================================== - async def wait_for_input( - self, - max_attempts: int = 5, - wait_seconds: float = 3.0, - context: str = "" - ) -> str: - """Poll for user input patiently. Returns first non-empty response or empty string.""" + async def wait_for_input(self, max_attempts: int = 5, wait_seconds: float = 3.0, context: str = "") -> str: for attempt in range(max_attempts): await self.worker.session_tasks.sleep(wait_seconds) user_input = await self.capability_worker.user_response() if user_input and user_input.strip(): - self.worker.editor_logging_handler.info( - f"Got input on attempt {attempt + 1}: {user_input[:60]}" - ) return user_input.strip() self.worker.editor_logging_handler.info( f"Empty on attempt {attempt + 1}/{max_attempts}, retrying..." @@ -274,58 +301,183 @@ async def wait_for_input( return "" # ======================================================================== - # DATA FETCHING + # DATA FETCHING — per-topic, scored, top-tweet selection # ======================================================================== async def fetch_trending_topics_with_filler(self): import random - filler = random.choice(FILLER_PHRASES) - await self.capability_worker.speak(filler) + + # Build a natural-language list of the topic seeds + # e.g. 
"Artificial Intelligence, Crypto, Climate, Tech Innovation, and Global Markets" + if len(TOPIC_SEEDS) > 1: + topics_spoken = ", ".join(TOPIC_SEEDS[:-1]) + ", and " + TOPIC_SEEDS[-1] + else: + topics_spoken = TOPIC_SEEDS[0] + + template = random.choice(FILLER_INTRO_TEMPLATES) + filler_message = template.format(topics=topics_spoken) + + await self.capability_worker.speak(filler_message) await self.fetch_trending_topics() async def fetch_trending_topics(self): - try: - self.worker.editor_logging_handler.info("Fetching trending topics from X...") + """ + For each topic in TOPIC_SEEDS: + 1. Fetch up to 10 recent tweets (no retweets, no replies, English only) + 2. Score every tweet using weighted public_metrics + 3. Select the highest-scoring tweet as the topic representative + Then pass all 5 top tweets to the LLM for trend-style summaries. + Falls back to DEMO_TRENDS if the API key is missing or all topic calls fail. + """ + if X_API_BEARER_TOKEN in ("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "REPLACE_WITH_YOUR_KEY", "", None): + self.worker.editor_logging_handler.info("Demo mode — API key not configured.") + self.trending_topics = DEMO_TRENDS.copy() + return - if X_API_BEARER_TOKEN in ("REPLACE_WITH_YOUR_KEY", "", None): - self.worker.editor_logging_handler.info("Demo mode - API key not configured.") - self.trending_topics = DEMO_TRENDS.copy() - return + # Fetch best tweet per topic concurrently + tasks = [self._fetch_top_tweet_for_topic(topic) for topic in TOPIC_SEEDS] + results = await asyncio.gather(*tasks, return_exceptions=True) + + top_tweets = [] # [{name, top_tweet, score}, ...] + for topic, result in zip(TOPIC_SEEDS, results): + if isinstance(result, Exception) or result is None: + self.worker.editor_logging_handler.warning( + f"No result for topic '{topic}', skipping." 
+ ) + continue + top_tweets.append(result) + + if not top_tweets: + self.worker.editor_logging_handler.warning("All topic fetches failed — using demo data.") + self.trending_topics = DEMO_TRENDS.copy() + return + + self.worker.editor_logging_handler.info( + f"Collected top tweets for {len(top_tweets)} / {len(TOPIC_SEEDS)} topics. " + "Sending to LLM for summarisation." + ) + self.trending_topics = await self._summarise_top_tweets_with_llm(top_tweets) + + async def _fetch_top_tweet_for_topic(self, topic: str) -> dict | None: + """ + Fetch 10 recent tweets for `topic`, score each one with public_metrics, + and return the best as {name, top_tweet, score}. Returns None on failure. + Scoring formula (see score_tweet): + likes x3 | retweets x2 | quotes x2 | replies x1 | bookmarks x1 + """ + try: + url = RECENT_SEARCH_URL.format(query=requests.utils.quote(topic)) headers = {"Authorization": f"Bearer {X_API_BEARER_TOKEN}"} - url = "https://api.twitter.com/1.1/trends/place.json" - params = {"id": 1} resp = await asyncio.to_thread( - requests.get, url, headers=headers, params=params, timeout=10 + requests.get, url, headers=headers, timeout=10 ) - if resp.status_code == 200: - data = resp.json() - if data and "trends" in data[0]: - self.trending_topics = [ - { - "name": t.get("name", "Unknown"), - "tweet_count": t.get("tweet_volume") or 0 - } - for t in data[0]["trends"][:5] - ] - self.worker.editor_logging_handler.info( - f"Fetched {len(self.trending_topics)} live trends." - ) - return + if resp.status_code != 200: + self.worker.editor_logging_handler.warning( + f"[{topic}] API returned {resp.status_code}." + ) + return None + self.worker.editor_logging_handler.warning( + f"[{topic}] API returned {resp.json()}." 
+ ) - self.worker.editor_logging_handler.warning(f"API {resp.status_code} - using demo data.") - self.trending_topics = DEMO_TRENDS.copy() + tweets = resp.json().get("data", []) + if not tweets: + self.worker.editor_logging_handler.warning( + f"[{topic}] No tweets in response." + ) + return None + + # Log all scores for debugging + for t in tweets: + s = score_tweet(t.get("public_metrics", {})) + self.worker.editor_logging_handler.info( + f" [{topic}] score={s:>4} {t.get('text', '')[:60]}" + ) + + # Pick the winner + best_tweet = max( + tweets, + key=lambda t: score_tweet(t.get("public_metrics", {})) + ) + best_score = score_tweet(best_tweet.get("public_metrics", {})) + + self.worker.editor_logging_handler.info( + f"[{topic}] WINNER score={best_score} | {best_tweet.get('text', '')[:80]}" + ) + + return { + "name": topic, + "top_tweet": best_tweet.get("text", "").strip(), + "score": best_score, + } except Exception as e: - self.worker.editor_logging_handler.error(f"Fetch error: {e} - using demo data.") - self.trending_topics = DEMO_TRENDS.copy() + self.worker.editor_logging_handler.error(f"[{topic}] Fetch error: {e}") + return None + + async def _summarise_top_tweets_with_llm(self, top_tweets: list) -> list: + """ + Send the best tweet per topic to the LLM and ask for trend-style summaries. + Returns a list of {name, top_tweet, score, summary} dicts. + Falls back to DEMO_TRENDS on any parsing error. + """ + try: + tweet_block = "\n".join( + f"{i + 1}. Topic: {item['name']}\n Top Tweet: {item['top_tweet']}" + for i, item in enumerate(top_tweets) + ) + + prompt = ( + "You are a news analyst. 
Below are the highest-engagement tweets for each topic.\n" + "For each topic write a short, conversational 1-2 sentence summary that captures " + "the key theme or sentiment from that tweet.\n" + "Return ONLY a valid JSON array — no markdown, no explanation — in this exact format:\n" + '[{"name": "", "summary": ""}, ...]\n\n' + f"Topics and their top tweets:\n{tweet_block}" + ) + + raw_response = self.capability_worker.text_to_text_response(prompt) + + # Strip accidental markdown fences + clean = raw_response.strip() + if clean.startswith("```"): + clean = re.sub(r"```[a-z]*\n?", "", clean).strip("` \n") + + parsed = json.loads(clean) + if not isinstance(parsed, list) or not parsed: + raise ValueError("LLM returned unexpected structure.") + + # Index summaries by topic name for easy lookup + summaries_by_name = {item["name"]: item.get("summary", "") for item in parsed} + + # Merge LLM summaries back with original top-tweet data + enriched = [] + for item in top_tweets: + enriched.append({ + "name": item["name"], + "top_tweet": item["top_tweet"], + "score": item["score"], + "summary": summaries_by_name.get(item["name"], ""), + }) + + self.worker.editor_logging_handler.info( + f"LLM produced summaries for {len(enriched)} topics." + ) + return enriched + + except Exception as e: + self.worker.editor_logging_handler.error( + f"LLM summarisation failed: {e} — using demo data." 
+ ) + return DEMO_TRENDS.copy() # ======================================================================== # QUICK MODE # ======================================================================== async def quick_mode(self): - """Top 3, offer more, patient wait for response.""" + """Top 3 summaries, offer more, patient wait for response.""" await self.capability_worker.speak( f"Hey {self.user_name}, here are the top 3 trending topics right now:" ) @@ -352,7 +504,7 @@ async def quick_mode(self): return if self.is_more_request(user_input_lower): - await self.capability_worker.speak("Here are the remaining trends:") + await self.capability_worker.speak("Here are the remaining topics:") await self.worker.session_tasks.sleep(0.3) for i, topic in enumerate(self.trending_topics[3:], 4): await self.speak_single_trend(i, topic) @@ -374,7 +526,7 @@ async def quick_mode(self): # FULL MODE # ======================================================================== async def full_mode(self): - """Read all 5, then open interactive Q&A loop.""" + """Read all 5 summaries, then open interactive Q&A loop.""" await self.capability_worker.speak( f"Hey {self.user_name}, here's your full rundown of the top 5 trending topics on X:" ) @@ -423,9 +575,7 @@ async def interactive_loop(self): await self.capability_worker.speak("Anything else?") continue - if any(w in user_input_lower for w in [ - "number", "topic", "tell me about", "more about" - ]): + if any(w in user_input_lower for w in ["number", "topic", "tell me about", "more about"]): await self.handle_topic_question(user_input_lower) continue @@ -438,8 +588,7 @@ async def interactive_loop(self): # ======================================================================== def is_exit_command(self, text: str) -> bool: for word in EXIT_WORDS: - pattern = r'\b' + re.escape(word) + r'\b' - if re.search(pattern, text): + if re.search(r'\b' + re.escape(word) + r'\b', text): return True return False @@ -447,25 +596,11 @@ def is_more_request(self, 
text: str) -> bool: return any(word in text for word in MORE_WORDS) async def speak_single_trend(self, number: int, topic: dict): - name = topic["name"] - count = topic.get("tweet_count", 0) - + """Speak one trend. Reads the LLM summary; falls back to topic name only.""" + name = topic.get("name", "Unknown") + summary = topic.get("summary", "") clean_name = re.sub(r'#', 'hashtag ', name) - - if count >= 1_000_000: - count_text = f"{count / 1_000_000:.1f} million posts" - elif count >= 1_000: - count_text = f"{int(count / 1_000)} thousand posts" - elif count > 0: - count_text = f"{count} posts" - else: - count_text = None - - if count_text: - msg = f"Number {number}: {clean_name}, with {count_text}." - else: - msg = f"Number {number}: {clean_name}." - + msg = f"Number {number}: {clean_name}. {summary}" if summary else f"Number {number}: {clean_name}." await self.capability_worker.speak(msg) async def handle_topic_question(self, user_input: str): @@ -476,14 +611,20 @@ async def handle_topic_question(self, user_input: str): break if topic_number and topic_number <= len(self.trending_topics): - name = self.trending_topics[topic_number - 1]["name"] + topic = self.trending_topics[topic_number - 1] + name = topic.get("name", "Unknown") + existing_summary = topic.get("summary", "") + top_tweet = topic.get("top_tweet", "") + prompt = ( - f"The topic '{name}' is trending on X. " - f"Give a 2-sentence conversational explanation of why. " - f"Be concise. Under 30 words. No markdown." + f"Topic: '{name}' is trending on X.\n" + f"Top tweet: \"{top_tweet}\"\n" + f"Existing summary: {existing_summary}\n" + f"Give an additional 1-2 sentence conversational insight about why this matters. " + f"Be concise. Under 40 words. No markdown." 
) analysis = self.capability_worker.text_to_text_response(prompt) - await self.capability_worker.speak(f"About {name}: {analysis}") + await self.capability_worker.speak(f"More on {name}: {analysis}") await self.worker.session_tasks.sleep(0.3) await self.capability_worker.speak("What else would you like to know?") else: @@ -492,9 +633,11 @@ async def handle_topic_question(self, user_input: str): ) async def handle_general_question(self, user_input: str): - topics_context = ", ".join([t["name"] for t in self.trending_topics]) + topics_context = "; ".join( + [f"{t['name']}: {t.get('summary', '')}" for t in self.trending_topics] + ) prompt = ( - f"You are a helpful X news assistant. Current trending topics: {topics_context}.\n" + f"You are a helpful X news assistant. Current trending topics and summaries: {topics_context}.\n" f"User: {user_input}\n" f"Reply in 2 sentences max. Conversational. No markdown." ) @@ -512,4 +655,4 @@ async def generate_contextual_goodbye(self): await self.capability_worker.speak(goodbye) def number_to_word(self, num: int) -> str: - return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") + return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") \ No newline at end of file From fdfa7ab2e509785ac0a16f5ccb02d905c7c8afe7 Mon Sep 17 00:00:00 2001 From: ali Date: Thu, 5 Mar 2026 01:27:56 +0500 Subject: [PATCH 08/17] update the README.md --- community/x-news-feed/README.md | 337 +++++++++++++++----------------- 1 file changed, 163 insertions(+), 174 deletions(-) diff --git a/community/x-news-feed/README.md b/community/x-news-feed/README.md index f829858a..f499149b 100644 --- a/community/x-news-feed/README.md +++ b/community/x-news-feed/README.md @@ -1,22 +1,48 @@ # X News Feed Analysis -A voice-powered OpenHome ability that searches and reads aloud trending topics and news from X (Twitter). 
+A voice-powered OpenHome ability that fetches the top tweet from each of five curated topics on X (Twitter), scores them by engagement, and reads AI-generated summaries aloud. ## What It Does -This ability lets you stay updated on what's trending on X through natural voice commands. It can: +This ability keeps you updated on what's happening across five key topics on X through natural voice commands. It can: -- **Read trending topics** - Get the top trending topics on X with tweet counts -- **Quick mode** - Top 3 trends with option to hear more -- **Full mode** - All 5 trends with interactive Q&A follow-ups -- **Topic deep-dives** - Ask for more details on any specific trending topic (by number) -- **Smart exit handling** - Multiple ways to exit naturally +- **Fetch top tweets per topic** — For each topic in `TOPIC_SEEDS`, it pulls 10 recent tweets and picks the single most-engaged one using a weighted public metrics score +- **AI-generated summaries** — The winning tweet for each topic is sent to the LLM, which produces a short, conversational trend-style summary +- **Quick mode** — Top 3 topic summaries with option to hear more +- **Full mode** — All 5 topic summaries with an interactive Q&A follow-up session +- **Topic deep-dives** — Ask for more detail on any topic by number +- **Smart exit handling** — Multiple natural ways to end the session + +## How Topics Are Selected + +Rather than relying on the X Trends API (which doesn't reliably return tweet counts on all subscription tiers), this ability uses the **Recent Search API** with a fixed set of topic seeds: + +```python +TOPIC_SEEDS = [ + "Artificial Intelligence", + "Crypto", + "Climate", + "Tech Innovation", + "Global Markets", +] +``` + +For each topic it: +1. Fetches 10 recent tweets (`-is:retweet -is:reply lang:en`) +2. Scores every tweet using weighted public metrics: + ``` + likes ×3 | retweets ×2 | quotes ×2 | replies ×1 | bookmarks ×1 + ``` +3. 
Selects the highest-scoring tweet as the topic representative +4. Sends all 5 winning tweets to the LLM for a trend-style summary each + +All 5 topics are fetched **concurrently** using `asyncio.gather()`, so the wait time is roughly the duration of the slowest single call rather than 5× sequential waits. ## Trigger Words Say any of these phrases to activate the ability: -**For Quick Mode (Top 3):** +**Quick Mode (Top 3):** - "What's trending on X?" - "Twitter trends" - "X news" @@ -24,24 +50,36 @@ Say any of these phrases to activate the ability: - "X trends" - "Latest from X" -**For Full Mode (All 5 with Q&A):** +**Full Mode (All 5 with Q&A):** - "All trends" - "All five trends" -- "X trending topics" -- "What is trending on X?" +- "Catch me up" +- "Full briefing" +- "Tell me everything" +- "Deep dive" The ability automatically detects whether you want a quick update or a full interactive session based on which trigger phrase you use. +## What You'll Hear While Fetching + +Instead of a generic filler, the ability now names the exact topics it is fetching. One of these phrases is spoken at random: + +> *"Let me fetch the top tweets on Artificial Intelligence, Crypto, Climate, Tech Innovation, and Global Markets — just a moment."* + +> *"Give me a second, grabbing the top tweets on Artificial Intelligence, Crypto, Climate, Tech Innovation, and Global Markets."* + +This is driven by `FILLER_INTRO_TEMPLATES` and built dynamically from `TOPIC_SEEDS`, so it stays accurate if you ever change the topic list. + ## Setup ### 1. Get an API Key (Optional but Recommended) -For live X/Twitter data, you'll need an X API Bearer Token: +For live X/Twitter data, you need an X API Bearer Token with access to the **v2 Recent Search** endpoint. **Option A: X Developer Portal (Official)** 1. Go to [X Developer Portal](https://developer.twitter.com/en/portal/dashboard) 2. Create a project and app -3. Generate Bearer Token +3. Generate a Bearer Token 4. 
Copy your Bearer Token **Option B: RapidAPI Twitter154 (Easier)** @@ -52,22 +90,22 @@ For live X/Twitter data, you'll need an X API Bearer Token: ### 2. Configure the Ability -Open `main.py` and add your API key: +Open `main.py` and set your token: ```python -# Replace this line: -X_API_BEARER_TOKEN = "REPLACE_WITH_YOUR_KEY" +# Replace this: +X_API_BEARER_TOKEN = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # With your actual Bearer Token: X_API_BEARER_TOKEN = "your_bearer_token_here" ``` -**Note:** The ability works without an API key using demo trending data for testing. This is perfect for development, demonstration, and your Loom video. +**Note:** The ability works without an API key using built-in demo data. This is ideal for development, testing, and demonstrations. ### 3. Upload to OpenHome 1. Create a new ability in your OpenHome dashboard -2. Upload the `main.py` file +2. Upload `main.py` 3. Set trigger words (suggestions in `config.json`) 4. Test using "Start Live Test" @@ -75,84 +113,91 @@ X_API_BEARER_TOKEN = "your_bearer_token_here" ### Quick Mode -When you ask a specific question like "What's trending on X?" or "Twitter trends", the ability: +When you ask something like "What's trending on X?", the ability: -1. Speaks a filler phrase ("One sec, checking what's hot on X") -2. Fetches the top 5 trending topics -3. Reads the top 3 aloud with tweet counts -4. Asks "Want to hear more, or are you all set?" -5. If you say "more" or "continue" → reads the remaining 2 trends -6. Exits cleanly when you say "done", "bye", or similar +1. Speaks a filler phrase naming each topic being fetched +2. Concurrently fetches 10 tweets per topic (5 topics = 5 parallel API calls) +3. Picks the highest-engagement tweet per topic using the scoring formula +4. Asks the LLM to summarise each winning tweet into a 1–2 sentence insight +5. Reads the top 3 summaries aloud +6. Asks "Want to hear more, or are you all set?" +7. If you say "more" → reads summaries 4 and 5 +8. 
Exits cleanly when you say "done", "bye", or similar **Example:** ``` -You: "What's trending on X?" -Ability: "One sec, checking what's hot on X..." +You: "What's trending on X?" +Ability: "Give me a second, grabbing the top tweets on Artificial Intelligence, + Crypto, Climate, Tech Innovation, and Global Markets." Ability: "Hey there, here are the top 3 trending topics right now:" -Ability: "Number 1: Artificial Intelligence, with 125 thousand posts." -Ability: "Number 2: Climate Summit 2026, with 98 thousand posts." -Ability: "Number 3: Mars Mission Update, with 87 thousand posts." +Ability: "Number 1: Artificial Intelligence. Developers are debating how AI + changes workflows across every seniority level, from building basics + to orchestrating full agent teams." +Ability: "Number 2: Crypto. Real-world asset tokenisation is gaining momentum, + with developers blending physical infrastructure and digital tokens + into new hybrid ecosystems." +Ability: "Number 3: Climate. Climate Summit 2026 has produced a landmark + multi-nation commitment on emissions, reigniting optimism about + coordinated global climate action." Ability: "Want to hear more, or are you all set?" -You: "Continue" -Ability: "Here are the remaining trends:" -Ability: "Number 4: Tech Innovation Awards, with 76 thousand posts." -Ability: "Number 5: Global Markets Rally, with 65 thousand posts." +You: "More" +Ability: "Here are the remaining topics:" +Ability: "Number 4: Tech Innovation. Distributed GPU rendering is turning heads, + with new platforms making high-end graphics accessible on everyday hardware." +Ability: "Number 5: Global Markets. Better-than-expected inflation figures sparked + a broad rally, lifting both equities and digital assets simultaneously." Ability: "That's all 5. Anything else?" -You: "All good" +You: "All good" Ability: "Take care!" 
``` ### Full Mode -When you ask for a briefing like "All trends" or "All five trends", the ability: +When you ask for a full briefing like "Catch me up" or "All trends", the ability: -1. Speaks a filler phrase -2. Fetches the top 5 trending topics -3. Reads all 5 aloud with tweet counts +1. Speaks the filler phrase naming all topics +2. Fetches, scores, and summarises all 5 topics (same pipeline as Quick Mode) +3. Reads all 5 summaries aloud 4. Opens an interactive Q&A session -5. You can ask about specific topics by number ("Tell me about number 2") +5. You can ask about a specific topic by number ("Tell me about number 2") 6. You can ask to hear them again ("Read them again") -7. Exits when you say "done" or after 2 idle responses +7. Exits after you say "done" or after 2 idle responses **Example:** ``` -You: "All trends" -Ability: "One sec, checking what's hot on X..." +You: "Catch me up" +Ability: "One moment — fetching top tweets on Artificial Intelligence, Crypto, + Climate, Tech Innovation, and Global Markets." Ability: "Hey there, here's your full rundown of the top 5 trending topics on X:" -Ability: "Number 1: Artificial Intelligence, with 125 thousand posts." -Ability: "Number 2: Climate Summit 2026, with 98 thousand posts." -Ability: "Number 3: Mars Mission Update, with 87 thousand posts." -Ability: "Number 4: Tech Innovation Awards, with 76 thousand posts." -Ability: "Number 5: Global Markets Rally, with 65 thousand posts." -Ability: "Want to know more about any of these? Ask away, or say done when you're finished." -You: "Tell me about number two" -Ability: "About Climate Summit 2026: [LLM-generated 2-sentence explanation of why it's trending]" +Ability: "Number 1: Artificial Intelligence. [LLM summary]" +... +Ability: "Want to know more about any of these? Ask away, or say done when finished." 
+You: "Tell me about number three" +Ability: "More on Climate: [LLM-generated follow-up insight using the top tweet as context]" Ability: "What else would you like to know?" -You: "Goodbye" -Ability: "Stay curious!" +You: "Goodbye" +Ability: "Stay informed!" ``` ## Voice Design Principles This ability follows OpenHome's voice-first design guidelines: -- **Short responses** - 1-2 sentences per turn, progressive disclosure -- **Filler speech** - "One sec, pulling up the latest from X" before API calls -- **Natural numbers** - "125 thousand" instead of "125,000" -- **Exit handling** - Multiple ways to exit: "done", "stop", "bye", "that's all" -- **Idle detection** - Offers to sign off after 2 silent responses -- **Confirmation-free** - Reading data doesn't need confirmation (low stakes) +- **Named filler speech** — "Fetching top tweets on Artificial Intelligence, Crypto…" instead of a generic "one sec" +- **Short responses** — 1–2 sentences per turn, progressive disclosure +- **Natural language** — Conversational summaries instead of raw tweet text or numerical counts +- **Exit handling** — Multiple natural ways to exit: "done", "stop", "bye", "that's all" +- **Idle detection** — Offers to sign off after 2 silent responses +- **Concurrent fetching** — All 5 topics fetched in parallel to minimise wait time ## SDK Usage ### Core Patterns Used -**Critical: Capturing User Input** +**Capturing user input (critical — must run first):** ```python -# IMPORTANT: Wait for user input FIRST before processing user_input = await self.capability_worker.wait_for_complete_transcription() ``` -This ensures the trigger phrase is properly captured before the ability starts processing. **Speaking:** ```python @@ -164,23 +209,24 @@ await self.capability_worker.speak("Message to user") user_input = await self.capability_worker.user_response() ``` -**LLM for Classification & Analysis:** +**LLM for summarisation and Q&A (synchronous — no await):** ```python -# No await! 
This is synchronous response = self.capability_worker.text_to_text_response(prompt) ``` -**API Calls with asyncio.to_thread:** +**Concurrent API calls:** +```python +tasks = [self._fetch_top_tweet_for_topic(topic) for topic in TOPIC_SEEDS] +results = await asyncio.gather(*tasks, return_exceptions=True) +``` + +**Blocking HTTP inside async:** ```python -import asyncio -response = await asyncio.to_thread( - requests.get, url, headers=headers, params=params, timeout=10 -) +resp = await asyncio.to_thread(requests.get, url, headers=headers, timeout=10) ``` -**Patient Input Waiting:** +**Patient input polling:** ```python -# Custom helper that polls patiently for user input user_input = await self.wait_for_input(max_attempts=5, wait_seconds=3.0) ``` @@ -189,136 +235,79 @@ user_input = await self.wait_for_input(max_attempts=5, wait_seconds=3.0) self.capability_worker.resume_normal_flow() # Always call this when done! ``` -### Architecture Highlights - -- **Input capture fix** - Uses `wait_for_complete_transcription()` to ensure trigger phrase is captured -- **Mode detection from trigger** - Analyzes the actual user input to determine quick vs full mode -- **Patient input polling** - Custom `wait_for_input()` helper that retries multiple times -- **File persistence** - Saves user preferences across sessions using the file storage API -- **Demo data fallback** - Works without API key for testing/demos -- **LLM-powered topic analysis** - Uses the LLM to generate explanations for trending topics -- **Contextual goodbyes** - LLM generates natural sign-off messages - ## API Information **Provider:** X (Twitter) Official API -**Endpoint:** `https://api.twitter.com/1.1/trends/place.json` +**Endpoint:** `https://api.twitter.com/2/tweets/search/recent` **Authentication:** Bearer Token -**Rate Limits:** Depends on your X API tier (Free tier: 500 requests/month) +**Fields requested:** `text, public_metrics` +**Filters applied:** `-is:retweet -is:reply lang:en` +**Results per topic:** 
`max_results=10` **Required Header:** `Authorization: Bearer YOUR_TOKEN` -### Demo Data - -The ability includes demo trending data that's used when no API key is configured: - -```python -DEMO_TRENDS = [ - {"name": "Artificial Intelligence", "tweet_count": 125000}, - {"name": "Climate Summit 2026", "tweet_count": 98000}, - {"name": "Mars Mission Update", "tweet_count": 87000}, - {"name": "Tech Innovation Awards", "tweet_count": 76000}, - {"name": "Global Markets Rally", "tweet_count": 65000} -] -``` - -This lets you: -- Test the full conversation flow without API costs -- Demonstrate the ability in videos -- Develop and iterate without rate limits -- Submit working code to GitHub - -Replace with live data when ready by adding your Bearer Token. +### Engagement Scoring Formula -## Customization Ideas +Each of the 10 fetched tweets is scored as follows: -- **Add time context** - "This morning's trending topics" vs "Tonight's buzz" -- **Filter by category** - Tech, sports, politics, entertainment -- **Save favorites** - Use file storage to remember topics user cares about -- **Reading preferences** - Let users set how many topics to read (3, 5, 10) -- **Tweet summaries** - Fetch and summarize actual tweets about trending topics -- **Personalized greetings** - Use saved user name from preferences file +| Metric | Weight | Reason | +|--------|--------|--------| +| `like_count` | ×3 | Strongest positive signal | +| `retweet_count` | ×2 | Indicates shareworthy content | +| `quote_count` | ×2 | Signals conversation-worthy content | +| `reply_count` | ×1 | Engagement but can be negative | +| `bookmark_count` | ×1 | Quiet saves, moderate signal | +| `impression_count` | ×0 | Excluded — reflects reach, not quality | -## Technical Notes - -### Critical Input Capture Fix - -This ability includes an important fix for a common OpenHome issue where abilities would miss the user's trigger phrase. 
The solution: - -```python -async def capture_user_input(self): - """Wait for and capture the user's input that triggered this ability.""" - user_input = await self.capability_worker.wait_for_complete_transcription() - if user_input and user_input.strip(): - self.trigger_phrase = user_input.strip().lower() -``` +The tweet with the highest score wins and is passed to the LLM for summarisation. -This ensures the trigger phrase is captured **before** any processing begins, allowing for accurate mode detection and context-aware responses. - -### Patient Input Polling +### Demo Data -The ability uses a custom `wait_for_input()` helper that patiently polls for user responses: +The ability includes demo data used when no API key is configured. Each entry mirrors the live data structure exactly: ```python -async def wait_for_input(self, max_attempts: int = 5, wait_seconds: float = 3.0): - """Poll for user input patiently. Returns first non-empty response.""" - for attempt in range(max_attempts): - await self.worker.session_tasks.sleep(wait_seconds) - user_input = await self.capability_worker.user_response() - if user_input and user_input.strip(): - return user_input.strip() - return "" +DEMO_TRENDS = [ + { + "name": "Artificial Intelligence", + "top_tweet": "2026 is the year of AI...", + "score": 42, + "summary": "Developers are debating how AI changes workflows..." + }, + ... +] ``` -This handles voice transcription delays gracefully without timing out prematurely. - -## Testing Without API Key +This lets you test the full conversation flow, demonstrate the ability, and develop without API costs or rate limits. -The ability includes mock trending data for testing: +## Customisation -```python -def get_mock_trending_data(self) -> list: - return [ - {"name": "AI Safety Summit", "tweet_count": 125000}, - {"name": "Climate Action", "tweet_count": 98000}, - # ... 
more topics - ] -``` - -This lets you: -- Test the full conversation flow -- Demonstrate the ability in videos -- Develop without API costs - -Replace with live data when ready by adding your API key. +- **Change topics** — Edit `TOPIC_SEEDS` to track any subjects you care about. The filler speech updates automatically. +- **Adjust scoring weights** — Modify `score_tweet()` to weight engagement signals differently. +- **Change result count** — Update `max_results=10` in `RECENT_SEARCH_URL` (max 100 on Basic tier). +- **Add time context** — Append `start_time` to the API query for "this morning's tweets" vs "this week's". +- **Reading preferences** — Let users configure how many topics to read via the preferences file. ## Troubleshooting -**"I couldn't pull up the X feed"** -- Check your API key is correct in `main.py` -- Verify you have API credits remaining -- Check network connectivity in OpenHome settings +**"I'm having trouble reaching X right now"** +- Check your Bearer Token is correct in `main.py` +- Verify you have API credits remaining (Free tier: 500 requests/month) +- Confirm network connectivity in your OpenHome settings **Ability doesn't trigger** -- Verify trigger words in dashboard match `config.json` -- Try more specific phrases: "What's trending on X" vs just "trending" -- Check ability is enabled and saved +- Verify trigger words in the dashboard match `config.json` +- Try more explicit phrases: "What's trending on X" rather than just "trending" +- Confirm the ability is enabled and saved -**Response is too long/robotic** -- Adjust `format_trending_summary()` to be more concise -- Reduce number of topics read (currently 3 for quick, 5 for full) -- Simplify number formatting in `format_number_for_speech()` +**All topics fall back to demo data** +- Check the API token is not still set to the placeholder value `"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"` +- Run a manual `curl` test against the endpoint to confirm your token has v2 Recent Search access ## 
Contributing -Found a bug or have an improvement? Here's how to help: - 1. Fork the OpenHome abilities repo -2. Make your changes to this ability +2. Make your changes 3. Test thoroughly using "Start Live Test" -4. Submit a PR with: - - Clear description of what changed - - Why the change improves the ability - - Test results showing it works +4. Submit a PR with a clear description of what changed and why ## License @@ -326,5 +315,5 @@ Open source under the same license as the OpenHome project. --- -**Built for OpenHome** - The open-source voice AI platform +**Built for OpenHome** — The open-source voice AI platform **Questions?** Join the [OpenHome Discord](https://discord.gg/openhome) \ No newline at end of file From 8ebdbdface4c0016bc14cd64c3e261d4c27dd885 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 4 Mar 2026 20:29:36 +0000 Subject: [PATCH 09/17] style: auto-format Python files with autoflake + autopep8 --- community/x-news-feed/main.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index 405ac069..ef485cb4 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -37,7 +37,7 @@ "catch me up", "all trends", "full briefing", "everything", "run through", "brief me", "all of them", "the full list", "full list", "all five", "read all", "read them all", - "dive in", "deep dive", "explore", "tell me everything",'all tweets' + "dive in", "deep dive", "explore", "tell me everything", 'all tweets' ] MORE_WORDS = [ @@ -379,8 +379,8 @@ async def _fetch_top_tweet_for_topic(self, topic: str) -> dict | None: ) return None self.worker.editor_logging_handler.warning( - f"[{topic}] API returned {resp.json()}." - ) + f"[{topic}] API returned {resp.json()}." 
+ ) tweets = resp.json().get("data", []) if not tweets: @@ -655,4 +655,4 @@ async def generate_contextual_goodbye(self): await self.capability_worker.speak(goodbye) def number_to_word(self, num: int) -> str: - return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") \ No newline at end of file + return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") From f572f273373ee3183335f8691f1734025beaf035 Mon Sep 17 00:00:00 2001 From: ali Date: Thu, 5 Mar 2026 04:35:04 +0500 Subject: [PATCH 10/17] =?UTF-8?q?raw=20open()=20is=20not=20allowed=20?= =?UTF-8?q?=E2=80=94=20use=20capability=5Fworkerfile=20helpers=20(read=5Ff?= =?UTF-8?q?ile,=20write=5Ffile,=20etc.)=20instead=20=20Missing=20register?= =?UTF-8?q?=20capability=20tag=20=E2=80=94=20add=20one=20of=20the=20follow?= =?UTF-8?q?ing=20to=20your=20class:=20=20=20=20=20#{{register=5Fcapability?= =?UTF-8?q?}}=20=20=20=20=20#=20{{register=5Fcapability}}?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit resolved the above issue --- community/x-news-feed/main.py | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index ef485cb4..216c0e09 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -1,6 +1,5 @@ import asyncio import json -import os import re import requests @@ -146,16 +145,8 @@ class XNewsFeedCapability(MatchingCapability): first_visit: bool = True trigger_phrase: str = "" - @classmethod - def register_capability(cls) -> "MatchingCapability": - with open( - os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.json") - ) as file: - data = json.load(file) - return cls( - unique_name=data["unique_name"], - matching_hotwords=data["matching_hotwords"], - ) + # Do not change following tag of register capability + #{{register capability}} def call(self, worker: AgentWorker): self.worker = worker @@ -655,4 +646,4 @@ async 
def generate_contextual_goodbye(self): await self.capability_worker.speak(goodbye) def number_to_word(self, num: int) -> str: - return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") + return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") \ No newline at end of file From f21adc38bbde0c1852c59754dd247cd89a693abd Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 4 Mar 2026 23:35:49 +0000 Subject: [PATCH 11/17] style: auto-format Python files with autoflake + autopep8 --- community/x-news-feed/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index 216c0e09..167c59f8 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -146,7 +146,7 @@ class XNewsFeedCapability(MatchingCapability): trigger_phrase: str = "" # Do not change following tag of register capability - #{{register capability}} + # {{register capability}} def call(self, worker: AgentWorker): self.worker = worker @@ -646,4 +646,4 @@ async def generate_contextual_goodbye(self): await self.capability_worker.speak(goodbye) def number_to_word(self, num: int) -> str: - return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") \ No newline at end of file + return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") From faa9e2a45531d3f830224e049b7ff3becd8a228e Mon Sep 17 00:00:00 2001 From: ali Date: Thu, 12 Mar 2026 05:34:52 +0500 Subject: [PATCH 12/17] Updated the topic selection flow to be fully conversational. 
Instead of a fixed numbered menu, --- community/x-news-feed/README.md | 434 +++++++++-------- community/x-news-feed/main.py | 832 +++++++++++++++++--------------- 2 files changed, 678 insertions(+), 588 deletions(-) diff --git a/community/x-news-feed/README.md b/community/x-news-feed/README.md index f499149b..b6279745 100644 --- a/community/x-news-feed/README.md +++ b/community/x-news-feed/README.md @@ -1,21 +1,104 @@ -# X News Feed Analysis +# X News Feed -A voice-powered OpenHome ability that fetches the top tweet from each of five curated topics on X (Twitter), scores them by engagement, and reads AI-generated summaries aloud. +A voice-powered OpenHome ability that fetches the top tweets for any topic on X (Twitter), scores them by engagement, cleans them for natural speech, and reads them aloud. + +--- ## What It Does -This ability keeps you updated on what's happening across five key topics on X through natural voice commands. It can: +- **Demo mode** — No API key needed. Presents 5 curated topics as a numbered menu. User picks one and hears the top 3 pre-scored tweets for that topic. +- **Live mode** — With an API key, asks the user to name any topic freely, fetches up to 30 real tweets, scores them, and reads the top 3. +- **Tweet cleaning** — Strips URLs, hashtags, mentions, and HTML entities before speaking, then uses the LLM to rewrite each tweet as a single natural sentence. +- **Quick mode** — Reads the top 2 tweets, then offers the 3rd. +- **Full mode** — Reads all 3 tweets upfront, then opens an interactive Q&A session. +- **Topic deep-dives** — Ask for more detail on any tweet by number during Q&A. +- **Smart exit** — Multiple natural phrases to end the session gracefully. + +--- + +## How It Works + +### Demo Mode vs Live Mode + +The ability checks whether a real Bearer Token is configured at startup: + +**Demo mode** (no token set): +1. Reads out the 5 static `TOPIC_SEEDS` as a numbered list +2. 
User says a number (1–5) or a topic name like "Crypto" or "Climate" +3. The top 3 pre-scored demo tweets for that topic are read aloud + +**Live mode** (token configured): +1. Asks the user to name any topic they want — no restrictions +2. Fetches up to 30 real tweets from X Recent Search API using a plain synchronous `requests.get()` call +3. Scores every tweet using weighted public metrics +4. Keeps the top 3 highest-scoring tweets +5. Sends them to the LLM for a short summary +6. Reads them aloud after cleaning and polishing for speech + +### Tweet Cleaning Pipeline + +Every live tweet goes through a two-stage cleaning process before being spoken: + +**Stage 1 — Regex cleanup** (`clean_tweet_text`): +- Removes all URLs (`https://t.co/...`) +- Removes all hashtags (`#PAKvsBAN`) +- Removes all mentions (`@SomeUser`) +- Decodes HTML entities (`&` → "and", `<` → "less than", etc.) +- Collapses extra whitespace and newlines + +**Stage 2 — LLM polish** (`polish_tweet_for_speech`): +- Sends the cleaned text to the LLM with a prompt to rewrite it as a single natural-sounding sentence +- Removes any leftover emoji or awkward fragments from URL/hashtag removal +- Falls back to the regex-cleaned text if the LLM call fails + +**Example:** +``` +Raw tweet: +"#PAKvsBAN | 1st ODI 🇵🇰 Pakistan humbled in Bangladesh 🇧🇩; + hosts chase down target in 15.1 overs https://t.co/fJj7f4NqxN" + +After regex clean: +"Pakistan humbled in Bangladesh; hosts chase down target in 15.1 overs" + +After LLM polish (spoken aloud): +"Pakistan were humbled in the first ODI as Bangladesh chased down + the target in just 15 overs." 
+``` + +### Engagement Scoring -- **Fetch top tweets per topic** — For each topic in `TOPIC_SEEDS`, it pulls 10 recent tweets and picks the single most-engaged one using a weighted public metrics score -- **AI-generated summaries** — The winning tweet for each topic is sent to the LLM, which produces a short, conversational trend-style summary -- **Quick mode** — Top 3 topic summaries with option to hear more -- **Full mode** — All 5 topic summaries with an interactive Q&A follow-up session -- **Topic deep-dives** — Ask for more detail on any topic by number -- **Smart exit handling** — Multiple natural ways to end the session +Each fetched tweet is scored using weighted public metrics: -## How Topics Are Selected +| Metric | Weight | Reason | +|--------|--------|--------| +| `like_count` | ×3 | Strongest positive engagement | +| `retweet_count` | ×2 | Indicates shareworthy content | +| `quote_count` | ×2 | Signals conversation-worthy content | +| `reply_count` | ×1 | Engagement but can be negative | +| `bookmark_count` | ×1 | Quiet saves, moderate signal | +| `impression_count` | ×0 | Excluded — reach, not quality | -Rather than relying on the X Trends API (which doesn't reliably return tweet counts on all subscription tiers), this ability uses the **Recent Search API** with a fixed set of topic seeds: +Tweets are sorted descending by score. The top 3 are kept. + +### Quick Mode vs Full Mode + +Mode is detected from the phrase used to trigger the ability: + +**Quick mode** (default): +- Reads tweets 1 and 2 +- Offers: "There is one more tweet. Want to hear it, or are you all set?" 
+- If the user says yes/more/sure → reads tweet 3 +- Exits after a short follow-up + +**Full mode** (triggered by phrases like "all tweets", "full briefing", "catch me up"): +- Reads all 3 tweets upfront +- Speaks the LLM-generated topic summary +- Opens an interactive Q&A loop where the user can ask about specific tweets or the topic generally +- Exits after the user says "done" or after 2 idle responses + +--- + +## Topic Seeds (Demo Mode) ```python TOPIC_SEEDS = [ @@ -27,174 +110,128 @@ TOPIC_SEEDS = [ ] ``` -For each topic it: -1. Fetches 10 recent tweets (`-is:retweet -is:reply lang:en`) -2. Scores every tweet using weighted public metrics: - ``` - likes ×3 | retweets ×2 | quotes ×2 | replies ×1 | bookmarks ×1 - ``` -3. Selects the highest-scoring tweet as the topic representative -4. Sends all 5 winning tweets to the LLM for a trend-style summary each +In demo mode these are presented as a numbered menu. The user picks one by saying its number or name. In live mode these are not used — the user can name any topic freely. -All 5 topics are fetched **concurrently** using `asyncio.gather()`, so the wait time is roughly the duration of the slowest single call rather than 5× sequential waits. +--- ## Trigger Words -Say any of these phrases to activate the ability: - -**Quick Mode (Top 3):** -- "What's trending on X?" +**Quick mode:** - "Twitter trends" - "X news" -- "Show me X trends" -- "X trends" +- "What's trending on X" - "Latest from X" +- "X trends" -**Full Mode (All 5 with Q&A):** +**Full mode:** +- "All tweets" - "All trends" -- "All five trends" -- "Catch me up" - "Full briefing" +- "Catch me up" - "Tell me everything" - "Deep dive" +- "Show all" -The ability automatically detects whether you want a quick update or a full interactive session based on which trigger phrase you use. - -## What You'll Hear While Fetching - -Instead of a generic filler, the ability now names the exact topics it is fetching. 
One of these phrases is spoken at random: - -> *"Let me fetch the top tweets on Artificial Intelligence, Crypto, Climate, Tech Innovation, and Global Markets — just a moment."* - -> *"Give me a second, grabbing the top tweets on Artificial Intelligence, Crypto, Climate, Tech Innovation, and Global Markets."* - -This is driven by `FILLER_INTRO_TEMPLATES` and built dynamically from `TOPIC_SEEDS`, so it stays accurate if you ever change the topic list. +--- ## Setup -### 1. Get an API Key (Optional but Recommended) - -For live X/Twitter data, you need an X API Bearer Token with access to the **v2 Recent Search** endpoint. +### 1. Get a Bearer Token (for live mode) -**Option A: X Developer Portal (Official)** -1. Go to [X Developer Portal](https://developer.twitter.com/en/portal/dashboard) +1. Go to [developer.twitter.com](https://developer.twitter.com/en/portal/dashboard) 2. Create a project and app -3. Generate a Bearer Token -4. Copy your Bearer Token +3. Generate a Bearer Token with access to the **v2 Recent Search** endpoint +4. Copy the token -**Option B: RapidAPI Twitter154 (Easier)** -1. Go to [RapidAPI Twitter154 API](https://rapidapi.com/omarmhaimdat/api/twitter154/) -2. Sign up for a free account -3. Subscribe to the free tier -4. Copy your API key +The ability works without a token using built-in demo data — useful for testing and demonstrations. ### 2. Configure the Ability -Open `main.py` and set your token: +Open `main.py` and replace the placeholder: ```python # Replace this: X_API_BEARER_TOKEN = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" -# With your actual Bearer Token: +# With your token: X_API_BEARER_TOKEN = "your_bearer_token_here" ``` -**Note:** The ability works without an API key using built-in demo data. This is ideal for development, testing, and demonstrations. - ### 3. Upload to OpenHome 1. Create a new ability in your OpenHome dashboard 2. Upload `main.py` -3. Set trigger words (suggestions in `config.json`) +3. Set trigger words in `config.json` 4. 
Test using "Start Live Test" -## How It Works - -### Quick Mode +--- -When you ask something like "What's trending on X?", the ability: +## Example Conversations -1. Speaks a filler phrase naming each topic being fetched -2. Concurrently fetches 10 tweets per topic (5 topics = 5 parallel API calls) -3. Picks the highest-engagement tweet per topic using the scoring formula -4. Asks the LLM to summarise each winning tweet into a 1–2 sentence insight -5. Reads the top 3 summaries aloud -6. Asks "Want to hear more, or are you all set?" -7. If you say "more" → reads summaries 4 and 5 -8. Exits cleanly when you say "done", "bye", or similar +### Quick Mode (Live) -**Example:** ``` -You: "What's trending on X?" -Ability: "Give me a second, grabbing the top tweets on Artificial Intelligence, - Crypto, Climate, Tech Innovation, and Global Markets." -Ability: "Hey there, here are the top 3 trending topics right now:" -Ability: "Number 1: Artificial Intelligence. Developers are debating how AI - changes workflows across every seniority level, from building basics - to orchestrating full agent teams." -Ability: "Number 2: Crypto. Real-world asset tokenisation is gaining momentum, - with developers blending physical infrastructure and digital tokens - into new hybrid ecosystems." -Ability: "Number 3: Climate. Climate Summit 2026 has produced a landmark - multi-nation commitment on emissions, reigniting optimism about - coordinated global climate action." -Ability: "Want to hear more, or are you all set?" -You: "More" -Ability: "Here are the remaining topics:" -Ability: "Number 4: Tech Innovation. Distributed GPU rendering is turning heads, - with new platforms making high-end graphics accessible on everyday hardware." -Ability: "Number 5: Global Markets. Better-than-expected inflation figures sparked - a broad rally, lifting both equities and digital assets simultaneously." -Ability: "That's all 5. Anything else?" -You: "All good" -Ability: "Take care!" 
+You: "Twitter trends" +Ability: "Hey there, let us check what is buzzing on X." +Ability: "What topic would you like to explore? You can say anything, + for example Space Exploration, Football, or Electric Vehicles." +You: "Artificial Intelligence" +Ability: "One moment, fetching top tweets on Artificial Intelligence." +Ability: "Here are the top 2 tweets on Artificial Intelligence:" +Ability: "Tweet 1: Developers are rethinking how AI fits into workflows + at every level, from junior engineers building foundations + to seniors orchestrating entire agent teams." +Ability: "Tweet 2: Every company is becoming an AI company whether they + want to or not — those who adapt their workflows will come out ahead." +Ability: "There is one more tweet. Want to hear it, or are you all set?" +You: "Sure" +Ability: "Tweet 3: AI agents are not here to replace engineers — + they are taking over the repetitive work, leaving + the creative thinking to humans." +Ability: "That is the top 3. Anything else?" +You: "Done" +Ability: "Stay informed!" ``` -### Full Mode - -When you ask for a full briefing like "Catch me up" or "All trends", the ability: +### Full Mode (Demo) -1. Speaks the filler phrase naming all topics -2. Fetches, scores, and summarises all 5 topics (same pipeline as Quick Mode) -3. Reads all 5 summaries aloud -4. Opens an interactive Q&A session -5. You can ask about a specific topic by number ("Tell me about number 2") -6. You can ask to hear them again ("Read them again") -7. Exits after you say "done" or after 2 idle responses - -**Example:** ``` -You: "Catch me up" -Ability: "One moment — fetching top tweets on Artificial Intelligence, Crypto, - Climate, Tech Innovation, and Global Markets." -Ability: "Hey there, here's your full rundown of the top 5 trending topics on X:" -Ability: "Number 1: Artificial Intelligence. [LLM summary]" -... -Ability: "Want to know more about any of these? Ask away, or say done when finished." 
-You: "Tell me about number three" -Ability: "More on Climate: [LLM-generated follow-up insight using the top tweet as context]" -Ability: "What else would you like to know?" -You: "Goodbye" -Ability: "Stay informed!" +You: "Full briefing" +Ability: "Hey there, welcome to X News! I will help you catch up on + the latest tweets for any topic you care about." +Ability: "Here are the available topics: 1. Artificial Intelligence, + 2. Crypto, 3. Climate, 4. Tech Innovation, 5. Global Markets. + Just say the number or the topic name." +You: "Three" +Ability: "Here is the full rundown of the top 3 tweets on Climate:" +Ability: "Tweet 1: The Climate Summit 2026 concluded with 47 nations + signing binding emissions targets, the most ambitious global + agreement since Paris." +Ability: "Tweet 2: Solar is now the cheapest energy source in history, + and every new coal plant built today will be a stranded asset + within a decade." +Ability: "Tweet 3: Carbon capture technology just hit a new efficiency + milestone, suggesting we may have more tools available than + previously thought." +Ability: "Overall: Climate Summit 2026 has produced a landmark multi-nation + commitment on emissions, reigniting optimism about coordinated + global climate action." +Ability: "Want to know more about any of these? Ask away, or say done + when finished." +You: "Tell me more about tweet two" +Ability: "More on tweet 2: The falling cost of solar is accelerating + the retirement of fossil fuel infrastructure globally, + making clean energy the default economic choice." +Ability: "What else would you like to know?" +You: "Goodbye" +Ability: "Catch you later!" 
``` -## Voice Design Principles - -This ability follows OpenHome's voice-first design guidelines: - -- **Named filler speech** — "Fetching top tweets on Artificial Intelligence, Crypto…" instead of a generic "one sec" -- **Short responses** — 1–2 sentences per turn, progressive disclosure -- **Natural language** — Conversational summaries instead of raw tweet text or numerical counts -- **Exit handling** — Multiple natural ways to exit: "done", "stop", "bye", "that's all" -- **Idle detection** — Offers to sign off after 2 silent responses -- **Concurrent fetching** — All 5 topics fetched in parallel to minimise wait time - -## SDK Usage +--- -### Core Patterns Used +## SDK Patterns Used -**Capturing user input (critical — must run first):** +**Capturing the trigger phrase (runs first):** ```python user_input = await self.capability_worker.wait_for_complete_transcription() ``` @@ -204,116 +241,115 @@ user_input = await self.capability_worker.wait_for_complete_transcription() await self.capability_worker.speak("Message to user") ``` -**Listening:** +**Listening for a reply:** ```python user_input = await self.capability_worker.user_response() ``` -**LLM for summarisation and Q&A (synchronous — no await):** +**LLM text generation (synchronous — no await):** ```python response = self.capability_worker.text_to_text_response(prompt) ``` -**Concurrent API calls:** +**HTTP calls (plain synchronous — no asyncio or threading):** ```python -tasks = [self._fetch_top_tweet_for_topic(topic) for topic in TOPIC_SEEDS] -results = await asyncio.gather(*tasks, return_exceptions=True) +resp = requests.get(url, headers=headers, timeout=10) ``` -**Blocking HTTP inside async:** +**Managed sleep (use instead of asyncio.sleep):** ```python -resp = await asyncio.to_thread(requests.get, url, headers=headers, timeout=10) +await self.worker.session_tasks.sleep(0.4) ``` -**Patient input polling:** +**Per-user file storage:** ```python -user_input = await self.wait_for_input(max_attempts=5, 
wait_seconds=3.0) +await self.capability_worker.write_file("prefs.json", json.dumps(data), False) +raw = await self.capability_worker.read_file("prefs.json", False) ``` -**Exit:** +**Always call at the end:** ```python -self.capability_worker.resume_normal_flow() # Always call this when done! +self.capability_worker.resume_normal_flow() ``` -## API Information - -**Provider:** X (Twitter) Official API -**Endpoint:** `https://api.twitter.com/2/tweets/search/recent` -**Authentication:** Bearer Token -**Fields requested:** `text, public_metrics` -**Filters applied:** `-is:retweet -is:reply lang:en` -**Results per topic:** `max_results=10` -**Required Header:** `Authorization: Bearer YOUR_TOKEN` - -### Engagement Scoring Formula - -Each of the 10 fetched tweets is scored as follows: - -| Metric | Weight | Reason | -|--------|--------|--------| -| `like_count` | ×3 | Strongest positive signal | -| `retweet_count` | ×2 | Indicates shareworthy content | -| `quote_count` | ×2 | Signals conversation-worthy content | -| `reply_count` | ×1 | Engagement but can be negative | -| `bookmark_count` | ×1 | Quiet saves, moderate signal | -| `impression_count` | ×0 | Excluded — reflects reach, not quality | - -The tweet with the highest score wins and is passed to the LLM for summarisation. +--- -### Demo Data +## Demo Data -The ability includes demo data used when no API key is configured. Each entry mirrors the live data structure exactly: +When no API token is configured, `DEMO_TRENDS` provides 3 pre-scored tweets per topic: ```python -DEMO_TRENDS = [ - { - "name": "Artificial Intelligence", - "top_tweet": "2026 is the year of AI...", - "score": 42, - "summary": "Developers are debating how AI changes workflows..." 
+DEMO_TRENDS = { + "Artificial Intelligence": { + "summary": "Developers are debating how AI changes workflows...", + "tweets": [ + {"text": "2026 is the year of AI...", "score": 420}, + {"text": "Every company is now an AI company...", "score": 310}, + {"text": "AI agents are not replacing engineers...", "score": 275}, + ], }, ... -] +} ``` -This lets you test the full conversation flow, demonstrate the ability, and develop without API costs or rate limits. +Demo tweets are pre-cleaned and require no LLM polishing before being spoken. + +--- ## Customisation -- **Change topics** — Edit `TOPIC_SEEDS` to track any subjects you care about. The filler speech updates automatically. -- **Adjust scoring weights** — Modify `score_tweet()` to weight engagement signals differently. +- **Change topics** — Edit `TOPIC_SEEDS`. Demo menu and filler speech update automatically. +- **Adjust scoring** — Modify `score_tweet()` to weight metrics differently. - **Change result count** — Update `max_results=10` in `RECENT_SEARCH_URL` (max 100 on Basic tier). -- **Add time context** — Append `start_time` to the API query for "this morning's tweets" vs "this week's". -- **Reading preferences** — Let users configure how many topics to read via the preferences file. +- **Add time filters** — Append `start_time` to the API query for "this morning's tweets". + +--- + +## Allowed Libraries + +This ability uses only OpenHome-approved imports: + +| Import | Purpose | +|--------|---------| +| `json` | Preferences file serialisation | +| `re` | Tweet text cleaning (regex) | +| `random` | Random filler phrase selection | +| `requests` | HTTP calls to X API | + +No `asyncio`, `concurrent`, `threading`, or `signal` — all blocked by the OpenHome sandbox. 
+ +--- ## Troubleshooting -**"I'm having trouble reaching X right now"** -- Check your Bearer Token is correct in `main.py` +**"I could not find any tweets on [topic] right now"** +- Check your Bearer Token is set correctly in `main.py` - Verify you have API credits remaining (Free tier: 500 requests/month) -- Confirm network connectivity in your OpenHome settings +- Try a broader topic name — very niche queries can return zero results -**Ability doesn't trigger** -- Verify trigger words in the dashboard match `config.json` -- Try more explicit phrases: "What's trending on X" rather than just "trending" +**Ability does not trigger** +- Confirm trigger words in the dashboard match your `config.json` +- Try an explicit phrase: "Twitter trends" or "X news" - Confirm the ability is enabled and saved -**All topics fall back to demo data** -- Check the API token is not still set to the placeholder value `"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"` -- Run a manual `curl` test against the endpoint to confirm your token has v2 Recent Search access +**Token is set but still getting demo data** +- Make sure the token is not still the placeholder `"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"` +- Test manually with `curl`: + ```bash + curl -H "Authorization: Bearer YOUR_TOKEN" \ + "https://api.twitter.com/2/tweets/search/recent?query=AI&max_results=10" + ``` -## Contributing - -1. Fork the OpenHome abilities repo -2. Make your changes -3. Test thoroughly using "Start Live Test" -4. Submit a PR with a clear description of what changed and why +--- -## License +## API Reference -Open source under the same license as the OpenHome project. 
+**Endpoint:** `GET https://api.twitter.com/2/tweets/search/recent` +**Auth:** `Authorization: Bearer YOUR_TOKEN` +**Fields:** `text, public_metrics` +**Filters:** `-is:retweet -is:reply lang:en` +**Results:** `max_results=10` --- -**Built for OpenHome** — The open-source voice AI platform -**Questions?** Join the [OpenHome Discord](https://discord.gg/openhome) \ No newline at end of file +**Built for OpenHome** — The open-source voice AI platform \ No newline at end of file diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index 167c59f8..602a360a 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -1,6 +1,6 @@ -import asyncio import json import re +import random import requests from src.agent.capability import MatchingCapability @@ -13,7 +13,7 @@ X_API_BEARER_TOKEN = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # ============================================================================ -# TOPIC SEEDS — one API call per topic, best tweet selected per topic +# TOPIC SEEDS — selectable options shown to user in demo mode # ============================================================================ TOPIC_SEEDS = [ "Artificial Intelligence", @@ -29,91 +29,92 @@ EXIT_WORDS = [ "exit", "stop", "quit", "done", "bye", "goodbye", "cancel", "nothing else", "all good", "nope", "no thanks", "i'm good", - "that's all", "never mind", "leave", "that is all" + "that's all", "never mind", "leave", "that is all", ] FULL_MODE_TRIGGERS = [ "catch me up", "all trends", "full briefing", "everything", "run through", "brief me", "all of them", "the full list", "full list", "all five", "read all", "read them all", - "dive in", "deep dive", "explore", "tell me everything", 'all tweets' + "dive in", "deep dive", "explore", "tell me everything", "all tweets", + "all three", "show all", ] MORE_WORDS = [ "more", "rest", "continue", "yes", "yeah", "sure", - "go ahead", "keep going", "read more", "next", "and" -] - -FILLER_PHRASES = [ - "One sec, 
checking what's hot on X.", - "Give me a moment, pulling the latest tweets.", - "Standby, grabbing the top topics from X.", - "Let me see what's trending right now.", - "Hang on, fetching the latest from X." + "go ahead", "keep going", "read more", "next", "and", ] FILLER_INTRO_TEMPLATES = [ - "Let me fetch the top tweets on {topics} — just a moment.", - "Pulling the most popular tweets on {topics} right now.", - "Give me a second, grabbing the top tweets on {topics}.", - "One moment — fetching top tweets on {topics}.", - "Looking up the best tweets on {topics} for you.", + "Let me fetch the top tweets on {topic}, just a moment.", + "Pulling the most popular tweets on {topic} right now.", + "Give me a second, grabbing the top tweets on {topic}.", + "One moment, fetching top tweets on {topic}.", + "Looking up the best tweets on {topic} for you.", ] -# Demo data — one entry per TOPIC_SEED, mirrors live structure {name, top_tweet, score, summary} -DEMO_TRENDS = [ - { - "name": "Artificial Intelligence", - "top_tweet": "2026 is the year of AI. But we use it differently at junior, mid, senior levels. Build foundations, collab with agents, orchestrate teams.", - "score": 42, - "summary": "Developers are debating how AI changes workflows across every seniority level, from building basics to orchestrating full agent teams." +# ============================================================================ +# Demo tweet data — 3 representative tweets per TOPIC_SEED +# ============================================================================ +DEMO_TRENDS = { + "Artificial Intelligence": { + "summary": "Developers are debating how AI changes workflows across every seniority level, from building basics to orchestrating full agent teams.", + "tweets": [ + {"text": "2026 is the year of AI. But we use it differently at junior, mid, senior levels. 
Build foundations, collab with agents, orchestrate teams.", "score": 420}, + {"text": "Every company is now an AI company whether they like it or not. The ones who adapt their workflows will win. The rest will be left behind.", "score": 310}, + {"text": "AI agents are not replacing engineers. They are replacing the boring parts. The creative, architectural thinking? Still 100% human.", "score": 275}, + ], }, - { - "name": "Crypto", - "top_tweet": "I'm Sergey Polonsky, the developer behind Moscow City. My new legacy is a global network of 12 luxury eco-hubs combined with the $OAZIS token.", - "score": 12, - "summary": "Real-world asset tokenisation is gaining momentum, with developers blending physical infrastructure and digital tokens into new hybrid ecosystems." + "Crypto": { + "summary": "Real-world asset tokenisation is gaining momentum, with developers blending physical infrastructure and digital tokens into new hybrid ecosystems.", + "tweets": [ + {"text": "My new legacy is a global network of 12 luxury eco-hubs combined with a new token. Real assets on-chain is the future.", "score": 120}, + {"text": "Bitcoin just crossed 105k again. The institutional money did not leave, they were just waiting for the right regulatory green light.", "score": 98}, + {"text": "The next wave of DeFi will not be speculative. It will be boring, compliant, and massive. Real assets, real yields, real users.", "score": 85}, + ], }, - { - "name": "Climate", - "top_tweet": "The Climate Summit 2026 concluded with 47 nations signing binding emissions targets, the most ambitious global agreement since Paris.", - "score": 98, - "summary": "Climate Summit 2026 has produced a landmark multi-nation commitment on emissions, reigniting optimism about coordinated global climate action." 
+ "Climate": { + "summary": "Climate Summit 2026 has produced a landmark multi-nation commitment on emissions, reigniting optimism about coordinated global climate action.", + "tweets": [ + {"text": "The Climate Summit 2026 concluded with 47 nations signing binding emissions targets, the most ambitious global agreement since Paris.", "score": 980}, + {"text": "Solar is now the cheapest energy source in history. Every new coal plant built today is a stranded asset within 10 years. The math is clear.", "score": 740}, + {"text": "Carbon capture tech just hit a new efficiency milestone. We might actually have more tools than we thought to pull this back.", "score": 610}, + ], }, - { - "name": "Tech Innovation", - "top_tweet": "Ready to put your GPU to work? YOM Official is bridging the gap between high-end rendering and everyday devices for developers and gamers alike.", - "score": 35, - "summary": "Distributed GPU rendering is turning heads, with new platforms promising to make high-end graphics accessible on everyday consumer hardware." + "Tech Innovation": { + "summary": "Distributed GPU rendering is turning heads, with new platforms promising to make high-end graphics accessible on everyday consumer hardware.", + "tweets": [ + {"text": "Ready to put your GPU to work? New platforms are bridging the gap between high-end rendering and everyday devices for developers and gamers alike.", "score": 350}, + {"text": "Spatial computing is finally hitting its stride. The hardware caught up with the vision. 2026 is the year it stops being a demo.", "score": 290}, + {"text": "The most underrated tech story right now: edge inference. 
Running large models locally on consumer devices is getting real, fast.", "score": 240},
+        ],
     },
-    {
-        "name": "Global Markets",
-        "top_tweet": "Global markets rallied sharply today as inflation data came in below forecast, boosting investor confidence across equities and crypto alike.",
-        "score": 65,
-        "summary": "Better-than-expected inflation figures have sparked a broad market rally, lifting both traditional equities and digital assets simultaneously."
+    "Global Markets": {
+        "summary": "Better-than-expected inflation figures have sparked a broad market rally, lifting both traditional equities and digital assets simultaneously.",
+        "tweets": [
+            {"text": "Global markets rallied sharply today as inflation data came in below forecast, boosting investor confidence across equities and crypto alike.", "score": 650},
+            {"text": "The Fed held rates again. Markets expected it. But the language in the statement shifted and traders caught it immediately.", "score": 520},
+            {"text": "Emerging markets are quietly outperforming in 2026. Most retail investors have not noticed yet. That is the opportunity.", "score": 410},
+        ],
     },
-]
+}
 
 PREFERENCES_FILE = "x_news_prefs.json"
 
-# Recent Search API — fetches 10 tweets per query
+# X Recent Search API — 30 tweets per query, no retweets, no replies, English only
 RECENT_SEARCH_URL = (
     "https://api.twitter.com/2/tweets/search/recent"
-    "?query={query} -is:retweet -is:reply lang:en"
+    "?query={query}%20-is%3Aretweet%20-is%3Areply%20lang%3Aen"
     "&tweet.fields=text,public_metrics"
-    "&max_results=10"
+    "&max_results=30"
 )
 
 
 # ============================================================================
 # SCORING HELPER
 # ============================================================================
-def score_tweet(public_metrics: dict) -> int:
-    """
-    Compute a weighted engagement score from public_metrics. 
-    Weights: likes x3 | retweets x2 | quotes x2 | replies x1 | bookmarks x1
-    Impression count excluded — it reflects reach, not engagement quality.
-    """
+def score_tweet(public_metrics):
+    """Weighted engagement: likes x3 | retweets x2 | quotes x2 | replies x1 | bookmarks x1"""
     return (
         public_metrics.get("like_count", 0) * 3
         + public_metrics.get("retweet_count", 0) * 2
@@ -124,22 +125,33 @@
 
 
 # ============================================================================
-# MAIN ABILITY CLASS
+# MAIN CAPABILITY CLASS
 # ============================================================================
 class XNewsFeedCapability(MatchingCapability):
     """
-    X News Feed Ability — for each topic in TOPIC_SEEDS:
-    1. Fetch 10 recent tweets via Recent Search API
-    2. Score each tweet with weighted public_metrics engagement
-    3. Keep the highest-scoring tweet as the topic representative
-    4. Send all 5 top tweets to the LLM for trend-style summaries
-    Quick Mode: top 3, offer more.
-    Full Mode: all 5, then interactive Q&A.
+    X News Feed Capability for OpenHome.
+
+    DEMO MODE (no API token configured):
+    - Present the 5 static TOPIC_SEEDS as a numbered menu.
+    - User picks one by number or name.
+    - Show the top 3 pre-scored demo tweets for that topic.
+
+    LIVE MODE (valid API token present):
+    - Ask the user to name any topic freely.
+    - Fetch up to 30 tweets from X Recent Search API (synchronous requests.get).
+    - Score with weighted public_metrics, keep top 3.
+
+    QUICK MODE (default): show top 2 tweets, offer the 3rd.
+    FULL MODE (trigger phrases like "all tweets", "full briefing"):
+    show all 3 upfront then open an interactive Q&A loop. 
""" worker: AgentWorker = None capability_worker: CapabilityWorker = None - trending_topics: list = [] + + selected_topic: str = "" + fetched_tweets: list = [] + topic_summary: str = "" mode: str = "quick" user_name: str = "there" first_visit: bool = True @@ -159,27 +171,44 @@ def call(self, worker: AgentWorker): async def main_flow(self): try: await self.capture_user_input() - await self.load_user_preferences() self.mode = self.detect_mode_from_trigger() - self.worker.editor_logging_handler.info(f"Mode detected: {self.mode}") - - await self.fetch_trending_topics_with_filler() - - if not self.trending_topics: - await self.capability_worker.speak( - "I'm having trouble reaching X right now. Please try again in a moment." - ) - self.capability_worker.resume_normal_flow() - return + self.worker.editor_logging_handler.info(f"[XNews] Mode: {self.mode}") if self.first_visit: await self.capability_worker.speak( f"Hey {self.user_name}, welcome to X News! " - "First time here? I'll show you around." + "I will help you catch up on the latest tweets for any topic you care about." ) self.first_visit = False await self.save_user_preferences() + else: + await self.capability_worker.speak( + f"Hey {self.user_name}, let us check what is buzzing on X." + ) + + if self._is_demo_mode(): + self.selected_topic = await self.ask_user_to_pick_topic() + if not self.selected_topic: + await self.capability_worker.speak("No topic selected. Come back anytime!") + self.capability_worker.resume_normal_flow() + return + self.fetched_tweets = DEMO_TRENDS[self.selected_topic]["tweets"] + self.topic_summary = DEMO_TRENDS[self.selected_topic]["summary"] + else: + self.selected_topic = await self.ask_user_for_custom_topic() + if not self.selected_topic: + await self.capability_worker.speak("No topic provided. 
Come back anytime!") + self.capability_worker.resume_normal_flow() + return + await self.fetch_and_score_live_tweets(self.selected_topic) + if not self.fetched_tweets: + await self.capability_worker.speak( + f"I could not find any tweets on {self.selected_topic} right now. " + "Try a different topic!" + ) + self.capability_worker.resume_normal_flow() + return if self.mode == "full": await self.full_mode() @@ -187,354 +216,225 @@ async def main_flow(self): await self.quick_mode() except Exception as e: - self.worker.editor_logging_handler.error(f"Error in main_flow: {e}") - await self.capability_worker.speak( - "Sorry, something went wrong. Please try again." - ) + self.worker.editor_logging_handler.error(f"[XNews] main_flow error: {e}") + await self.capability_worker.speak("Sorry, something went wrong. Please try again.") self.capability_worker.resume_normal_flow() # ======================================================================== - # CAPTURE USER INPUT + # DEMO vs LIVE # ======================================================================== - async def capture_user_input(self): - try: - self.worker.editor_logging_handler.info("Waiting for user input...") - - user_input = await self.capability_worker.wait_for_complete_transcription() - if user_input and user_input.strip(): - self.trigger_phrase = user_input.strip().lower() - return - - user_input = await self.capability_worker.user_response() - if user_input and user_input.strip(): - self.trigger_phrase = user_input.strip().lower() - return - - await self.worker.session_tasks.sleep(0.5) - history = self.worker.agent_memory.full_message_history - if history: - last_msg = history[-1] - try: - if isinstance(last_msg, dict): - if last_msg.get("role") == "user": - self.trigger_phrase = last_msg.get("content", "").lower() - else: - if hasattr(last_msg, "role") and last_msg.role == "user": - self.trigger_phrase = (last_msg.content or "").lower() - except Exception: - pass - - except Exception as e: - 
self.worker.editor_logging_handler.error(f"Error capturing user input: {e}") - self.trigger_phrase = "" - - # ======================================================================== - # MODE DETECTION - # ======================================================================== - def detect_mode_from_trigger(self) -> str: - if not self.trigger_phrase: - return "quick" - for phrase in FULL_MODE_TRIGGERS: - if phrase in self.trigger_phrase: - self.worker.editor_logging_handler.info(f"Full mode triggered by: '{phrase}'") - return "full" - return "quick" - - # ======================================================================== - # FILE PERSISTENCE - # ======================================================================== - async def load_user_preferences(self): - try: - if await self.capability_worker.check_if_file_exists(PREFERENCES_FILE, False): - raw = await self.capability_worker.read_file(PREFERENCES_FILE, False) - prefs = json.loads(raw) - self.user_name = prefs.get("name", "there") - self.first_visit = prefs.get("first_visit", False) - else: - self.first_visit = True - self.user_name = "there" - await self.save_user_preferences() - except Exception as e: - self.worker.editor_logging_handler.warning(f"Couldn't load preferences: {e}") - self.first_visit = True - self.user_name = "there" - - async def save_user_preferences(self): - try: - prefs = {"name": self.user_name, "first_visit": self.first_visit, "last_used": "x_news_feed"} - await self.capability_worker.delete_file(PREFERENCES_FILE, False) - await self.capability_worker.write_file(PREFERENCES_FILE, json.dumps(prefs), False) - except Exception as e: - self.worker.editor_logging_handler.warning(f"Couldn't save preferences: {e}") + def _is_demo_mode(self): + return X_API_BEARER_TOKEN in ( + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "REPLACE_WITH_YOUR_KEY", "", None + ) # ======================================================================== - # PATIENT INPUT HELPER + # TOPIC SELECTION — DEMO MODE # 
======================================================================== - async def wait_for_input(self, max_attempts: int = 5, wait_seconds: float = 3.0, context: str = "") -> str: - for attempt in range(max_attempts): - await self.worker.session_tasks.sleep(wait_seconds) - user_input = await self.capability_worker.user_response() - if user_input and user_input.strip(): - return user_input.strip() - self.worker.editor_logging_handler.info( - f"Empty on attempt {attempt + 1}/{max_attempts}, retrying..." - ) + async def ask_user_to_pick_topic(self): + topics_spoken = ", ".join( + f"{i}. {name}" for i, name in enumerate(TOPIC_SEEDS, 1) + ) + await self.capability_worker.speak( + f"Here are the available topics: {topics_spoken}. " + "Just say the number or the topic name." + ) - if context == "initial": + for attempt in range(3): + user_input = await self.wait_for_input(max_attempts=4, wait_seconds=3.0) + if not user_input: + if attempt < 2: + await self.capability_worker.speak( + "I did not catch that. Please say a number from 1 to 5, or a topic name." + ) + continue + return "" + if self.is_exit_command(user_input.lower()): + return "" + matched = self._match_topic(user_input) + if matched: + self.worker.editor_logging_handler.info(f"[XNews] Topic picked: {matched}") + return matched await self.capability_worker.speak( - "I didn't catch that. Just say 'more' to hear the rest, or I'll sign off." + "I did not recognise that. Try a number from 1 to 5, " + "or a name like Crypto or Climate." 
) - await self.worker.session_tasks.sleep(2) - user_input = await self.capability_worker.user_response() - if user_input and user_input.strip(): - return user_input.strip() return "" + def _match_topic(self, user_input): + text = user_input.strip().lower() + for i, name in enumerate(TOPIC_SEEDS, 1): + if str(i) in text or self.number_to_word(i) in text: + return name + for name in TOPIC_SEEDS: + if any(word in text for word in name.lower().split()): + return name + return "" + # ======================================================================== - # DATA FETCHING — per-topic, scored, top-tweet selection + # TOPIC INPUT — LIVE MODE # ======================================================================== - async def fetch_trending_topics_with_filler(self): - import random - - # Build a natural-language list of the topic seeds - # e.g. "Artificial Intelligence, Crypto, Climate, Tech Innovation, and Global Markets" - if len(TOPIC_SEEDS) > 1: - topics_spoken = ", ".join(TOPIC_SEEDS[:-1]) + ", and " + TOPIC_SEEDS[-1] - else: - topics_spoken = TOPIC_SEEDS[0] - - template = random.choice(FILLER_INTRO_TEMPLATES) - filler_message = template.format(topics=topics_spoken) - - await self.capability_worker.speak(filler_message) - await self.fetch_trending_topics() - - async def fetch_trending_topics(self): - """ - For each topic in TOPIC_SEEDS: - 1. Fetch up to 10 recent tweets (no retweets, no replies, English only) - 2. Score every tweet using weighted public_metrics - 3. Select the highest-scoring tweet as the topic representative - Then pass all 5 top tweets to the LLM for trend-style summaries. - Falls back to DEMO_TRENDS if the API key is missing or all topic calls fail. 
- """ - if X_API_BEARER_TOKEN in ("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "REPLACE_WITH_YOUR_KEY", "", None): - self.worker.editor_logging_handler.info("Demo mode — API key not configured.") - self.trending_topics = DEMO_TRENDS.copy() - return - - # Fetch best tweet per topic concurrently - tasks = [self._fetch_top_tweet_for_topic(topic) for topic in TOPIC_SEEDS] - results = await asyncio.gather(*tasks, return_exceptions=True) - - top_tweets = [] # [{name, top_tweet, score}, ...] - for topic, result in zip(TOPIC_SEEDS, results): - if isinstance(result, Exception) or result is None: - self.worker.editor_logging_handler.warning( - f"No result for topic '{topic}', skipping." - ) - continue - top_tweets.append(result) - - if not top_tweets: - self.worker.editor_logging_handler.warning("All topic fetches failed — using demo data.") - self.trending_topics = DEMO_TRENDS.copy() - return - - self.worker.editor_logging_handler.info( - f"Collected top tweets for {len(top_tweets)} / {len(TOPIC_SEEDS)} topics. " - "Sending to LLM for summarisation." + async def ask_user_for_custom_topic(self): + await self.capability_worker.speak( + "What topic would you like to explore? " + "You can say anything, for example Space Exploration, Football, or Electric Vehicles." ) - self.trending_topics = await self._summarise_top_tweets_with_llm(top_tweets) + for attempt in range(3): + user_input = await self.wait_for_input(max_attempts=4, wait_seconds=4.0) + if not user_input: + if attempt < 2: + await self.capability_worker.speak( + "I did not catch that. What topic are you interested in?" 
+ ) + continue + return "" + if self.is_exit_command(user_input.lower()): + return "" + topic = user_input.strip() + self.worker.editor_logging_handler.info(f"[XNews] Custom topic: {topic}") + return topic + return "" - async def _fetch_top_tweet_for_topic(self, topic: str) -> dict | None: - """ - Fetch 10 recent tweets for `topic`, score each one with public_metrics, - and return the best as {name, top_tweet, score}. Returns None on failure. + # ======================================================================== + # LIVE TWEET FETCHING + SCORING + # Plain synchronous requests.get — same pattern as the OpenHome weather example. + # No asyncio, no concurrent, no threading needed. + # ======================================================================== + async def fetch_and_score_live_tweets(self, topic): + filler = random.choice(FILLER_INTRO_TEMPLATES) + await self.capability_worker.speak(filler.format(topic=topic)) - Scoring formula (see score_tweet): - likes x3 | retweets x2 | quotes x2 | replies x1 | bookmarks x1 - """ try: - url = RECENT_SEARCH_URL.format(query=requests.utils.quote(topic)) + encoded_topic = requests.utils.quote(topic) + url = RECENT_SEARCH_URL.format(query=encoded_topic) headers = {"Authorization": f"Bearer {X_API_BEARER_TOKEN}"} - resp = await asyncio.to_thread( - requests.get, url, headers=headers, timeout=10 - ) + resp = requests.get(url, headers=headers, timeout=10) if resp.status_code != 200: self.worker.editor_logging_handler.warning( - f"[{topic}] API returned {resp.status_code}." + f"[XNews] API {resp.status_code} for '{topic}'" ) - return None - self.worker.editor_logging_handler.warning( - f"[{topic}] API returned {resp.json()}." - ) + self.fetched_tweets = [] + return tweets = resp.json().get("data", []) if not tweets: - self.worker.editor_logging_handler.warning( - f"[{topic}] No tweets in response." 
- ) - return None + self.worker.editor_logging_handler.warning(f"[XNews] No tweets for '{topic}'") + self.fetched_tweets = [] + return - # Log all scores for debugging + scored = [] for t in tweets: s = score_tweet(t.get("public_metrics", {})) self.worker.editor_logging_handler.info( - f" [{topic}] score={s:>4} {t.get('text', '')[:60]}" + f"[XNews] score={s} {t.get('text', '')[:60]}" ) + scored.append({"text": t.get("text", "").strip(), "score": s}) - # Pick the winner - best_tweet = max( - tweets, - key=lambda t: score_tweet(t.get("public_metrics", {})) - ) - best_score = score_tweet(best_tweet.get("public_metrics", {})) - + scored.sort(key=lambda x: x["score"], reverse=True) + self.fetched_tweets = scored[:3] self.worker.editor_logging_handler.info( - f"[{topic}] WINNER score={best_score} | {best_tweet.get('text', '')[:80]}" + f"[XNews] Top 3 selected for '{topic}'" ) - return { - "name": topic, - "top_tweet": best_tweet.get("text", "").strip(), - "score": best_score, - } + self.topic_summary = self._summarise_with_llm(topic, self.fetched_tweets) except Exception as e: - self.worker.editor_logging_handler.error(f"[{topic}] Fetch error: {e}") - return None + self.worker.editor_logging_handler.error(f"[XNews] Fetch error for '{topic}': {e}") + self.fetched_tweets = [] - async def _summarise_top_tweets_with_llm(self, top_tweets: list) -> list: - """ - Send the best tweet per topic to the LLM and ask for trend-style summaries. - Returns a list of {name, top_tweet, score, summary} dicts. - Falls back to DEMO_TRENDS on any parsing error. - """ + def _summarise_with_llm(self, topic, tweets): + """text_to_text_response is synchronous per the OpenHome docs.""" try: tweet_block = "\n".join( - f"{i + 1}. Topic: {item['name']}\n Top Tweet: {item['top_tweet']}" - for i, item in enumerate(top_tweets) + f"{i + 1}. {t['text']}" for i, t in enumerate(tweets) ) - prompt = ( - "You are a news analyst. 
Below are the highest-engagement tweets for each topic.\n" - "For each topic write a short, conversational 1-2 sentence summary that captures " - "the key theme or sentiment from that tweet.\n" - "Return ONLY a valid JSON array — no markdown, no explanation — in this exact format:\n" - '[{"name": "", "summary": ""}, ...]\n\n' - f"Topics and their top tweets:\n{tweet_block}" + f"You are a news analyst. Below are the top tweets on '{topic}'.\n" + f"Write a short 1-2 sentence conversational summary capturing the key theme.\n" + f"No markdown. No preamble. Just the summary.\n\nTweets:\n{tweet_block}" ) - - raw_response = self.capability_worker.text_to_text_response(prompt) - - # Strip accidental markdown fences - clean = raw_response.strip() - if clean.startswith("```"): - clean = re.sub(r"```[a-z]*\n?", "", clean).strip("` \n") - - parsed = json.loads(clean) - if not isinstance(parsed, list) or not parsed: - raise ValueError("LLM returned unexpected structure.") - - # Index summaries by topic name for easy lookup - summaries_by_name = {item["name"]: item.get("summary", "") for item in parsed} - - # Merge LLM summaries back with original top-tweet data - enriched = [] - for item in top_tweets: - enriched.append({ - "name": item["name"], - "top_tweet": item["top_tweet"], - "score": item["score"], - "summary": summaries_by_name.get(item["name"], ""), - }) - - self.worker.editor_logging_handler.info( - f"LLM produced summaries for {len(enriched)} topics." - ) - return enriched - + return self.capability_worker.text_to_text_response(prompt).strip() except Exception as e: - self.worker.editor_logging_handler.error( - f"LLM summarisation failed: {e} — using demo data." 
- ) - return DEMO_TRENDS.copy() + self.worker.editor_logging_handler.warning(f"[XNews] LLM summary failed: {e}") + return "" # ======================================================================== - # QUICK MODE + # QUICK MODE — top 2 shown, offer the 3rd # ======================================================================== async def quick_mode(self): - """Top 3 summaries, offer more, patient wait for response.""" + count = len(self.fetched_tweets) + show_first = min(2, count) + await self.capability_worker.speak( - f"Hey {self.user_name}, here are the top 3 trending topics right now:" + f"Here are the top {show_first} tweets on {self.selected_topic}:" ) await self.worker.session_tasks.sleep(0.4) - for i, topic in enumerate(self.trending_topics[:3], 1): - await self.speak_single_trend(i, topic) + for i in range(show_first): + await self.speak_single_tweet(i + 1, self.fetched_tweets[i]) await self.worker.session_tasks.sleep(0.3) - await self.capability_worker.speak("Want to hear more, or are you all set?") - - user_input = await self.wait_for_input(max_attempts=5, wait_seconds=3.0, context="initial") - - if not user_input: - await self.capability_worker.speak("Catch you later!") - self.capability_worker.resume_normal_flow() - return - - user_input_lower = user_input.lower() + if count >= 3: + await self.capability_worker.speak( + "There is one more tweet. Want to hear it, or are you all set?" 
+ ) + user_input = await self.wait_for_input( + max_attempts=5, wait_seconds=3.0, context="initial" + ) - if self.is_exit_command(user_input_lower): - await self.generate_contextual_goodbye() - self.capability_worker.resume_normal_flow() - return + if not user_input or self.is_exit_command(user_input.lower()): + await self.generate_contextual_goodbye() + self.capability_worker.resume_normal_flow() + return - if self.is_more_request(user_input_lower): - await self.capability_worker.speak("Here are the remaining topics:") - await self.worker.session_tasks.sleep(0.3) - for i, topic in enumerate(self.trending_topics[3:], 4): - await self.speak_single_trend(i, topic) + if self.is_more_request(user_input.lower()) or self.is_full_mode_request(user_input.lower()): + await self.speak_single_tweet(3, self.fetched_tweets[2]) await self.worker.session_tasks.sleep(0.3) - await self.capability_worker.speak("That's all 5. Anything else?") + await self.capability_worker.speak("That is the top 3. Anything else?") + else: + await self.capability_worker.speak("No problem. Anything else you would like?") final = await self.wait_for_input(max_attempts=3, wait_seconds=2.0) if not final or self.is_exit_command(final.lower()): await self.capability_worker.speak("Take care!") else: - await self.capability_worker.speak("That's what's hot on X. Anything else?") + await self.capability_worker.speak("That is all I found. 
Anything else?") final = await self.wait_for_input(max_attempts=3, wait_seconds=2.0) if not final or self.is_exit_command(final.lower()): - await self.capability_worker.speak("Alright, catch you later!") + await self.capability_worker.speak("Catch you later!") self.capability_worker.resume_normal_flow() # ======================================================================== - # FULL MODE + # FULL MODE — all 3 shown upfront, then Q&A loop # ======================================================================== async def full_mode(self): - """Read all 5 summaries, then open interactive Q&A loop.""" + count = len(self.fetched_tweets) await self.capability_worker.speak( - f"Hey {self.user_name}, here's your full rundown of the top 5 trending topics on X:" + f"Here is the full rundown of the top {count} tweets on {self.selected_topic}:" ) await self.worker.session_tasks.sleep(0.5) - for i, topic in enumerate(self.trending_topics, 1): - await self.speak_single_trend(i, topic) + for i, tweet in enumerate(self.fetched_tweets, 1): + await self.speak_single_tweet(i, tweet) await self.worker.session_tasks.sleep(0.4) + if self.topic_summary: + await self.capability_worker.speak(f"Overall: {self.topic_summary}") + await self.worker.session_tasks.sleep(0.3) + await self.capability_worker.speak( - "Want to know more about any of these? Ask away, or say done when you're finished." + "Want to know more about any of these? Ask away, or say done when finished." ) - await self.interactive_loop() + # ======================================================================== + # INTERACTIVE Q&A LOOP + # ======================================================================== async def interactive_loop(self): - """Q&A loop with idle detection.""" idle_count = 0 while True: @@ -544,30 +444,30 @@ async def interactive_loop(self): idle_count += 1 if idle_count >= 2: await self.capability_worker.speak( - "I'm still here if you need anything. Otherwise I'll sign off." 
+ "I am still here if you need anything. Otherwise I will sign off." ) await self.worker.session_tasks.sleep(3) break continue idle_count = 0 - user_input_lower = user_input.lower() + lower = user_input.lower() - if self.is_exit_command(user_input_lower): + if self.is_exit_command(lower): await self.generate_contextual_goodbye() break - if any(p in user_input_lower for p in ["again", "repeat", "read again"]): + if any(p in lower for p in ["again", "repeat", "read again"]): await self.capability_worker.speak("Sure, here they are again:") await self.worker.session_tasks.sleep(0.3) - for i, topic in enumerate(self.trending_topics, 1): - await self.speak_single_trend(i, topic) + for i, tweet in enumerate(self.fetched_tweets, 1): + await self.speak_single_tweet(i, tweet) await self.worker.session_tasks.sleep(0.3) await self.capability_worker.speak("Anything else?") continue - if any(w in user_input_lower for w in ["number", "topic", "tell me about", "more about"]): - await self.handle_topic_question(user_input_lower) + if any(w in lower for w in ["number", "tweet", "tell me about", "more about"]): + await self.handle_tweet_question(lower) continue await self.handle_general_question(user_input) @@ -575,61 +475,203 @@ async def interactive_loop(self): self.capability_worker.resume_normal_flow() # ======================================================================== - # HELPERS + # CAPTURE INITIAL TRIGGER # ======================================================================== - def is_exit_command(self, text: str) -> bool: - for word in EXIT_WORDS: - if re.search(r'\b' + re.escape(word) + r'\b', text): - return True - return False + async def capture_user_input(self): + try: + self.worker.editor_logging_handler.info("[XNews] Capturing trigger phrase...") - def is_more_request(self, text: str) -> bool: - return any(word in text for word in MORE_WORDS) + user_input = await self.capability_worker.wait_for_complete_transcription() + if user_input and user_input.strip(): 
+ self.trigger_phrase = user_input.strip().lower() + return + + user_input = await self.capability_worker.user_response() + if user_input and user_input.strip(): + self.trigger_phrase = user_input.strip().lower() + return + + await self.worker.session_tasks.sleep(0.5) + history = self.worker.agent_memory.full_message_history + if history: + last_msg = history[-1] + try: + if isinstance(last_msg, dict): + if last_msg.get("role") == "user": + self.trigger_phrase = last_msg.get("content", "").lower() + else: + if hasattr(last_msg, "role") and last_msg.role == "user": + self.trigger_phrase = (last_msg.content or "").lower() + except Exception: + pass + + except Exception as e: + self.worker.editor_logging_handler.error(f"[XNews] capture_user_input: {e}") + self.trigger_phrase = "" + + # ======================================================================== + # MODE DETECTION + # ======================================================================== + def detect_mode_from_trigger(self): + if not self.trigger_phrase: + return "quick" + for phrase in FULL_MODE_TRIGGERS: + if phrase in self.trigger_phrase: + self.worker.editor_logging_handler.info(f"[XNews] Full mode via: '{phrase}'") + return "full" + return "quick" + + def is_full_mode_request(self, text): + return any(phrase in text for phrase in FULL_MODE_TRIGGERS) - async def speak_single_trend(self, number: int, topic: dict): - """Speak one trend. Reads the LLM summary; falls back to topic name only.""" - name = topic.get("name", "Unknown") - summary = topic.get("summary", "") - clean_name = re.sub(r'#', 'hashtag ', name) - msg = f"Number {number}: {clean_name}. {summary}" if summary else f"Number {number}: {clean_name}." 
- await self.capability_worker.speak(msg) - - async def handle_topic_question(self, user_input: str): - topic_number = None - for i in range(1, 6): + # ======================================================================== + # FILE PERSISTENCE + # ======================================================================== + async def load_user_preferences(self): + try: + if await self.capability_worker.check_if_file_exists(PREFERENCES_FILE, False): + raw = await self.capability_worker.read_file(PREFERENCES_FILE, False) + prefs = json.loads(raw) + self.user_name = prefs.get("name", "there") + self.first_visit = prefs.get("first_visit", False) + else: + self.first_visit = True + self.user_name = "there" + await self.save_user_preferences() + except Exception as e: + self.worker.editor_logging_handler.warning(f"[XNews] load_prefs: {e}") + self.first_visit = True + self.user_name = "there" + + async def save_user_preferences(self): + try: + prefs = {"name": self.user_name, "first_visit": self.first_visit} + await self.capability_worker.delete_file(PREFERENCES_FILE, False) + await self.capability_worker.write_file(PREFERENCES_FILE, json.dumps(prefs), False) + except Exception as e: + self.worker.editor_logging_handler.warning(f"[XNews] save_prefs: {e}") + + # ======================================================================== + # PATIENT INPUT HELPER + # ======================================================================== + async def wait_for_input(self, max_attempts=5, wait_seconds=3.0, context=""): + for attempt in range(max_attempts): + await self.worker.session_tasks.sleep(wait_seconds) + user_input = await self.capability_worker.user_response() + if user_input and user_input.strip(): + return user_input.strip() + self.worker.editor_logging_handler.info( + f"[XNews] Empty input {attempt + 1}/{max_attempts}" + ) + + if context == "initial": + await self.capability_worker.speak( + "I did not catch that. Say more to hear the rest, or I will sign off." 
+ ) + await self.worker.session_tasks.sleep(2) + user_input = await self.capability_worker.user_response() + if user_input and user_input.strip(): + return user_input.strip() + + return "" + + # ======================================================================== + # PRESENTATION HELPERS + # ======================================================================== + def clean_tweet_text(self, text): + """ + Strip elements that are unnatural when read aloud: + - URLs (http/https links) + - Hashtags (#word) + - Mentions (@word) + - HTML entities (& < > " ') + - Excess whitespace left behind after stripping + The cleaned text is then passed to the LLM to produce a + single polished sentence before speaking. + """ + # Remove URLs + text = re.sub(r'https?://\S+', '', text) + # Remove hashtags + text = re.sub(r'#\S+', '', text) + # Remove mentions + text = re.sub(r'@\S+', '', text) + # Decode common HTML entities + text = text.replace('&', 'and') + text = text.replace('<', 'less than') + text = text.replace('>', 'greater than') + text = text.replace('"', '"') + text = text.replace(''', "'") + # Collapse extra spaces / newlines + text = re.sub(r'[\r\n]+', ' ', text) + text = re.sub(r' {2,}', ' ', text).strip() + return text + + def polish_tweet_for_speech(self, raw_text): + """ + Ask the LLM to rewrite the cleaned tweet as a single clean, + natural-sounding sentence — removing leftover symbols, emoji, + and any awkward phrasing introduced by stripping URLs/hashtags. + """ + try: + prompt = ( + "Rewrite the following tweet as a single clean, natural-sounding sentence " + "suitable for being read aloud. Remove any emoji, symbols, or awkward fragments. " + "Keep the core meaning. No markdown. No preamble. 
Just the sentence.\n\n" + f"Tweet: {raw_text}" + ) + result = self.capability_worker.text_to_text_response(prompt).strip() + return result if result else raw_text + except Exception as e: + self.worker.editor_logging_handler.warning(f"[XNews] polish_tweet failed: {e}") + return raw_text + + async def speak_single_tweet(self, number, tweet): + raw_text = tweet.get("text", "").strip() + score = tweet.get("score", 0) + if raw_text: + cleaned = self.clean_tweet_text(raw_text) + polished = self.polish_tweet_for_speech(cleaned) + await self.capability_worker.speak( + f"Tweet {number}: {polished}" + ) + else: + await self.capability_worker.speak(f"Tweet {number}: no content available.") + + # ======================================================================== + # Q&A HELPERS + # ======================================================================== + async def handle_tweet_question(self, user_input): + tweet_number = None + for i in range(1, 4): if str(i) in user_input or self.number_to_word(i) in user_input: - topic_number = i + tweet_number = i break - if topic_number and topic_number <= len(self.trending_topics): - topic = self.trending_topics[topic_number - 1] - name = topic.get("name", "Unknown") - existing_summary = topic.get("summary", "") - top_tweet = topic.get("top_tweet", "") - + if tweet_number and tweet_number <= len(self.fetched_tweets): + tweet = self.fetched_tweets[tweet_number - 1] + text = tweet.get("text", "") prompt = ( - f"Topic: '{name}' is trending on X.\n" - f"Top tweet: \"{top_tweet}\"\n" - f"Existing summary: {existing_summary}\n" - f"Give an additional 1-2 sentence conversational insight about why this matters. " + f"Topic: '{self.selected_topic}' is trending on X.\n" + f"Tweet: \"{text}\"\n" + f"Give an additional 1-2 sentence conversational insight about why this tweet matters. " f"Be concise. Under 40 words. No markdown." 
) analysis = self.capability_worker.text_to_text_response(prompt) - await self.capability_worker.speak(f"More on {name}: {analysis}") + await self.capability_worker.speak(f"More on tweet {tweet_number}: {analysis}") await self.worker.session_tasks.sleep(0.3) await self.capability_worker.speak("What else would you like to know?") else: await self.capability_worker.speak( - "I didn't catch that number. Try saying a number between 1 and 5." + f"I did not catch that. Try saying a number between 1 and {len(self.fetched_tweets)}." ) - async def handle_general_question(self, user_input: str): - topics_context = "; ".join( - [f"{t['name']}: {t.get('summary', '')}" for t in self.trending_topics] - ) + async def handle_general_question(self, user_input): + tweets_context = " | ".join(t.get("text", "") for t in self.fetched_tweets) prompt = ( - f"You are a helpful X news assistant. Current trending topics and summaries: {topics_context}.\n" - f"User: {user_input}\n" + f"You are a helpful X news assistant. The user asked about '{self.selected_topic}'.\n" + f"Top tweets: {tweets_context}\n" + f"Summary: {self.topic_summary}\n" + f"User question: {user_input}\n" f"Reply in 2 sentences max. Conversational. No markdown." ) response = self.capability_worker.text_to_text_response(prompt) @@ -640,10 +682,22 @@ async def handle_general_question(self, user_input: str): async def generate_contextual_goodbye(self): prompt = ( "Generate a brief friendly goodbye under 10 words for a news briefing. " - "Casual. Examples: 'Catch you later!', 'Stay informed!', 'Take care!'\nOne only:" + "Casual. Examples: Catch you later, Stay informed, Take care. 
One only:" ) goodbye = self.capability_worker.text_to_text_response(prompt).strip() await self.capability_worker.speak(goodbye) - def number_to_word(self, num: int) -> str: + # ======================================================================== + # UTILITY + # ======================================================================== + def is_exit_command(self, text): + for word in EXIT_WORDS: + if re.search(r'\b' + re.escape(word) + r'\b', text): + return True + return False + + def is_more_request(self, text): + return any(word in text for word in MORE_WORDS) + + def number_to_word(self, num): return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") From 19aec1dfeda2ca6504110ad47e15f47656397e15 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 12 Mar 2026 00:37:22 +0000 Subject: [PATCH 13/17] style: auto-format Python files with autoflake + autopep8 --- community/x-news-feed/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index 602a360a..ab237d22 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -627,7 +627,7 @@ def polish_tweet_for_speech(self, raw_text): async def speak_single_tweet(self, number, tweet): raw_text = tweet.get("text", "").strip() - score = tweet.get("score", 0) + tweet.get("score", 0) if raw_text: cleaned = self.clean_tweet_text(raw_text) polished = self.polish_tweet_for_speech(cleaned) From db73c63d01c8265777d7deee59fcc6e3f8690efa Mon Sep 17 00:00:00 2001 From: Uzair Ullah Date: Fri, 13 Mar 2026 15:27:18 +0500 Subject: [PATCH 14/17] Delete community/x-news-feed/config.json Signed-off-by: Uzair Ullah --- community/x-news-feed/config.json | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 community/x-news-feed/config.json diff --git a/community/x-news-feed/config.json b/community/x-news-feed/config.json deleted file mode 100644 index 243d934c..00000000 --- 
a/community/x-news-feed/config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "unique_name": "x_news_feed_analysis", - "matching_hotwords": [ - "what's trending on x", - "twitter trends", - "x news", - "x trending topics", - "show me x trends", - "what is trending on x", - "latest from x", - "x trends", - "all trends", - "all five trends", - "top tweets" - ] -} \ No newline at end of file From 0e6b8d67933403ab9b34b2733fa6e43822147d9a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 15 Mar 2026 19:07:20 +0000 Subject: [PATCH 15/17] style: auto-format Python files with autoflake + autopep8 --- community/x-news-feed/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index f88aef86..aebaf9e8 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -157,7 +157,7 @@ class XNewsFeedCapability(MatchingCapability): first_visit: bool = True trigger_phrase: str = "" - #{{register_capability}} + # {{register_capability}} def call(self, worker: AgentWorker): self.worker = worker From eeaaa8169fae8f0ffc1a3757bebdd4ef261f1897 Mon Sep 17 00:00:00 2001 From: ali Date: Tue, 31 Mar 2026 04:10:56 +0500 Subject: [PATCH 16/17] resolve the github bot mentioned issue --- community/x-news-feed/main.py | 309 +++++++++++++++++++++++++++------- 1 file changed, 246 insertions(+), 63 deletions(-) diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index aebaf9e8..ab8f30fe 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -23,26 +23,66 @@ "Global Markets", ] +# ============================================================================ +# TOPIC ALIASES — spoken variants that map to each TOPIC_SEED +# Issue 3 fix: natural spoken aliases per topic so user speech reliably matches +# ============================================================================ +TOPIC_ALIASES = { + "Artificial Intelligence": [ + "artificial 
intelligence", "ai", "machine learning", "ml", + "llms", "llm", "chatgpt", "gpt", "deep learning", "neural", + ], + "Crypto": [ + "crypto", "cryptocurrency", "bitcoin", "btc", "ethereum", "eth", + "blockchain", "defi", "nft", "web3", "coin", "token", + ], + "Climate": [ + "climate", "climate change", "environment", "environmental", + "global warming", "weather", "carbon", "emissions", "green energy", + ], + "Tech Innovation": [ + "tech", "tech innovation", "technology", "gadgets", "startups", + "startup", "innovation", "hardware", "software", + ], + "Global Markets": [ + "global markets", "stocks", "stock market", "wall street", "finance", + "markets", "investing", "economy", "trading", "equities", + ], +} + # ============================================================================ # CONSTANTS # ============================================================================ + +# Issue 5 fix: expanded EXIT_WORDS to cover common spoken closings EXIT_WORDS = [ "exit", "stop", "quit", "done", "bye", "goodbye", "cancel", "nothing else", "all good", "nope", "no thanks", "i'm good", "that's all", "never mind", "leave", "that is all", + "i'm done", "i'm all set", "that'll do it", "we're good", + "no more", "i'm finished", "enough", "wrap it up", + "i think that's it", "i'm out", +] + +# Issue 6 fix: expanded MORE_WORDS to cover natural spoken affirmatives +MORE_WORDS = [ + "more", "rest", "continue", "yes", "yeah", "sure", + "go ahead", "keep going", "read more", "next", "and", + "yep", "yup", "absolutely", "totally", "of course", + "let's hear it", "hit me", "bring it", "do it", + "go on", "please", "uh huh", ] +# Issue 7 fix: expanded FULL_MODE_TRIGGERS to cover natural "give me everything" phrasing FULL_MODE_TRIGGERS = [ "catch me up", "all trends", "full briefing", "everything", "run through", "brief me", "all of them", "the full list", "full list", "all five", "read all", "read them all", "dive in", "deep dive", "explore", "tell me everything", "all tweets", "all 
three", "show all", -] - -MORE_WORDS = [ - "more", "rest", "continue", "yes", "yeah", "sure", - "go ahead", "keep going", "read more", "next", "and", + "give me everything", "lay it all on me", "the whole thing", + "all of it", "hit me with everything", "don't hold back", + "the whole rundown", "just go for it", ] FILLER_INTRO_TEMPLATES = [ @@ -53,6 +93,13 @@ "Looking up the best tweets on {topic} for you.", ] +# Issue 9 fix: shared voice guardrail appended to every LLM prompt that feeds speak() +VOICE_GUARDRAIL = ( + "Plain spoken English only. No lists, no bullet points, no numbers used as list markers, " + "no colons used as headers, no emoji, no markdown. " + "Write as if speaking naturally to someone in the room." +) + # ============================================================================ # Demo tweet data — 3 representative tweets per TOPIC_SEED # ============================================================================ @@ -157,7 +204,8 @@ class XNewsFeedCapability(MatchingCapability): first_visit: bool = True trigger_phrase: str = "" - # {{register_capability}} + # Do not change following tag of register capability + # {{register capability}} def call(self, worker: AgentWorker): self.worker = worker @@ -175,9 +223,9 @@ async def main_flow(self): self.worker.editor_logging_handler.info(f"[XNews] Mode: {self.mode}") if self.first_visit: + # Issue 14 fix: shortened welcome message to ~14 words, combined with topic ask await self.capability_worker.speak( - f"Hey {self.user_name}, welcome to X News! " - "I will help you catch up on the latest tweets for any topic you care about." + f"Hey {self.user_name}, welcome to X News. What topic are you curious about?" 
) self.first_visit = False await self.save_user_preferences() @@ -231,8 +279,9 @@ def _is_demo_mode(self): # TOPIC SELECTION — DEMO MODE # ======================================================================== async def ask_user_to_pick_topic(self): + # Issue 8 fix: use "number 1" format instead of "1." so TTS reads cleanly topics_spoken = ", ".join( - f"{i}. {name}" for i, name in enumerate(TOPIC_SEEDS, 1) + f"number {i}, {name}" for i, name in enumerate(TOPIC_SEEDS, 1) ) await self.capability_worker.speak( f"Here are the available topics: {topics_spoken}. " @@ -250,25 +299,52 @@ async def ask_user_to_pick_topic(self): return "" if self.is_exit_command(user_input.lower()): return "" - matched = self._match_topic(user_input) + matched = await self._match_topic_with_llm(user_input) if matched: self.worker.editor_logging_handler.info(f"[XNews] Topic picked: {matched}") return matched + # Issue 10 fix: "recognize" (US spelling) await self.capability_worker.speak( - "I did not recognise that. Try a number from 1 to 5, " + "I did not recognize that. Try a number from 1 to 5, " "or a name like Crypto or Climate." ) return "" - def _match_topic(self, user_input): + async def _match_topic_with_llm(self, user_input): + """ + Issue 3 fix: two-stage matching. + Stage 1 — fast alias lookup (no LLM cost). + Stage 2 — LLM fallback for phrasing not in the alias map. 
+ """ text = user_input.strip().lower() + + # Stage 1: number match for i, name in enumerate(TOPIC_SEEDS, 1): if str(i) in text or self.number_to_word(i) in text: return name - for name in TOPIC_SEEDS: - if any(word in text for word in name.lower().split()): - return name + + # Stage 2: alias map lookup + for topic_name, aliases in TOPIC_ALIASES.items(): + if any(alias in text for alias in aliases): + return topic_name + + # Stage 3: LLM fallback for anything not in alias map + try: + topic_list = ", ".join(TOPIC_SEEDS) + prompt = ( + f"Which of these topics is the user asking about: {topic_list}?\n" + f"User said: \"{user_input}\"\n" + f"Answer with the exact topic name from the list, or the word none if no match.\n" + f"No explanation. Just the topic name or the word none." + ) + result = self.capability_worker.text_to_text_response(prompt).strip() + for name in TOPIC_SEEDS: + if name.lower() in result.lower(): + return name + except Exception as e: + self.worker.editor_logging_handler.warning(f"[XNews] LLM topic match failed: {e}") + return "" # ======================================================================== @@ -350,10 +426,11 @@ def _summarise_with_llm(self, topic, tweets): tweet_block = "\n".join( f"{i + 1}. {t['text']}" for i, t in enumerate(tweets) ) + # Issue 9 + 13 fix: added VOICE_GUARDRAIL and a hard word count prompt = ( f"You are a news analyst. Below are the top tweets on '{topic}'.\n" - f"Write a short 1-2 sentence conversational summary capturing the key theme.\n" - f"No markdown. No preamble. 
Just the summary.\n\nTweets:\n{tweet_block}" + f"Write a 1-sentence spoken summary, under 20 words, capturing the key theme.\n" + f"{VOICE_GUARDRAIL}\n\nTweets:\n{tweet_block}" ) return self.capability_worker.text_to_text_response(prompt).strip() except Exception as e: @@ -361,7 +438,9 @@ def _summarise_with_llm(self, topic, tweets): return "" # ======================================================================== - # QUICK MODE — top 2 shown, offer the 3rd + # QUICK MODE + # Issue 16 fix: collapse the two back-to-back dead-end prompts into one + # open-ended prompt routed by LLM intent classifier. # ======================================================================== async def quick_mode(self): count = len(self.fetched_tweets) @@ -377,35 +456,73 @@ async def quick_mode(self): await self.worker.session_tasks.sleep(0.3) if count >= 3: + # Single combined prompt — one wait, LLM decides the branch await self.capability_worker.speak( - "There is one more tweet. Want to hear it, or are you all set?" - ) - user_input = await self.wait_for_input( - max_attempts=5, wait_seconds=3.0, context="initial" + "That is the top two. Want the third, a deeper dive on any of them, or are we done?" ) + user_input = await self.wait_for_input(max_attempts=5, wait_seconds=3.0) if not user_input or self.is_exit_command(user_input.lower()): await self.generate_contextual_goodbye() self.capability_worker.resume_normal_flow() return - if self.is_more_request(user_input.lower()) or self.is_full_mode_request(user_input.lower()): + intent = self._classify_quick_mode_intent(user_input) + self.worker.editor_logging_handler.info(f"[XNews] quick_mode intent: {intent}") + + if intent == "hear_more": await self.speak_single_tweet(3, self.fetched_tweets[2]) await self.worker.session_tasks.sleep(0.3) - await self.capability_worker.speak("That is the top 3. Anything else?") - else: - await self.capability_worker.speak("No problem. 
Anything else you would like?") - - final = await self.wait_for_input(max_attempts=3, wait_seconds=2.0) - if not final or self.is_exit_command(final.lower()): - await self.capability_worker.speak("Take care!") + elif intent == "deep_dive": + await self.handle_tweet_question(user_input.lower()) + elif intent == "exit": + await self.generate_contextual_goodbye() + self.capability_worker.resume_normal_flow() + return + # "other" falls through to the interactive loop below else: await self.capability_worker.speak("That is all I found. Anything else?") - final = await self.wait_for_input(max_attempts=3, wait_seconds=2.0) - if not final or self.is_exit_command(final.lower()): + user_input = await self.wait_for_input(max_attempts=3, wait_seconds=2.0) + if not user_input or self.is_exit_command(user_input.lower()): await self.capability_worker.speak("Catch you later!") + self.capability_worker.resume_normal_flow() + return - self.capability_worker.resume_normal_flow() + # Open interactive loop for follow-up questions + await self.interactive_loop() + + def _classify_quick_mode_intent(self, user_input): + """ + Issue 16 fix: LLM-based intent classifier for the post-delivery prompt. + Returns one of: hear_more | deep_dive | exit | other + """ + try: + prompt = ( + f"Classify the user's reply into exactly one of these intents:\n" + f"hear_more — they want to hear the next tweet\n" + f"deep_dive — they want more detail on a specific tweet\n" + f"exit — they are done and want to leave\n" + f"other — something else entirely\n\n" + f"User said: \"{user_input}\"\n" + f"Answer with exactly one word from the list above. No explanation." 
+ ) + result = self.capability_worker.text_to_text_response(prompt).strip().lower() + if result in ("hear_more", "deep_dive", "exit", "other"): + return result + except Exception as e: + self.worker.editor_logging_handler.warning(f"[XNews] intent classify failed: {e}") + + # Fallback to keyword checks + lower = user_input.lower() + if self.is_exit_command(lower): + return "exit" + if self.is_more_request(lower) or self.is_full_mode_request(lower): + return "hear_more" + if any(w in lower for w in ["number", "tweet", "tell me about", "more about", + "dig into", "expand", "break that down", "deeper", + "elaborate", "what about", "let's talk about"]): + return "deep_dive" + return "other" # ======================================================================== # FULL MODE — all 3 shown upfront, then Q&A loop @@ -442,8 +559,9 @@ async def interactive_loop(self): if not user_input: idle_count += 1 if idle_count >= 2: + # Issue 15 fix: replaced broadcast "sign off" with natural home-device phrasing await self.capability_worker.speak( - "I am still here if you need anything. Otherwise I will sign off." + "Still here if you need me, otherwise I'll wrap up." 
) await self.worker.session_tasks.sleep(3) break @@ -456,7 +574,8 @@ async def interactive_loop(self): await self.generate_contextual_goodbye() break - if any(p in lower for p in ["again", "repeat", "read again"]): + # Issue 1 fix: LLM-based repeat detection instead of brittle keyword list + if await self._user_wants_repeat(user_input): await self.capability_worker.speak("Sure, here they are again:") await self.worker.session_tasks.sleep(0.3) for i, tweet in enumerate(self.fetched_tweets, 1): @@ -465,16 +584,77 @@ async def interactive_loop(self): await self.capability_worker.speak("Anything else?") continue - if any(w in lower for w in ["number", "tweet", "tell me about", "more about"]): - await self.handle_tweet_question(lower) + # Issue 2 fix: LLM-based deep-dive detection instead of brittle keyword list + tweet_number = await self._extract_tweet_number_for_deepdive(user_input) + if tweet_number is not None: + await self.handle_tweet_question_by_number(tweet_number) continue await self.handle_general_question(user_input) self.capability_worker.resume_normal_flow() + # ======================================================================== + # LLM INTENT HELPERS + # Issue 1 fix: LLM classifier for repeat/replay detection + # Issue 2 fix: LLM classifier for tweet deep-dive detection + # ======================================================================== + async def _user_wants_repeat(self, user_input): + """Returns True if the user wants the tweets repeated.""" + try: + prompt = ( + f"Does the user want the tweets to be repeated or read again?\n" + f"User said: \"{user_input}\"\n" + f"Answer with exactly yes or no. No explanation." 
+ ) + result = self.capability_worker.text_to_text_response(prompt).strip().lower() + return result.startswith("yes") + except Exception as e: + self.worker.editor_logging_handler.warning(f"[XNews] repeat detect failed: {e}") + # Fallback keyword check + return any(p in user_input.lower() for p in ["again", "repeat", "read again", + "say that again", "one more time", + "go back", "run through those", + "from the top", "play that back", + "reread", "didn't catch"]) + + async def _extract_tweet_number_for_deepdive(self, user_input): + """ + Returns the tweet number (1, 2, or 3) the user wants to deep-dive into, + or None if they are not asking for a deep dive. + Issue 2 fix: LLM-based detection replaces brittle keyword list. + """ + try: + count = len(self.fetched_tweets) + prompt = ( + f"Is the user asking for more detail on a specific tweet?\n" + f"There are {count} tweets numbered 1 to {count}.\n" + f"User said: \"{user_input}\"\n" + f"If yes, reply with just the number (1, 2, or 3). " + f"If no, reply with the word no. No explanation." 
+ ) + result = self.capability_worker.text_to_text_response(prompt).strip().lower() + for i in range(1, count + 1): + if str(i) in result or self.number_to_word(i) in result: + return i + return None + except Exception as e: + self.worker.editor_logging_handler.warning(f"[XNews] deep-dive detect failed: {e}") + # Fallback keyword check + lower = user_input.lower() + if any(w in lower for w in ["number", "tweet", "tell me about", "more about", + "dig into", "expand", "deeper", "elaborate", + "what about", "break that down", "let's talk about", + "that last one", "that third", "the second"]): + for i in range(1, len(self.fetched_tweets) + 1): + if str(i) in lower or self.number_to_word(i) in lower: + return i + return None + # ======================================================================== # CAPTURE INITIAL TRIGGER + # Reviewer fix: replaced deprecated self.worker.agent_memory.full_message_history + # with self.capability_worker.get_full_message_history() # ======================================================================== async def capture_user_input(self): try: @@ -491,7 +671,8 @@ async def capture_user_input(self): return await self.worker.session_tasks.sleep(0.5) - history = self.worker.agent_memory.full_message_history + # Reviewer fix: use the approved API instead of the deprecated agent_memory attribute + history = self.capability_worker.get_full_message_history() if history: last_msg = history[-1] try: @@ -585,22 +766,15 @@ def clean_tweet_text(self, text): - Mentions (@word) - HTML entities (& < > " ') - Excess whitespace left behind after stripping - The cleaned text is then passed to the LLM to produce a - single polished sentence before speaking. 
""" - # Remove URLs text = re.sub(r'https?://\S+', '', text) - # Remove hashtags text = re.sub(r'#\S+', '', text) - # Remove mentions text = re.sub(r'@\S+', '', text) - # Decode common HTML entities text = text.replace('&', 'and') text = text.replace('<', 'less than') text = text.replace('>', 'greater than') text = text.replace('"', '"') text = text.replace(''', "'") - # Collapse extra spaces / newlines text = re.sub(r'[\r\n]+', ' ', text) text = re.sub(r' {2,}', ' ', text).strip() return text @@ -608,14 +782,15 @@ def clean_tweet_text(self, text): def polish_tweet_for_speech(self, raw_text): """ Ask the LLM to rewrite the cleaned tweet as a single clean, - natural-sounding sentence — removing leftover symbols, emoji, - and any awkward phrasing introduced by stripping URLs/hashtags. + natural-sounding sentence. + Issue 9 fix: added VOICE_GUARDRAIL to this prompt. """ try: prompt = ( "Rewrite the following tweet as a single clean, natural-sounding sentence " "suitable for being read aloud. Remove any emoji, symbols, or awkward fragments. " - "Keep the core meaning. No markdown. No preamble. Just the sentence.\n\n" + "Keep the core meaning. 
" + f"{VOICE_GUARDRAIL}\n\n" f"Tweet: {raw_text}" ) result = self.capability_worker.text_to_text_response(prompt).strip() @@ -626,13 +801,10 @@ def polish_tweet_for_speech(self, raw_text): async def speak_single_tweet(self, number, tweet): raw_text = tweet.get("text", "").strip() - tweet.get("score", 0) if raw_text: cleaned = self.clean_tweet_text(raw_text) polished = self.polish_tweet_for_speech(cleaned) - await self.capability_worker.speak( - f"Tweet {number}: {polished}" - ) + await self.capability_worker.speak(f"Tweet {number}: {polished}") else: await self.capability_worker.speak(f"Tweet {number}: no content available.") @@ -640,20 +812,27 @@ async def speak_single_tweet(self, number, tweet): # Q&A HELPERS # ======================================================================== async def handle_tweet_question(self, user_input): - tweet_number = None - for i in range(1, 4): - if str(i) in user_input or self.number_to_word(i) in user_input: - tweet_number = i - break + """Entry point when we already have a raw user utterance and need to resolve the number.""" + tweet_number = await self._extract_tweet_number_for_deepdive(user_input) + if tweet_number is not None: + await self.handle_tweet_question_by_number(tweet_number) + else: + await self.capability_worker.speak( + f"I did not catch that. Try saying a number between 1 and {len(self.fetched_tweets)}." + ) + async def handle_tweet_question_by_number(self, tweet_number): + """Deliver the deep-dive analysis for a specific tweet number.""" if tweet_number and tweet_number <= len(self.fetched_tweets): tweet = self.fetched_tweets[tweet_number - 1] text = tweet.get("text", "") + # Issue 9 + 11 fix: added VOICE_GUARDRAIL and reduced word ceiling to 20 prompt = ( f"Topic: '{self.selected_topic}' is trending on X.\n" f"Tweet: \"{text}\"\n" - f"Give an additional 1-2 sentence conversational insight about why this tweet matters. " - f"Be concise. Under 40 words. No markdown." 
+ f"Give a 1-sentence conversational insight about why this tweet matters, " + f"under 20 words. " + f"{VOICE_GUARDRAIL}" ) analysis = self.capability_worker.text_to_text_response(prompt) await self.capability_worker.speak(f"More on tweet {tweet_number}: {analysis}") @@ -666,12 +845,14 @@ async def handle_tweet_question(self, user_input): async def handle_general_question(self, user_input): tweets_context = " | ".join(t.get("text", "") for t in self.fetched_tweets) + # Issue 9 + 12 fix: added VOICE_GUARDRAIL and a hard word count of 25 prompt = ( f"You are a helpful X news assistant. The user asked about '{self.selected_topic}'.\n" f"Top tweets: {tweets_context}\n" f"Summary: {self.topic_summary}\n" f"User question: {user_input}\n" - f"Reply in 2 sentences max. Conversational. No markdown." + f"Reply in 1-2 sentences, under 25 words total. " + f"{VOICE_GUARDRAIL}" ) response = self.capability_worker.text_to_text_response(prompt) await self.capability_worker.speak(response) @@ -679,9 +860,11 @@ async def handle_general_question(self, user_input): await self.capability_worker.speak("Anything else?") async def generate_contextual_goodbye(self): + # Issue 4 fix: replaced broadcast-style examples with casual spoken closings prompt = ( - "Generate a brief friendly goodbye under 10 words for a news briefing. " - "Casual. Examples: Catch you later, Stay informed, Take care. One only:" + "Casual spoken goodbye under 6 words. " + "Examples: Later! Have a good one! Talk soon! Take it easy! 
" + "One only, no punctuation that sounds unnatural read aloud:" ) goodbye = self.capability_worker.text_to_text_response(prompt).strip() await self.capability_worker.speak(goodbye) @@ -699,4 +882,4 @@ def is_more_request(self, text): return any(word in text for word in MORE_WORDS) def number_to_word(self, num): - return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") + return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") \ No newline at end of file From 8ea4fb5213fa7fc1206d828d05a53ed919727892 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 30 Mar 2026 23:12:12 +0000 Subject: [PATCH 17/17] style: auto-format Python files with autoflake + autopep8 --- community/x-news-feed/main.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/community/x-news-feed/main.py b/community/x-news-feed/main.py index ab8f30fe..22cf373f 100644 --- a/community/x-news-feed/main.py +++ b/community/x-news-feed/main.py @@ -519,8 +519,8 @@ def _classify_quick_mode_intent(self, user_input): if self.is_more_request(lower) or self.is_full_mode_request(lower): return "hear_more" if any(w in lower for w in ["number", "tweet", "tell me about", "more about", - "dig into", "expand", "break that down", "deeper", - "elaborate", "what about", "let's talk about"]): + "dig into", "expand", "break that down", "deeper", + "elaborate", "what about", "let's talk about"]): return "deep_dive" return "other" @@ -613,10 +613,10 @@ async def _user_wants_repeat(self, user_input): self.worker.editor_logging_handler.warning(f"[XNews] repeat detect failed: {e}") # Fallback keyword check return any(p in user_input.lower() for p in ["again", "repeat", "read again", - "say that again", "one more time", - "go back", "run through those", - "from the top", "play that back", - "reread", "didn't catch"]) + "say that again", "one more time", + "go back", "run through those", + "from the top", "play that back", + "reread", "didn't 
catch"]) async def _extract_tweet_number_for_deepdive(self, user_input): """ @@ -643,9 +643,9 @@ async def _extract_tweet_number_for_deepdive(self, user_input): # Fallback keyword check lower = user_input.lower() if any(w in lower for w in ["number", "tweet", "tell me about", "more about", - "dig into", "expand", "deeper", "elaborate", - "what about", "break that down", "let's talk about", - "that last one", "that third", "the second"]): + "dig into", "expand", "deeper", "elaborate", + "what about", "break that down", "let's talk about", + "that last one", "that third", "the second"]): for i in range(1, len(self.fetched_tweets) + 1): if str(i) in lower or self.number_to_word(i) in lower: return i @@ -882,4 +882,4 @@ def is_more_request(self, text): return any(word in text for word in MORE_WORDS) def number_to_word(self, num): - return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "") \ No newline at end of file + return {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}.get(num, "")