From fc598487c2c9bfad5035c328704e6ad105b602cc Mon Sep 17 00:00:00 2001
From: Claude
Date: Sun, 18 Jan 2026 04:58:21 +0000
Subject: [PATCH 1/3] Fix AI command detection by narrowing on/off check

The "ai on/off" special handling was intercepting all 2-word messages
starting with "ai", blocking legitimate AI commands like "ai test" or
"ai hello". Changed the condition to only trigger when the second word
is exactly "on" or "off", allowing all other AI commands to proceed to
normal command handling.

Fixes: Users can now send AI commands with single-word queries
---
 main.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/main.py b/main.py
index f01e54c..2a369a4 100644
--- a/main.py
+++ b/main.py
@@ -757,14 +757,13 @@ def parse_incoming_text(text, sender_id, is_direct, channel_idx):
     parts = text_lower.split()
     first_word = parts[0] if parts else ""
 
-    if first_word == "ai" and len(parts) == 2:
+    if first_word == "ai" and len(parts) == 2 and parts[1] in ("on", "off"):
         if parts[1] == "on":
             active_ai_channels[channel_idx] = now
             return "🤖 AI enabled for this channel."
         if parts[1] == "off":
             active_ai_channels.pop(channel_idx, None)
             return "🤖 AI disabled for this channel."
-        return "Usage: ai on | ai off"
 
     # ----------------------------
     # 2. Command matching (word-based)

From 567f458842b8c001f19c2bbf35fa7bbce8fa6383 Mon Sep 17 00:00:00 2001
From: Claude
Date: Sun, 18 Jan 2026 05:00:54 +0000
Subject: [PATCH 2/3] Configure Ollama as the AI provider

The ai_provider was set to a placeholder value "lmstudio, openai, or
ollama", which caused get_ai_response() to return None for all AI
commands. Set ai_provider to "ollama" to enable AI command responses.

Fixes: AI commands now route to Ollama for processing
---
 config/config.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/config.json b/config/config.json
index 525b1dc..6a3050c 100644
--- a/config/config.json
+++ b/config/config.json
@@ -7,7 +7,7 @@
     "timezone": "America/New_York",
     "serial_port": "",
 
-    "ai_provider": "lmstudio, openai, or ollama",
+    "ai_provider": "ollama",
     "system_prompt": "You are a helpful assistant responding to mesh network chats. Respond in as few words as possible while still answering fully.",
 
     "lmstudio_url": "http://localhost:1234/v1/chat/completions",
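
[Note on PATCH 1/3] With the narrowed condition, only "ai on" and "ai off"
short-circuit here; any other two-word "ai ..." message now falls through to
normal command matching. A minimal standalone sketch of that dispatch
(simplified from parse_incoming_text; time.time() stands in for main.py's
`now`, and the return of None represents falling through):

    import time

    active_ai_channels = {}  # channel index -> last AI activity timestamp

    def dispatch(text, channel_idx):
        parts = text.lower().split()
        first_word = parts[0] if parts else ""
        # After the fix, only "ai on" / "ai off" are handled here.
        if first_word == "ai" and len(parts) == 2 and parts[1] in ("on", "off"):
            if parts[1] == "on":
                active_ai_channels[channel_idx] = time.time()
                return "🤖 AI enabled for this channel."
            active_ai_channels.pop(channel_idx, None)
            return "🤖 AI disabled for this channel."
        return None  # falls through to normal command matching

    assert dispatch("ai on", 0) == "🤖 AI enabled for this channel."
    assert dispatch("ai test", 0) is None  # previously hit the usage string

[Note on PATCH 2/3] With ai_provider set to "ollama", get_ai_response() is
expected to call a local Ollama server. A hedged sketch of such a call using
Ollama's /api/generate endpoint (the URL, model name, and timeout are
illustrative; the project's actual ollama_* config keys are not shown in
this diff, and main.py's real helper may differ):

    import requests

    def get_ai_response_sketch(prompt,
                               url="http://localhost:11434/api/generate",
                               model="llama3"):
        # Sketch only: POST the prompt and return the model's reply text.
        try:
            r = requests.post(
                url,
                json={"model": model, "prompt": prompt, "stream": False},
                timeout=60,
            )
            r.raise_for_status()
            return r.json().get("response")
        except requests.RequestException:
            return None  # caller falls back to "🤖 [No AI response]"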
From ff9a052f731218818305f197ba4c9bfdf9430e2e Mon Sep 17 00:00:00 2001
From: Claude
Date: Sun, 18 Jan 2026 05:03:38 +0000
Subject: [PATCH 3/3] Add diagnostic logging for AI command debugging

Added detailed logging to help diagnose AI command issues:

- Print loaded commands at startup
- Print configured AI provider
- Log when config commands are matched
- Log AI prompt template and final prompt
- Log AI response before returning

This will help identify where the command flow breaks when AI commands
don't respond as expected.
---
 main.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/main.py b/main.py
index 2a369a4..35b2d97 100644
--- a/main.py
+++ b/main.py
@@ -151,6 +151,8 @@ def save_config(cfg):
 print(f"Timezone set to: {timezone_str}")
 add_script_log(f"Timezone set to: {timezone_str}")
 commands_config = safe_load_json(COMMANDS_CONFIG_FILE, {"commands": []})
+cmd_list = [c.get("command", "") for c in commands_config.get("commands", [])]
+print(f"Loaded {len(cmd_list)} commands from config: {', '.join(cmd_list)}")
 
 def reload_config():
     global config
@@ -194,6 +196,7 @@ def info_print(*args, **kwargs):
 # AI Provider & Other Config Vars
 # -----------------------------
 AI_PROVIDER = config.get("ai_provider", "lmstudio").lower()
+print(f"AI Provider configured: {AI_PROVIDER}")
 SYSTEM_PROMPT = config.get("system_prompt", "You are a helpful assistant responding to mesh network chats.")
 LMSTUDIO_URL = config.get("lmstudio_url", "http://localhost:1234/v1/chat/completions")
 LMSTUDIO_TIMEOUT = config.get("lmstudio_timeout", 60)
@@ -781,8 +784,10 @@ def parse_incoming_text(text, sender_id, is_direct, channel_idx):
 
         cmd_text = c.get("command", "").lower()
         if cmd_text and cmd_text == first_word:
+            dprint(f"Matched config command: '{cmd_text}'")
             if "ai_prompt" in c:
                 prompt = c["ai_prompt"].replace("{user_input}", user_input)
+                dprint(f"AI prompt template: {c['ai_prompt']}, final prompt: '{prompt}'")
 
                 if AI_PROVIDER == "home_assistant" and HOME_ASSISTANT_ENABLE_PIN:
                     if not pin_is_valid(user_input):
@@ -793,7 +798,9 @@ def parse_incoming_text(text, sender_id, is_direct, channel_idx):
                     if not is_direct:
                         active_ai_channels[channel_idx] = now
 
-                    return get_ai_response(prompt) or "🤖 [No AI response]"
+                    ai_response = get_ai_response(prompt)
+                    dprint(f"AI response: {ai_response}")
+                    return ai_response or "🤖 [No AI response]"
 
             if "response" in c:
                 return c["response"].replace("{user_input}", user_input)
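
[Note on PATCH 3/3] The new logging calls main.py's existing dprint()
helper, which this diff uses but does not define. A minimal stand-in with
the shape the calls imply (the real helper is an assumption here and may
also add timestamps or write to a log file):

    DEBUG = True  # in main.py this would come from config

    def dprint(*args, **kwargs):
        # Print diagnostic output only when debugging is enabled.
        if DEBUG:
            print("[DEBUG]", *args, **kwargs)

    dprint("Matched config command: 'test'")  # example usage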