Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
109 changes: 106 additions & 3 deletions buddi/Buddi/Resources/buddi-hook.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,13 @@
Buddi Hook
Comment thread
cubic-dev-ai[bot] marked this conversation as resolved.
- Sends session state to Buddi.app via Unix socket
- For PermissionRequest: waits for user decision from the app
- Tracks session rhythm for buddy dialogue flavoring
"""
import json
import os
import socket
import sys
import time

SOCKET_PATH = "/tmp/buddi.sock"
TIMEOUT_SECONDS = 300 # 5 minutes for permission decisions
Expand Down Expand Up @@ -80,6 +82,100 @@ def get_cmux_surface():
return None, None


SESSION_STATS_PATH = os.path.expanduser("~/.buddi-session-stats.json")
SESSION_STATS_LOCK_PATH = SESSION_STATS_PATH + ".lock"


def load_session_stats():
    """Read the persisted per-session stats map from SESSION_STATS_PATH.

    Returns:
        The decoded dict, or an empty dict when the file is missing,
        unreadable, or contains invalid JSON (fresh-start default).
    """
    try:
        f = open(SESSION_STATS_PATH)
    except OSError:
        return {}
    with f:
        try:
            return json.load(f)
        except (ValueError, OSError):
            # json.JSONDecodeError is a ValueError subclass.
            return {}


def save_session_stats(stats):
    """Atomically persist the session stats map to SESSION_STATS_PATH.

    Writes to a temp file in the destination directory, then os.replace()s
    it over the target so concurrent readers never see a partial file.

    Best-effort: filesystem errors are swallowed (stats are non-critical
    flavor data, and the hook must never fail because of them), but the
    temp file is cleaned up on failure so we don't litter the directory.
    """
    import tempfile
    tmp_path = None
    try:
        dir_ = os.path.dirname(SESSION_STATS_PATH)
        with tempfile.NamedTemporaryFile("w", dir=dir_, delete=False, suffix=".tmp") as tmp:
            json.dump(stats, tmp)
            tmp_path = tmp.name
        os.replace(tmp_path, SESSION_STATS_PATH)
    except OSError:
        # Drop the update, but remove the orphaned temp file if one was
        # created before the failure (fixes a temp-file leak).
        if tmp_path is not None:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass


def _update_session_stats_unlocked(session_id, event, tool_name=None, denied=False,
                                   ttl_seconds=24 * 60 * 60):
    """Record one hook event for ``session_id`` and persist the stats file.

    Not safe under concurrent writers — use update_session_stats_atomic().

    Args:
        session_id: Key identifying this session's record.
        event: Hook event name ("PreToolUse", "UserPromptSubmit", ...).
        tool_name: Counted per-tool when event == "PreToolUse".
        denied: When True (and event matched neither branch above), bumps
            denial_count — feeds the "chaotic" dialogue flavor.
        ttl_seconds: Sessions whose last_event_time is older than this are
            pruned on every update (default 24h), so the stats file cannot
            grow without bound — every tracked event rewrites the whole file.

    Returns:
        The updated per-session stats dict.
    """
    now = time.time()
    stats = load_session_stats()

    # Prune stale sessions to bound file growth (review-flagged issue:
    # the file was previously never trimmed).
    for sid in [sid for sid, rec in stats.items()
                if sid != session_id
                and now - rec.get("last_event_time", 0) > ttl_seconds]:
        del stats[sid]

    s = stats.setdefault(session_id, {
        "tool_counts": {},
        "denial_count": 0,
        "prompt_count": 0,
        "session_start": now,
        "last_event_time": now,
    })

    s["last_event_time"] = now

    if event == "PreToolUse" and tool_name:
        s["tool_counts"][tool_name] = s["tool_counts"].get(tool_name, 0) + 1
    elif event == "UserPromptSubmit":
        s["prompt_count"] = s.get("prompt_count", 0) + 1
    elif denied:
        s["denial_count"] = s.get("denial_count", 0) + 1

    save_session_stats(stats)
    return s


def update_session_stats_atomic(session_id, event, tool_name=None, denied=False):
    """File-locked variant of _update_session_stats_unlocked — safe under
    concurrent hook invocations.

    Takes an exclusive flock on SESSION_STATS_LOCK_PATH around the
    read-modify-write. If the lock file can't be opened or locked, falls
    back to an unlocked update rather than dropping the event.
    """
    import fcntl
    try:
        # `with` guarantees the lock fd is closed even if flock() raises
        # (the previous version leaked the handle on that path).
        with open(SESSION_STATS_LOCK_PATH, "w") as lock:
            fcntl.flock(lock, fcntl.LOCK_EX)
            try:
                return _update_session_stats_unlocked(session_id, event, tool_name=tool_name, denied=denied)
            finally:
                fcntl.flock(lock, fcntl.LOCK_UN)
    except OSError:
        # Lock file unavailable — best-effort unlocked update.
        return _update_session_stats_unlocked(session_id, event, tool_name=tool_name, denied=denied)


def compute_dialogue_flavor(stats):
    """
    Derive a dialogue flavor label from the session's rhythm.

    Purely additive — it never affects buddy identity. The Swift side uses
    this to vary what the buddy says, not what it looks like.

    Returns one of: "chaotic", "runner", "explorer", "methodical", "neutral".
    """
    counts = stats.get("tool_counts", {})
    denials = stats.get("denial_count", 0)
    prompts = max(stats.get("prompt_count", 1), 1)

    total = sum(counts.values())
    denial_rate = denials / prompts

    shellish = ("Bash", "computer")
    exploratory = ("Read", "Grep", "LS", "Glob")
    shell_hits = sum(counts.get(name, 0) for name in shellish)
    explore_hits = sum(counts.get(name, 0) for name in exploratory)

    # Denial-heavy sessions win outright.
    if denial_rate > 0.4:
        return "chaotic"
    # Majority-use checks (x/total > 0.5 is equivalent to 2x > total
    # for non-negative integer counts).
    if total > 0:
        if shell_hits * 2 > total:
            return "runner"
        if explore_hits * 2 > total:
            return "explorer"
    if total > 20 and denial_rate < 0.1:
        return "methodical"
    return "neutral"


def send_event(state):
"""Send event to app, return response if any"""
try:
Expand Down Expand Up @@ -134,13 +230,17 @@ def main():

# Map events to status
if event == "UserPromptSubmit":
# User just sent a message - Claude is now processing
session_stats = update_session_stats_atomic(session_id, event)
state["status"] = "processing"
state["dialogue_flavor"] = compute_dialogue_flavor(session_stats)

elif event == "PreToolUse":
tool_name = data.get("tool_name")
session_stats = update_session_stats_atomic(session_id, event, tool_name=tool_name)
state["status"] = "running_tool"
state["tool"] = data.get("tool_name")
state["tool"] = tool_name
state["tool_input"] = tool_input
state["dialogue_flavor"] = compute_dialogue_flavor(session_stats)
# Send tool_use_id to Swift for caching
tool_use_id_from_event = data.get("tool_use_id")
if tool_use_id_from_event:
Expand All @@ -158,8 +258,10 @@ def main():
elif event == "PermissionRequest":
# This is where we can control the permission
state["status"] = "waiting_for_approval"
state["tool"] = data.get("tool_name")
tool_name = data.get("tool_name")
state["tool"] = tool_name
state["tool_input"] = tool_input
# Count denials for chaos tracking — updated after response below
# tool_use_id lookup handled by Swift-side cache from PreToolUse

# Send to app and wait for decision
Expand All @@ -181,6 +283,7 @@ def main():
sys.exit(0)

elif decision == "deny":
update_session_stats_atomic(session_id, event, denied=True)
# Output JSON to deny
output = {
"hookSpecificOutput": {
Expand Down
29 changes: 25 additions & 4 deletions buddi/Buddi/Services/Shared/UsageService.swift
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,14 @@ final class UsageService: ObservableObject {
private init() {}

    /// Starts periodic usage polling. Safe to call repeatedly: when a poll
    /// timer already exists, only triggers a one-off refresh if the cached
    /// result is stale.
    func startPolling() {
        loadCache()
        // If already polling, only kick off a fresh fetch if the last one is stale (>5 min old)
        guard pollTimer == nil else {
            // NOTE(review): this can call poll() while a poll task may
            // already be in flight, racing polling-state updates — consider
            // also guarding on an in-flight flag. TODO confirm.
            if let cached = loadCachedUsage(), Date().timeIntervalSince(cached.fetchedAt) > baseInterval {
                poll()
            }
            return
        }
        poll()
    }

Expand All @@ -60,7 +66,8 @@ final class UsageService: ObservableObject {

private func scheduleNextPoll() {
pollTimer?.invalidate()
pollTimer = Timer.scheduledTimer(withTimeInterval: currentInterval, repeats: false) { [weak self] _ in
let interval = max(currentInterval, 1)
pollTimer = Timer.scheduledTimer(withTimeInterval: interval, repeats: false) { [weak self] _ in
Task { @MainActor [weak self] in
self?.poll()
}
Expand All @@ -71,6 +78,9 @@ final class UsageService: ObservableObject {
pollTask = Task {
guard let token = Self.readOAuthToken() else {
isAvailable = false
// Reset interval so a missing token doesn't trap us in the
// 2-second stale-cache fast path forever
currentInterval = baseInterval
scheduleNextPoll()
return
}
Expand All @@ -92,6 +102,8 @@ final class UsageService: ObservableObject {
if consecutiveFailures > 5 && usage.fiveHour == nil && usage.sevenDay == nil {
isAvailable = false
}
// Always restore to base interval on any failure path so we never loop fast
currentInterval = baseInterval
}
scheduleNextPoll()
}
Expand All @@ -111,11 +123,20 @@ final class UsageService: ObservableObject {
let fetchedAt: Date
}

    /// Decodes the on-disk usage cache.
    /// - Returns: The cached usage record, or `nil` if the cache file is
    ///   missing, unreadable, or fails to decode.
    private func loadCachedUsage() -> CachedUsage? {
        guard let data = try? Data(contentsOf: Self.cacheURL) else { return nil }
        return try? JSONDecoder().decode(CachedUsage.self, from: data)
    }

    /// Restores the last fetched usage from disk so the UI has data
    /// immediately, and shortens the poll interval when that data is stale.
    private func loadCache() {
        guard let cached = loadCachedUsage() else { return }
        usage = cached.usage
        isAvailable = true
        // If cached data is stale, use a short interval so the first poll fires quickly
        // without creating a tight loop (minimum is clamped to 1s in scheduleNextPoll)
        if Date().timeIntervalSince(cached.fetchedAt) > baseInterval {
            currentInterval = 2
        }
    }

private func saveCache() {
Expand Down
Loading
Loading