Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
418 changes: 142 additions & 276 deletions agent/api_client.py

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions agent/attention.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ def format_themes_for_prompt(mi,uid:str,spike:bool=False,k_user:int=12,k_global:
if mode=="inline":return ", ".join(ut+gt)
if mode=="tagged":return ", ".join([f"User:{t}" for t in ut]+[f"Bot:{t}" for t in gt])
if mode=="sections":return f"Current User Preferences:\n{', '.join(ut)}\n\nYour Global Preferences:\n{', '.join(gt)}"
if mode=="just_user":return f"Current User Preferences:\n{', '.join(ut)}"
if mode=="just_global":return f"Your Global Preferences:\n{', '.join(gt)}"
return "\n".join([*map(lambda x:f"- U:{x}",ut),*map(lambda x:f"- G:{x}",gt)])


Expand Down
51 changes: 22 additions & 29 deletions agent/bot_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,14 +12,17 @@

class LogConfig(BaseModel):
    """Logging configuration and output paths.

    Controls where log artifacts are written and which sinks
    (console, JSONL file, SQLite database) are active.
    """
    # Root directory for all log artifacts (merged value; the old "logs" default was removed).
    base_log_dir: str = Field(default="cache")
    # Per-bot filename templates; {bot_id} is substituted by the logger.
    jsonl_pattern: str = Field(default="bot_log_{bot_id}.jsonl")
    db_pattern: str = Field(default="bot_log_{bot_id}.db")
    # Level is overridable via the LOGLEVEL environment variable, read at import time.
    log_level: str = Field(default=os.getenv('LOGLEVEL', 'INFO'))
    log_format: str = Field(default='%(asctime)s - %(levelname)s - %(message)s')

    enable_console: bool = Field(default=True, description="Enable console logging")
    enable_jsonl: bool = Field(default=True, description="Enable JSONL file logging")
    enable_sql: bool = Field(default=False, description="Enable SQLite database logging")


class APIConfig(BaseModel):
"""API authentication and endpoint configurations"""
discord_token: str = Field(default=os.getenv('DISCORD_TOKEN'))
Expand All @@ -34,8 +37,6 @@ class FileConfig(BaseModel):
"""File handling configuration"""
allowed_extensions: Set[str] = Field(default={'.py', '.js', '.html', '.css', '.json', '.md', '.txt'})
allowed_image_extensions: Set[str] = Field(default={'.jpg', '.jpeg', '.png', '.gif', '.bmp'})
# add audio extension for voice message module expansion
allowed_audio_extensions: Set[str] = Field(default={'.mp3', '.wav', '.ogg', '.m4a'})

class SearchConfig(BaseModel):
"""Search and indexing configuration"""
Expand All @@ -56,11 +57,11 @@ class PersonaConfig(BaseModel):
default_amygdala_response: int = Field(default=70)
temperature: float = Field(default_factory=lambda: 70/100.0)
hippocampus_bandwidth: float = Field(default=0.70)
memory_capacity: int = Field(default=24)
memory_capacity: int = Field(default=30)
use_hippocampus_reranking: bool = Field(default=True)
reranking_blend_factor: float = Field(default=0.20, description="Weight for blending initial search scores with reranking similarity (0-1)")
minimum_reranking_threshold: float = Field(default=0.60, description="Minimum threshold for reranked memories")
mood_coefficient: float = Field(default=0.30, description="Coefficient (0-1) that controls how strongly amygdala state lowers or raises the memory-selection threshold")
reranking_blend_factor: float = Field(default=0.5, description="Weight for blending initial search scores with reranking similarity (0-1)")
minimum_reranking_threshold: float = Field(default=0.64, description="Minimum threshold for reranked memories")
mood_coefficient: float = Field(default=0.15, description="Coefficient (0-1) that controls how strongly amygdala state lowers or raises the memory-selection threshold")

class NotionConfig(BaseModel):
"""Notion database configuration"""
Expand All @@ -69,33 +70,17 @@ class NotionConfig(BaseModel):
tasks_db_id: str = Field(default=os.getenv('TASKS_DB_ID'))
kanban_db_id: str = Field(default=os.getenv('KANBAN_DB_ID'))

class TwitterConfig(BaseModel):
    """Twitter API and limits configuration"""
    # Credentials are read from environment variables at import time.
    # NOTE(review): os.getenv returns None when a variable is unset, yet the
    # fields are annotated `str` — presumably validated elsewhere; confirm.
    username: str = Field(default=os.getenv('TWITTER_USERNAME'))
    api_key: str = Field(default=os.getenv('TWITTER_API_KEY'))
    api_secret: str = Field(default=os.getenv('TWITTER_API_SECRET'))
    access_token: str = Field(default=os.getenv('TWITTER_ACCESS_TOKEN'))
    access_secret: str = Field(default=os.getenv('TWITTER_ACCESS_SECRET'))
    bearer_token: str = Field(default=os.getenv('TWITTER_BEARER_TOKEN'))
    # Platform content limits per tweet.
    char_limit: int = Field(default=280)
    media_limit: int = Field(default=4)
    gif_limit: int = Field(default=1)
    video_limit: int = Field(default=1)
    # Maximum reply-chain depth the bot will follow.
    reply_depth_limit: int = Field(default=25)
    # Rate limits — units not shown here; presumably requests per window, confirm against API client.
    tweet_rate_limit: int = Field(default=300)
    dm_rate_limit: int = Field(default=1000)

class SystemConfig(BaseModel):
    """System-wide configuration"""
    # Polling interval, overridable via the POLL_INTERVAL environment variable
    # (read once at import time); the env value is coerced to int here.
    poll_interval: int = Field(default=int(os.getenv('POLL_INTERVAL', 120)))
    # Scheduler tick rate — units not visible here; presumably seconds, confirm against consumers.
    tick_rate: int = Field(default=800)

class AttentionConfig(BaseModel):
"""Attention mechanism configuration"""
threshold: int = Field(default=70, description="Fuzzy match threshold for attention triggers (0-100)")
threshold: int = Field(default=60, description="Fuzzy match threshold for attention triggers (0-100)")
default_top_n: int = Field(default=32, description="Default number of top trigrams to extract from memory")
default_min_occ: int = Field(default=8, description="Minimum occurrence count for trigrams to be considered")
refresh_interval_hours: int = Field(default=1, description="Hours between trigram cache refreshes")
refresh_interval_hours: int = Field(default=2, description="Hours between trigram cache refreshes")
cooldown_minutes: float = Field(default=0.30, description="Minutes between attention trigger activations")

stop_words: Set[str] = Field(default_factory=lambda: {
Expand Down Expand Up @@ -133,6 +118,7 @@ class DMNConfig(BaseModel):
"""DMN configuration"""
tick_rate: int = Field(default=1200, description="Time between thought generations in seconds")
temperature: float = Field(default=0.7, description="Base creative temperature")
temperature_max: float = Field(default=1.5)
combination_threshold: float = Field(default=0.2, description="Minimum relevance score for memory combinations")
decay_rate: float = Field(default=0.1, description="Rate at which used memory weights decrease")
top_k: int = Field(default=24, description="Top k memories to consider for combination")
Expand All @@ -141,13 +127,21 @@ class DMNConfig(BaseModel):
fuzzy_search_threshold: int = Field(default=90, description="Minimum fuzzy search threshold for term matching")
max_memory_length: int = Field(default=64, description="Maximum length of a memory based on truncate_middle function")
similarity_threshold: float = Field(default=0.5, description="Minimum similarity score for memory relevance")
top_p_min_clamp: float = Field(default=0.8, description="Minimum clamp value for top_p scaling (0.0-1.0)")


# DMN-specific API settings
dmn_api_type: str = Field(default=None, description="API type for DMN processor (ollama, openai, anthropic, etc.)")
dmn_model: str = Field(default=None, description="Model name for DMN processor")

# Mode presets
# Focus presets
consciousness_default: str = Field(default="creative")
consciousness_presets: Dict[str, Dict[str, float]] = Field(default_factory=lambda:{
"hyperfocus": {"temp_base":0.20,"temp_span":0.40, "p_sparse":0.80, "p_mid":0.65, "p_dense":0.50}, # low T + low p
"creative": {"temp_base":0.80,"temp_span":0.70,"p_sparse":0.92,"p_mid":0.90,"p_dense":0.88}, # high T + low p
"drowsy": {"temp_base":0.30,"temp_span":0.20,"p_sparse":0.99,"p_mid":0.985,"p_dense":0.98}, # low T + high p
"dream": {"temp_base":0.90,"temp_span":0.80,"p_sparse":0.99,"p_mid":0.99,"p_dense":0.985} # high T + high p
})

# Memory presets
modes: Dict[str, Dict[str, float]] = Field(default_factory=lambda: {
"forgetful": {
"combination_threshold": 0.02,
Expand Down Expand Up @@ -262,7 +256,6 @@ class BotConfig(BaseModel):
conversation: ConversationConfig = Field(default_factory=ConversationConfig)
persona: PersonaConfig = Field(default_factory=PersonaConfig)
notion: NotionConfig = Field(default_factory=NotionConfig)
twitter: TwitterConfig = Field(default_factory=TwitterConfig)
system: SystemConfig = Field(default_factory=SystemConfig)
logging: LogConfig = Field(default_factory=LogConfig)
attention: AttentionConfig = Field(default_factory=AttentionConfig)
Expand Down
117 changes: 51 additions & 66 deletions agent/defaultmode.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,17 +51,23 @@ def __init__(self, memory_index, prompt_formats, system_prompts, bot, dmn_config
self.temporal_parser = TemporalParser() # Add temporal parser instance
# Search similarity settings
self.similarity_threshold = dmn_config.similarity_threshold
# Top-p scaling settings
self.top_p_min_clamp = dmn_config.top_p_min_clamp
# Store modes from config
self.modes = dmn_config.modes
# Set initial mode
self.set_mode(mode)
# DMN-specific API settings
self.dmn_api_type = dmn_api_type
self.dmn_model = dmn_model
# Consciousness settings
self.temperature_max=dmn_config.temperature_max
self.consciousness_state=dmn_config.consciousness_default
self.consciousness_presets=dmn_config.consciousness_presets

self.logger.info(f"DMN Processor initialized with API: {dmn_api_type or 'default'}, Model: {dmn_model or 'default'}")

def set_consciousness(self, name: str):
    """Switch to the named consciousness preset.

    Unknown preset names are ignored and leave the current state unchanged.
    """
    if name not in self.consciousness_presets:
        return
    self.consciousness_state = name

async def start(self):
"""Start the DMN processing loop."""
if not self.enabled:
Expand Down Expand Up @@ -327,22 +333,25 @@ async def _generate_thought(self):
else:
# Default to max intensity if top_k is 0 (edge case)
# Corresponds to density = 0 in the formula
density = 0.0
intensity_multiplier = 1.0

# Calculate final intensity using the exact original clamping/scaling
new_intensity = min(100, max(0, int(100 * intensity_multiplier)))
# Update both DMN and global state
self.amygdala_response = new_intensity
self.temperature = new_intensity / 100.0
self.bot.amygdala_response = new_intensity # Update bot's amygdala arousal
# Update bot's private API temperature (not global)
self.bot.update_api_temperature(new_intensity)
# Update top_p with inverse scaling using same intensity value
# High temp (sparse) -> high top_p (diverse), Low temp (dense) -> low top_p (focused)
top_p_value = max(self.top_p_min_clamp, new_intensity / 100.0)
# intensity already computed above as new_intensity and density already computed
self.amygdala_response=new_intensity
I=new_intensity/100.0
self.temperature=0.3+I
self.bot.amygdala_response=new_intensity
# Convert intensity to temperature before passing to API client
self.bot.update_api_temperature(self.temperature)

top_p_value=0.98 if density<.33 else 0.95 if density<.66 else 0.92
self.bot.update_api_top_p(top_p_value)

self.logger.info(f"Updated bot amygdala arousal to {new_intensity} based on memory density")
self.logger.info(f"Updated bot top_p to {top_p_value:.2f} (inverse density scaling)")
self.logger.info(f"Updated bot top_p to {top_p_value:.2f} (banded density mapping)")

system_prompt = self.system_prompts['dmn_thought_generation'].replace(
'{amygdala_response}',
str(self.amygdala_response)
Expand Down Expand Up @@ -423,7 +432,8 @@ async def _generate_thought(self):
# Use user-specific weight decay
self.memory_weights[user_id][memory] *= (1 - (self.decay_rate * decay))
#self.logger.info(f"Memory weight updated for user {user_id}: {memory[:50]}... (decay: {decay:.2f})")
self.memory_index.save_cache()
#self.memory_index.save_cache()
self.memory_index._saver.request()
self.logger.info(f"Updated memory cache after pruning {len(affected_memories)} memories")

# Add cleanup here after new memory addition and weight updates
Expand Down Expand Up @@ -455,59 +465,34 @@ async def _generate_thought(self):
})

def _cleanup_disconnected_memories(self):
"""Remove memories that have no keyword associations in the inverted index."""
# Get all memory IDs that appear in the inverted index
connected_memories = set()
for term_memories in self.memory_index.inverted_index.values():
connected_memories.update(term_memories)
# Find disconnected memories for each user
for user_id, memories in list(self.memory_index.user_memories.items()):
disconnected = sorted([mid for mid in memories if mid not in connected_memories], reverse=True)
if disconnected:
# Remove disconnected memories from weights
for memory_id in disconnected:
if memory_id in self.memory_weights[user_id]:
del self.memory_weights[user_id][memory_id]
# Remove from main memories list and adjust indices
for memory_id in disconnected:
self.memory_index.memories.pop(memory_id)
# Update all higher indices in user_memories
for uid, mems in self.memory_index.user_memories.items():
self.memory_index.user_memories[uid] = [
mid if mid < memory_id else mid - 1
for mid in mems
if mid != memory_id
]

# Update inverted index
for word in list(self.memory_index.inverted_index.keys()):
self.memory_index.inverted_index[word] = [
mid if mid < memory_id else mid - 1
for mid in self.memory_index.inverted_index[word]
if mid != memory_id
]
# Clean up empty word entries
if not self.memory_index.inverted_index[word]:
del self.memory_index.inverted_index[word]

# Remove users with no memories left
if not self.memory_index.user_memories[user_id]:
del self.memory_index.user_memories[user_id]
# print the disconnected memories
self.logger.info(f"Cleaned up {len(disconnected)} disconnected memories for user {user_id}")
#format the disconnected memories for logging
disconnected_memories = [self.memory_index.memories[mid] for mid in disconnected]
# Save the cleaned up state
self.memory_index.save_cache()
# print the disconnected memories
self.logger.info(f"Disconnected memories: {disconnected_memories}")
# Log the cleanup event
self.logger.log({
'event': 'dmn_memory_cleanup',
'timestamp': datetime.now().isoformat(),
'user_id': user_id,
'disconnected_memories': disconnected_memories
})
connected=set(); [connected.update(v) for v in self.memory_index.inverted_index.values()]
for uid,mems in list(self.memory_index.user_memories.items()):
disc=sorted([i for i in mems if i not in connected])
if not disc: continue
texts=[self.memory_index.memories[i] for i in disc]
removed=set(disc)
old_mem=self.memory_index.memories
self.memory_index.memories=[m for i,m in enumerate(old_mem) if i not in removed]
def remap(i):
c=0
for d in disc:
if d<i: c+=1
else: break
return i-c
for u,ls in list(self.memory_index.user_memories.items()):
new_ls=[remap(i) for i in ls if i not in removed]
if new_ls: self.memory_index.user_memories[u]=new_ls
else: self.memory_index.user_memories.pop(u,None)
for w,ls in list(self.memory_index.inverted_index.items()):
nl=[remap(i) for i in ls if i not in removed]
if nl: self.memory_index.inverted_index[w]=nl
else: self.memory_index.inverted_index.pop(w,None)
for u,weights in list(self.memory_weights.items()):
self.memory_weights[u]={remap(k):v for k,v in weights.items() if k not in removed}
self.logger.info(f"Cleaned up {len(disc)} disconnected memories for user {uid}")
self.logger.info(f"Disconnected memories: {texts}")
self.logger.log({'event':'dmn_memory_cleanup','timestamp':datetime.now().isoformat(),'user_id':uid,'disconnected_memories':texts})


def set_mode(self, mode):
"""Update DMN parameters based on mode."""
Expand Down
Loading
Loading