-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathexec_repl.py
More file actions
56 lines (42 loc) · 1.67 KB
/
exec_repl.py
File metadata and controls
56 lines (42 loc) · 1.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import os
import docker
# Docker SDK client connected to the local daemon (configuration such as
# DOCKER_HOST is picked up from the environment by from_env()).
client = docker.from_env()
# Name of the context document expected in the container's /workspace mount;
# it is read and injected into every script passed to execute_repl_code().
FILENAME = "frankenstein.txt"
def execute_repl_code(code_string: str) -> str:
    """Execute LLM-generated code inside the persistent Docker container.

    The code is wrapped in a small prelude that loads ``/workspace/FILENAME``
    into a ``context`` variable, written to a temp script in the shared
    mounted workspace, and run via ``exec_run`` in the ``rlm_worker``
    container.

    Args:
        code_string: Python source produced by the LLM; inserted verbatim
            at module level of the generated script.

    Returns:
        The script's combined stdout/stderr on success, or a string
        prefixed with "Execution Error:" / "System Error:" on failure
        (this function does not raise).
    """
    # Context injection: read the file from the mounted volume on every call
    # so the generated code always sees the current contents.
    script_content = f"""
from llm_query import llm_query
import os
import re
# Ensure context is loaded into memory for the LLM's code to use
try:
    with open("/workspace/{FILENAME}", "r", encoding="utf-8") as f:
        context = f.read()
except FileNotFoundError:
    context = "Error: Could not find {FILENAME} in /workspace"
# --- LLM GENERATED CODE BELOW ---
{code_string}
"""
    # Save the injected context + LLM generated code to a temporary .py file
    # in the shared mounted workspace, which is immediately accessible to the
    # running container.
    temp_filename = "tmp_execution.py"
    with open(temp_filename, "w", encoding="utf-8") as f:
        f.write(script_content)
    try:
        # Get the running container.
        container = client.containers.get("rlm_worker")
        # Execute the script; exec_run returns (exit_code, combined output bytes).
        exit_code, output = container.exec_run(f"python /workspace/{temp_filename}")
        result = output.decode('utf-8')
        if exit_code != 0:
            return f"Execution Error:\n{result}"
        return result
    except docker.errors.NotFound:
        return "System Error: Container 'rlm_worker' is not running."
    except Exception as e:
        return f"System Error:\n{str(e)}"
    finally:
        # BUGFIX: always remove the temp script. The original only removed it
        # on the success path and the generic-Exception path, leaking the file
        # when the container was not found.
        if os.path.exists(temp_filename):
            os.remove(temp_filename)