Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 22 additions & 6 deletions packages/opencode/src/session/prompt.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1324,12 +1324,27 @@ NOTE: At any point in time through this workflow you should feel free to ask the
let lastUser: MessageV2.User | undefined
let lastAssistant: MessageV2.Assistant | undefined
let lastFinished: MessageV2.Assistant | undefined
// Track transcript positions instead of relying on lexical ID ordering.
// Callers may supply custom messageIDs via the API, so MessageID order
// is not guaranteed to match transcript order — use array indices.
let lastUserIndex = -1
let lastAssistantIndex = -1
let lastFinishedIndex = -1
let tasks: (MessageV2.CompactionPart | MessageV2.SubtaskPart)[] = []
for (let i = msgs.length - 1; i >= 0; i--) {
const msg = msgs[i]
if (!lastUser && msg.info.role === "user") lastUser = msg.info
if (!lastAssistant && msg.info.role === "assistant") lastAssistant = msg.info
if (!lastFinished && msg.info.role === "assistant" && msg.info.finish) lastFinished = msg.info
if (!lastUser && msg.info.role === "user") {
lastUser = msg.info
lastUserIndex = i
}
if (!lastAssistant && msg.info.role === "assistant") {
lastAssistant = msg.info
lastAssistantIndex = i
}
if (!lastFinished && msg.info.role === "assistant" && msg.info.finish) {
lastFinished = msg.info
lastFinishedIndex = i
}
if (lastUser && lastFinished) break
const task = msg.parts.filter((part) => part.type === "compaction" || part.type === "subtask")
if (task && !lastFinished) tasks.push(...task)
Expand All @@ -1351,7 +1366,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
lastAssistant?.finish &&
!["tool-calls"].includes(lastAssistant.finish) &&
!hasToolCalls &&
lastUser.id < lastAssistant.id
lastUserIndex < lastAssistantIndex
) {
yield* slog.info("exiting loop")
break
Expand Down Expand Up @@ -1456,8 +1471,9 @@ NOTE: At any point in time through this workflow you should feel free to ask the
yield* summary.summarize({ sessionID, messageID: lastUser.id }).pipe(Effect.ignore, Effect.forkIn(scope))

if (step > 1 && lastFinished) {
for (const m of msgs) {
if (m.info.role !== "user" || m.info.id <= lastFinished.id) continue
for (let i = lastFinishedIndex + 1; i < msgs.length; i++) {
const m = msgs[i]
if (m.info.role !== "user") continue
for (const p of m.parts) {
if (p.type !== "text" || p.ignored || p.synthetic) continue
if (!p.text.trim()) continue
Expand Down
60 changes: 60 additions & 0 deletions packages/opencode/test/session/prompt.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -342,6 +342,66 @@ it.live("loop exits immediately when last assistant has stop finish", () =>
),
)

// Regression: #23490. The HTTP API accepts caller-supplied messageIDs, but the
// loop-exit decision used to compare lexical IDs (`lastUser.id < lastAssistant.id`).
// A custom user ID that lex-sorts AFTER the assistant's auto-generated ID would
// cause the loop to keep running after assistant `finish: "stop"`, sending a
// transcript ending in an assistant turn — which providers reject with
// "last message must be a user message". Ordering must use transcript position,
// not opaque ID compare.
it.live("loop exits when caller-supplied user messageID lex-sorts after assistant id", () =>
provideTmpdirServer(
Effect.fnUntraced(function* ({ llm }) {
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
const chat = yield* sessions.create({ title: "Custom IDs" })
const session = yield* Session.Service

// Custom user ID that lex-sorts greater than any MessageID.ascending()
// output (which uses hex 0-9a-f for the timestamp prefix).
// NOTE(review): updateMessage with a fresh id appears to act as an upsert
// (creating the message) — confirm against Session.Service semantics.
const userTime = Date.now()
const userMsg = yield* session.updateMessage({
id: MessageID.make("msg_zzzzzzzzzzzzzzzzzzzzzzzz"),
role: "user",
sessionID: chat.id,
agent: "build",
model: ref,
time: { created: userTime },
})
// Attach a text part so the user message is a real, non-empty turn.
yield* session.updatePart({
id: PartID.ascending(),
messageID: userMsg.id,
sessionID: chat.id,
type: "text",
text: "hello",
})
// Assistant reply already finished with "stop": the loop should treat the
// conversation as complete and exit without prompting the model again.
yield* session.updateMessage({
id: MessageID.ascending(),
role: "assistant",
parentID: userMsg.id,
sessionID: chat.id,
mode: "build",
agent: "build",
cost: 0,
path: { cwd: "/tmp", root: "/tmp" },
tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
modelID: ref.modelID,
providerID: ref.providerID,
// Strictly greater than userTime so transcript ordering by time_created
// is unambiguous regardless of ID lex order.
time: { created: userTime + 1 },
finish: "stop",
})

// Under the old lexical-ID comparison this would have re-entered the loop
// (userMsg.id > assistant id); with index-based ordering it must exit
// immediately — hence zero LLM calls expected below.
const result = yield* prompt.loop({ sessionID: chat.id })
expect(result.info.role).toBe("assistant")
if (result.info.role === "assistant") expect(result.info.finish).toBe("stop")
expect(yield* llm.calls).toBe(0)
}),
{ git: true, config: providerCfg },
),
)

it.live("loop calls LLM and returns assistant message", () =>
provideTmpdirServer(
Effect.fnUntraced(function* ({ llm }) {
Expand Down
Loading