Commit ee269fa

fix: treat premature close during tool calls as a tool error (#8221)
* fix: mark premature close as tool call error
* fix: stop streaming if aborted
* fix: throw error not string
* fix: cleanup premature close catching
* fix: only capture premature close if there are tool calls to cancel
* fix: posthog should log either way for premature close
* fix: revert system message change
1 parent 64c7dd1 commit ee269fa

File tree

- core/llm/streamChat.ts
- gui/src/context/IdeMessenger.tsx
- gui/src/redux/slices/sessionSlice.ts
- gui/src/redux/thunks/streamNormalInput.ts

4 files changed: +105 −71 lines changed

core/llm/streamChat.ts — 1 addition, 18 deletions

```diff
@@ -161,24 +161,7 @@ export async function* llmStreamChat(
       return next.value;
     }
   } catch (error) {
-    if (
-      error instanceof Error &&
-      error.message.toLowerCase().includes("premature close")
-    ) {
-      void Telemetry.capture(
-        "stream_premature_close_error",
-        {
-          model: model.model,
-          provider: model.providerName,
-          errorMessage: error.message,
-          context: legacySlashCommandData ? "slash_command" : "regular_chat",
-          ...(legacySlashCommandData && {
-            command: legacySlashCommandData.command.name,
-          }),
-        },
-        false,
-      );
-    }
+    // Moved error handling that was here to GUI, keeping try/catch for clean diff
     throw error;
   }
 }
```
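The deleted check keyed off the error message of Node's premature-close stream errors (ERR_STREAM_PREMATURE_CLOSE, whose message is "Premature close"). A minimal sketch of the classification the GUI-side catch now performs inline; `isPrematureClose` is a hypothetical helper, not part of this commit:

```ts
// Hypothetical helper (not in this commit): mirrors the guard moved to the GUI.
// Node's ERR_STREAM_PREMATURE_CLOSE errors carry the message "Premature close",
// which the lowercased substring match targets.
function isPrematureClose(e: unknown): e is Error {
  return (
    e instanceof Error && e.message.toLowerCase().includes("premature close")
  );
}
```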

gui/src/context/IdeMessenger.tsx — 1 addition, 1 deletion

```diff
@@ -220,7 +220,7 @@ export class IdeMessenger implements IIdeMessenger {
     try {
       while (!done) {
         if (error) {
-          throw error;
+          throw new Error(error);
         }
         if (buffer.length > index) {
           const chunks = buffer.slice(index);
```
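Wrapping the string in `new Error(...)` matters because the downstream premature-close handler uses an `instanceof Error` guard; a thrown bare string would slip past it. A minimal sketch of the difference (the `classify` helper is illustrative, not from the codebase):

```ts
// Illustrative only: mirrors the guard used in streamNormalInput.ts below.
function classify(e: unknown): "premature-close" | "other" {
  return e instanceof Error &&
    e.message.toLowerCase().includes("premature close")
    ? "premature-close"
    : "other";
}

try {
  // Pre-fix behavior: the raw string from the message buffer was rethrown.
  throw "Premature close";
} catch (e) {
  console.log(classify(e)); // "other" — the handler would miss it
}

try {
  // Post-fix behavior: the string is wrapped, so instanceof checks succeed.
  throw new Error("Premature close");
} catch (e) {
  console.log(classify(e)); // "premature-close"
}
```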

gui/src/redux/slices/sessionSlice.ts — 4 additions, 0 deletions

```diff
@@ -881,6 +881,7 @@ export const sessionSlice = createSlice({
       state,
       action: PayloadAction<{
         toolCallId: string;
+        output?: ContextItem[]; // optional for convenience
       }>,
     ) => {
       const toolCallState = findToolCallById(
@@ -889,6 +890,9 @@ export const sessionSlice = createSlice({
       );
       if (toolCallState) {
         toolCallState.status = "errored";
+        if (action.payload.output) {
+          toolCallState.output = action.payload.output;
+        }
       }
     },
     acceptToolCall: (
```
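Because `output` is optional, existing dispatch sites compile unchanged while the new premature-close path can attach an explanatory context item. A hedged usage sketch — the `store` handle and the exact `ContextItem` fields are assumptions inferred from the diff:

```ts
// Assumed shapes, inferred from the diff; not verified against the full codebase.
// New call site: mark the tool call errored and attach an explanation.
store.dispatch(
  errorToolCall({
    toolCallId: "tc-123",
    output: [
      {
        name: "Tool Call Error",
        description: "Premature Close",
        content: "The stream closed before the tool call arguments finished.",
        icon: "problems",
      },
    ],
  }),
);

// Existing call sites keep working because `output` is optional.
store.dispatch(errorToolCall({ toolCallId: "tc-123" }));
```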

gui/src/redux/thunks/streamNormalInput.ts — 99 additions, 52 deletions

```diff
@@ -7,6 +7,7 @@ import { selectSelectedChatModel } from "../slices/configSlice";
 import {
   abortStream,
   addPromptCompletionPair,
+  errorToolCall,
   setActive,
   setAppliedRulesAtIndex,
   setContextPercentage,
@@ -23,6 +24,7 @@ import { modelSupportsNativeTools } from "core/llm/toolSupport";
 import { addSystemMessageToolsToSystemMessage } from "core/tools/systemMessageTools/buildToolsSystemMessage";
 import { interceptSystemToolCalls } from "core/tools/systemMessageTools/interceptSystemToolCalls";
 import { SystemMessageToolCodeblocksFramework } from "core/tools/systemMessageTools/toolCodeblocks";
+import posthog from "posthog-js";
 import {
   selectCurrentToolCalls,
   selectPendingToolCalls,
@@ -177,69 +179,111 @@ export const streamNormalInput = createAsyncThunk<
     dispatch(setIsPruned(didPrune));
     dispatch(setContextPercentage(contextPercentage));
 
-    // Send request and stream response
+    const start = Date.now();
     const streamAborter = state.session.streamAborter;
-    let gen = extra.ideMessenger.llmStreamChat(
-      {
-        completionOptions,
-        title: selectedChatModel.title,
-        messages: compiledChatMessages,
-        legacySlashCommandData,
-        messageOptions: { precompiled: true },
-      },
-      streamAborter.signal,
-    );
-    if (systemToolsFramework && activeTools.length > 0) {
-      gen = interceptSystemToolCalls(gen, streamAborter, systemToolsFramework);
-    }
-
-    let next = await gen.next();
-    while (!next.done) {
-      if (!getState().session.isStreaming) {
-        dispatch(abortStream());
-        break;
+    try {
+      let gen = extra.ideMessenger.llmStreamChat(
+        {
+          completionOptions,
+          title: selectedChatModel.title,
+          messages: compiledChatMessages,
+          legacySlashCommandData,
+          messageOptions: { precompiled: true },
+        },
+        streamAborter.signal,
+      );
+      if (systemToolsFramework && activeTools.length > 0) {
+        gen = interceptSystemToolCalls(
+          gen,
+          streamAborter,
+          systemToolsFramework,
+        );
       }
 
-      dispatch(streamUpdate(next.value));
-      next = await gen.next();
-    }
+      let next = await gen.next();
+      while (!next.done) {
+        if (!getState().session.isStreaming) {
+          dispatch(abortStream());
+          break;
+        }
 
-    // Attach prompt log and end thinking for reasoning models
-    if (next.done && next.value) {
-      dispatch(addPromptCompletionPair([next.value]));
+        dispatch(streamUpdate(next.value));
+        next = await gen.next();
+      }
 
-      try {
-        extra.ideMessenger.post("devdata/log", {
-          name: "chatInteraction",
-          data: {
-            prompt: next.value.prompt,
-            completion: next.value.completion,
-            modelProvider: selectedChatModel.underlyingProviderName,
-            modelName: selectedChatModel.title,
-            modelTitle: selectedChatModel.title,
-            sessionId: state.session.id,
-            ...(!!activeTools.length && {
-              tools: activeTools.map((tool) => tool.function.name),
-            }),
-            ...(appliedRules.length > 0 && {
-              rules: appliedRules.map((rule) => ({
-                id: getRuleId(rule),
-                rule: rule.rule,
-                slug: rule.slug,
-              })),
+      // Attach prompt log and end thinking for reasoning models
+      if (next.done && next.value) {
+        dispatch(addPromptCompletionPair([next.value]));
+
+        try {
+          extra.ideMessenger.post("devdata/log", {
+            name: "chatInteraction",
+            data: {
+              prompt: next.value.prompt,
+              completion: next.value.completion,
+              modelProvider: selectedChatModel.underlyingProviderName,
+              modelName: selectedChatModel.title,
+              modelTitle: selectedChatModel.title,
+              sessionId: state.session.id,
+              ...(!!activeTools.length && {
+                tools: activeTools.map((tool) => tool.function.name),
+              }),
+              ...(appliedRules.length > 0 && {
+                rules: appliedRules.map((rule) => ({
+                  id: getRuleId(rule),
+                  rule: rule.rule,
+                  slug: rule.slug,
+                })),
+              }),
+            },
+          });
+        } catch (e) {
+          console.error("Failed to send dev data interaction log", e);
+        }
+      }
+    } catch (e) {
+      const toolCallsToCancel = selectCurrentToolCalls(getState());
+      posthog.capture("stream_premature_close_error", {
+        duration: (Date.now() - start) / 1000,
+        model: selectedChatModel.model,
+        provider: selectedChatModel.underlyingProviderName,
+        context: legacySlashCommandData ? "slash_command" : "regular_chat",
+        ...(legacySlashCommandData && {
+          command: legacySlashCommandData.command.name,
+        }),
+      });
+      if (
+        toolCallsToCancel.length > 0 &&
+        e instanceof Error &&
+        e.message.toLowerCase().includes("premature close")
+      ) {
+        for (const tc of toolCallsToCancel) {
+          dispatch(
+            errorToolCall({
+              toolCallId: tc.toolCallId,
+              output: [
+                {
+                  name: "Tool Call Error",
+                  description: "Premature Close",
+                  content: `"Premature Close" error: this tool call was aborted mid-stream because the arguments took too long to stream or there were network issues. Please re-attempt by breaking the operation into smaller chunks or trying something else`,
+                  icon: "problems",
+                },
+              ],
             }),
-          },
-        });
-      } catch (e) {
-        console.error("Failed to send dev data interaction log", e);
+          );
+        }
+      } else {
+        throw e;
       }
     }
 
     // Tool call sequence:
     // 1. Mark generating tool calls as generated
     const state1 = getState();
+    if (streamAborter.signal.aborted || !state1.session.isStreaming) {
+      return;
+    }
     const originalToolCalls = selectCurrentToolCalls(state1);
-
     const generatingCalls = originalToolCalls.filter(
       (tc) => tc.status === "generating",
     );
@@ -285,7 +329,7 @@ export const streamNormalInput = createAsyncThunk<
     if (originalToolCalls.length === 0 || anyRequireApproval) {
       dispatch(setInactive());
     } else {
-      // auto stream cases increase thunk depth by 1
+      // auto stream cases increase thunk depth by 1 for debugging
      const state4 = getState();
       const generatedCalls4 = selectPendingToolCalls(state4);
       if (streamAborter.signal.aborted || !state4.session.isStreaming) {
@@ -311,7 +355,10 @@ export const streamNormalInput = createAsyncThunk<
     for (const { toolCallId } of originalToolCalls) {
       unwrapResult(
         await dispatch(
-          streamResponseAfterToolCall({ toolCallId, depth: depth + 1 }),
+          streamResponseAfterToolCall({
+            toolCallId,
+            depth: depth + 1,
+          }),
         ),
       );
     }
```
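Distilled, the new catch path makes two independent decisions: telemetry fires for every stream error, but the error is only swallowed (and converted into per-tool-call errors) when it is a premature close and there are tool calls to cancel. A condensed sketch under those assumptions, with names mirroring the diff:

```ts
// Condensed sketch of the catch-path logic above; `PendingToolCall` is an
// assumed shape, and the dispatch/telemetry calls are elided to comments.
interface PendingToolCall {
  toolCallId: string;
}

function handleStreamError(
  e: unknown,
  toolCallsToCancel: PendingToolCall[],
): void {
  // 1. Telemetry logs either way (one of the commit's fix bullets).
  //    posthog.capture("stream_premature_close_error", { ... });

  const isPrematureClose =
    e instanceof Error && e.message.toLowerCase().includes("premature close");

  // 2. Only a premature close with pending tool calls is converted into
  //    tool-call errors; everything else keeps its existing propagation.
  if (toolCallsToCancel.length > 0 && isPrematureClose) {
    for (const tc of toolCallsToCancel) {
      console.warn("canceling tool call", tc.toolCallId);
      // dispatch(errorToolCall({ toolCallId: tc.toolCallId, output: [...] }));
    }
  } else {
    throw e;
  }
}
```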
