@@ -7,6 +7,7 @@ import { selectSelectedChatModel } from "../slices/configSlice";
 import {
   abortStream,
   addPromptCompletionPair,
+  errorToolCall,
   setActive,
   setAppliedRulesAtIndex,
   setContextPercentage,
@@ -23,6 +24,7 @@ import { modelSupportsNativeTools } from "core/llm/toolSupport";
 import { addSystemMessageToolsToSystemMessage } from "core/tools/systemMessageTools/buildToolsSystemMessage";
 import { interceptSystemToolCalls } from "core/tools/systemMessageTools/interceptSystemToolCalls";
 import { SystemMessageToolCodeblocksFramework } from "core/tools/systemMessageTools/toolCodeblocks";
+import posthog from "posthog-js";
 import {
   selectCurrentToolCalls,
   selectPendingToolCalls,
@@ -177,69 +179,111 @@ export const streamNormalInput = createAsyncThunk<
     dispatch(setIsPruned(didPrune));
     dispatch(setContextPercentage(contextPercentage));
 
-    // Send request and stream response
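+    // Capture the start time so the failure telemetry below can report stream duration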
+    const start = Date.now();
     const streamAborter = state.session.streamAborter;
-    let gen = extra.ideMessenger.llmStreamChat(
-      {
-        completionOptions,
-        title: selectedChatModel.title,
-        messages: compiledChatMessages,
-        legacySlashCommandData,
-        messageOptions: { precompiled: true },
-      },
-      streamAborter.signal,
-    );
-    if (systemToolsFramework && activeTools.length > 0) {
-      gen = interceptSystemToolCalls(gen, streamAborter, systemToolsFramework);
-    }
-
-    let next = await gen.next();
-    while (!next.done) {
-      if (!getState().session.isStreaming) {
-        dispatch(abortStream());
-        break;
+    try {
+      let gen = extra.ideMessenger.llmStreamChat(
+        {
+          completionOptions,
+          title: selectedChatModel.title,
+          messages: compiledChatMessages,
+          legacySlashCommandData,
+          messageOptions: { precompiled: true },
+        },
+        streamAborter.signal,
+      );
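+      // Intercept tool calls that arrive as codeblocks in the text stream
+      // (system-message tools framework)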
+      if (systemToolsFramework && activeTools.length > 0) {
+        gen = interceptSystemToolCalls(
+          gen,
+          streamAborter,
+          systemToolsFramework,
+        );
       }
 
-      dispatch(streamUpdate(next.value));
-      next = await gen.next();
-    }
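+      // Pump the generator, bailing out if the user stops the stream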
+      let next = await gen.next();
+      while (!next.done) {
+        if (!getState().session.isStreaming) {
+          dispatch(abortStream());
+          break;
+        }
 
-    // Attach prompt log and end thinking for reasoning models
-    if (next.done && next.value) {
-      dispatch(addPromptCompletionPair([next.value]));
+        dispatch(streamUpdate(next.value));
+        next = await gen.next();
+      }
 
-      try {
-        extra.ideMessenger.post("devdata/log", {
-          name: "chatInteraction",
-          data: {
-            prompt: next.value.prompt,
-            completion: next.value.completion,
-            modelProvider: selectedChatModel.underlyingProviderName,
-            modelName: selectedChatModel.title,
-            modelTitle: selectedChatModel.title,
-            sessionId: state.session.id,
-            ...(!!activeTools.length && {
-              tools: activeTools.map((tool) => tool.function.name),
-            }),
-            ...(appliedRules.length > 0 && {
-              rules: appliedRules.map((rule) => ({
-                id: getRuleId(rule),
-                rule: rule.rule,
-                slug: rule.slug,
-              })),
+      // Attach prompt log and end thinking for reasoning models
+      if (next.done && next.value) {
+        dispatch(addPromptCompletionPair([next.value]));
+
+        try {
+          extra.ideMessenger.post("devdata/log", {
+            name: "chatInteraction",
+            data: {
+              prompt: next.value.prompt,
+              completion: next.value.completion,
+              modelProvider: selectedChatModel.underlyingProviderName,
+              modelName: selectedChatModel.title,
+              modelTitle: selectedChatModel.title,
+              sessionId: state.session.id,
+              ...(!!activeTools.length && {
+                tools: activeTools.map((tool) => tool.function.name),
+              }),
+              ...(appliedRules.length > 0 && {
+                rules: appliedRules.map((rule) => ({
+                  id: getRuleId(rule),
+                  rule: rule.rule,
+                  slug: rule.slug,
+                })),
+              }),
+            },
+          });
+        } catch (e) {
+          console.error("Failed to send dev data interaction log", e);
+        }
+      }
+    } catch (e) {
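+      // The stream failed mid-response; note this event fires for any stream
+      // error, not only premature closes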
+      const toolCallsToCancel = selectCurrentToolCalls(getState());
+      posthog.capture("stream_premature_close_error", {
+        duration: (Date.now() - start) / 1000,
+        model: selectedChatModel.model,
+        provider: selectedChatModel.underlyingProviderName,
+        context: legacySlashCommandData ? "slash_command" : "regular_chat",
+        ...(legacySlashCommandData && {
+          command: legacySlashCommandData.command.name,
+        }),
+      });
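+      // A premature close while arguments were still streaming leaves tool
+      // calls stuck in "generating"; error out each affected call instead of
+      // rethrowing and failing the whole turn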
+      if (
+        toolCallsToCancel.length > 0 &&
+        e instanceof Error &&
+        e.message.toLowerCase().includes("premature close")
+      ) {
+        for (const tc of toolCallsToCancel) {
+          dispatch(
+            errorToolCall({
+              toolCallId: tc.toolCallId,
+              output: [
+                {
+                  name: "Tool Call Error",
+                  description: "Premature Close",
+                  content: `"Premature Close" error: this tool call was aborted mid-stream because its arguments took too long to stream or there were network issues. Please retry, breaking the operation into smaller chunks or taking a different approach.`,
+                  icon: "problems",
+                },
+              ],
             }),
-          },
-        });
-      } catch (e) {
-        console.error("Failed to send dev data interaction log", e);
+          );
+        }
+      } else {
+        throw e;
       }
     }
 
     // Tool call sequence:
     // 1. Mark generating tool calls as generated
     const state1 = getState();
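+    // Bail out before tool-call handling if the user aborted or stopped the stream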
+    if (streamAborter.signal.aborted || !state1.session.isStreaming) {
+      return;
+    }
     const originalToolCalls = selectCurrentToolCalls(state1);
-
     const generatingCalls = originalToolCalls.filter(
       (tc) => tc.status === "generating",
     );
@@ -285,7 +329,7 @@ export const streamNormalInput = createAsyncThunk<
     if (originalToolCalls.length === 0 || anyRequireApproval) {
       dispatch(setInactive());
     } else {
-      // auto stream cases increase thunk depth by 1
+      // auto-stream cases increase the thunk depth by 1 (tracked for debugging)
       const state4 = getState();
       const generatedCalls4 = selectPendingToolCalls(state4);
       if (streamAborter.signal.aborted || !state4.session.isStreaming) {
@@ -311,7 +355,10 @@ export const streamNormalInput = createAsyncThunk<
       for (const { toolCallId } of originalToolCalls) {
         unwrapResult(
           await dispatch(
-            streamResponseAfterToolCall({ toolCallId, depth: depth + 1 }),
+            streamResponseAfterToolCall({
+              toolCallId,
+              depth: depth + 1,
+            }),
           ),
         );
       }