36 changes: 27 additions & 9 deletions src/app/endpoints/streaming_query.py
@@ -213,7 +213,9 @@ def _handle_turn_complete_event(chunk: Any, chunk_id: int) -> Iterator[str]:
             "event": "turn_complete",
             "data": {
                 "id": chunk_id,
-                "token": chunk.event.payload.turn.output_message.content,
+                "token": interleaved_content_as_str(
+                    chunk.event.payload.turn.output_message.content
+                ),
             },
         }
     )
@@ -335,7 +337,10 @@ def _handle_tool_execution_event(
                 "data": {
                     "id": chunk_id,
                     "role": chunk.event.payload.step_type,
-                    "token": f"Tool:{t.tool_name} arguments:{t.arguments}",
+                    "token": {
+                        "tool_name": t.tool_name,
+                        "arguments": t.arguments,
+                    },
                 },
             }
         )
@@ -349,7 +354,10 @@ def _handle_tool_execution_event(
                 "data": {
                     "id": chunk_id,
                     "role": chunk.event.payload.step_type,
-                    "token": f"Fetched {len(inserted_context)} bytes from memory",
+                    "token": {
+                        "tool_name": r.tool_name,
+                        "response": f"Fetched {len(inserted_context)} bytes from memory",
+                    },
                 },
             }
         )
@@ -380,7 +388,10 @@ def _handle_tool_execution_event(
                 "data": {
                     "id": chunk_id,
                     "role": chunk.event.payload.step_type,
-                    "token": f"Tool:{r.tool_name} summary:{summary}",
+                    "token": {
+                        "tool_name": r.tool_name,
+                        "summary": summary,
+                    },
                 },
             }
         )
@@ -392,7 +403,10 @@ def _handle_tool_execution_event(
                 "data": {
                     "id": chunk_id,
                     "role": chunk.event.payload.step_type,
-                    "token": f"Tool:{r.tool_name} response:{r.content}",
+                    "token": {
+                        "tool_name": r.tool_name,
+                        "response": interleaved_content_as_str(r.content),
+                    },
                 },
             }
         )
@@ -446,16 +460,20 @@ async def streaming_query_endpoint_handler(
     async def response_generator(turn_response: Any) -> AsyncIterator[str]:
         """Generate SSE formatted streaming response."""
         chunk_id = 0
-        complete_response = ""
+        complete_response = "No response from the model"

         # Send start event
         yield stream_start_event(conversation_id)

         async for chunk in turn_response:
             for event in stream_build_event(chunk, chunk_id, metadata_map):
-                complete_response += json.loads(event.replace("data: ", ""))[
-                    "data"
-                ]["token"]
+                if (
+                    json.loads(event.replace("data: ", ""))["event"]
+                    == "turn_complete"
+                ):
+                    complete_response = json.loads(event.replace("data: ", ""))[
+                        "data"
+                    ]["token"]
                 chunk_id += 1
                 yield event

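For context, a minimal client-side sketch (not part of the PR) of how a consumer might read the reworked stream: the "data: " framing and the event and field names are taken from the handler changes above, while collect_final_answer itself is a hypothetical helper.

import json
from typing import Iterable


def collect_final_answer(sse_lines: Iterable[str]) -> str:
    """Return the model's final answer from a stream of 'data: {...}' lines."""
    final_answer = "No response from the model"  # same default the PR introduces
    for line in sse_lines:
        if not line.startswith("data: "):
            continue
        event = json.loads(line[len("data: "):])
        if event.get("event") == "turn_complete":
            # The complete answer now arrives only with the turn_complete event.
            final_answer = event["data"]["token"]
        elif event.get("event") == "tool_call":
            # "token" is now a structured dict such as
            # {"tool_name": ..., "arguments": ...} or {"tool_name": ..., "summary": ...}.
            print("tool event:", event["data"]["token"])
    return final_answer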
48 changes: 39 additions & 9 deletions tests/unit/app/endpoints/test_streaming_query.py
@@ -181,11 +181,21 @@ async def _test_streaming_query_endpoint_handler(mocker, store_transcript=False)
                 payload=AgentTurnResponseStepProgressPayload(
                     event_type="step_progress",
                     step_type="inference",
-                    delta=TextDelta(text="LLM answer", type="text"),
+                    delta=TextDelta(text="LLM ", type="text"),
                     step_id="s1",
                 )
             )
         ),
+        AgentTurnResponseStreamChunk(
+            event=TurnResponseEvent(
+                payload=AgentTurnResponseStepProgressPayload(
+                    event_type="step_progress",
+                    step_type="inference",
+                    delta=TextDelta(text="answer", type="text"),
+                    step_id="s2",
+                )
+            )
+        ),
         AgentTurnResponseStreamChunk(
             event=TurnResponseEvent(
                 payload=AgentTurnResponseStepCompletePayload(
@@ -194,7 +204,7 @@ async def _test_streaming_query_endpoint_handler(mocker, store_transcript=False)
                     step_type="tool_execution",
                     step_details=ToolExecutionStep(
                         turn_id="t1",
-                        step_id="s2",
+                        step_id="s3",
                         step_type="tool_execution",
                         tool_responses=[
                             ToolResponse(
@@ -215,6 +225,27 @@ async def _test_streaming_query_endpoint_handler(mocker, store_transcript=False)
                 )
             )
         ),
+        AgentTurnResponseStreamChunk(
+            event=TurnResponseEvent(
+                payload=AgentTurnResponseTurnCompletePayload(
+                    event_type="turn_complete",
+                    turn=Turn(
+                        turn_id="t1",
+                        input_messages=[],
+                        output_message=CompletionMessage(
+                            role="assistant",
+                            content=[TextContentItem(text="LLM answer", type="text")],
+                            stop_reason="end_of_turn",
+                        ),
+                        session_id="test_session_id",
+                        started_at=datetime.now(),
+                        steps=[],
+                        completed_at=datetime.now(),
+                        output_attachments=[],
+                    ),
+                )
+            )
+        ),
     ]

     query = "What is OpenStack?"
@@ -263,8 +294,8 @@ async def _test_streaming_query_endpoint_handler(mocker, store_transcript=False)
     assert "LLM answer" in full_content

     # Assert referenced documents
-    assert len(streaming_content) == 5
-    d = json.loads(streaming_content[4][5:])
+    assert len(streaming_content) == 7
+    d = json.loads(streaming_content[6][5:])
     referenced_documents = d["data"]["referenced_documents"]
     assert len(referenced_documents) == 2
     assert referenced_documents[1]["doc_title"] == "Doc2"
@@ -277,8 +308,7 @@ async def _test_streaming_query_endpoint_handler(mocker, store_transcript=False)
         query_is_valid=True,
         query=query,
         query_request=query_request,
-        response="LLM answerTool:knowledge_search arguments:{}Tool:knowledge_search "
-        "summary:knowledge_search tool found 2 chunks:",
+        response="LLM answer",
         attachments=[],
         rag_chunks=[],
         truncated=False,
@@ -940,12 +970,12 @@ def test_stream_build_event_step_complete():
     assert result is not None
     assert "data: " in result
     assert '"event": "tool_call"' in result
-    assert '"token": "Tool:knowledge_search arguments:' in result
+    assert '"token": {"tool_name": "knowledge_search", "arguments": {}}' in result

     result = next(itr)
     assert (
-        '"token": "Tool:knowledge_search summary:knowledge_search tool found 2 chunks:"'
-        in result
+        '"token": {"tool_name": "knowledge_search", '
+        '"summary": "knowledge_search tool found 2 chunks:"}' in result
     )
     assert '"role": "tool_execution"' in result
     assert '"id": 0' in result
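As a quick sanity check on the updated assertions, a standalone sketch (assuming format_stream_data emits "data: " followed by json.dumps of the event dict, which the endpoint's event.replace("data: ", "") parsing suggests) showing how the structured token serializes into the substrings matched above:

import json

event = {
    "event": "tool_call",
    "data": {
        "id": 0,
        "role": "tool_execution",
        "token": {"tool_name": "knowledge_search", "arguments": {}},
    },
}
# json.dumps with default separators yields exactly the substrings the test expects.
line = "data: " + json.dumps(event)
assert '"token": {"tool_name": "knowledge_search", "arguments": {}}' in line
assert '"id": 0' in line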