From 8a44c5415644bf2973fe01b417ee9451f310d11a Mon Sep 17 00:00:00 2001
From: Yuanmiao Gui
Date: Tue, 26 Aug 2025 17:26:58 +0800
Subject: [PATCH 1/3] [MagenticOne] Report the error reason in the exception
 and skip the second check if an error exists

Signed-off-by: Yuanmiao Gui
---
 .../_magentic_one/_magentic_one_orchestrator.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py
index 176789257ba7..b8fe5c2ddfa0 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py
@@ -315,6 +315,7 @@ async def _orchestrate_step(self, cancellation_token: CancellationToken) -> None
         progress_ledger: Dict[str, Any] = {}
         assert self._max_json_retries > 0
         key_error: bool = False
+        key_e_msg: str = None
         for _ in range(self._max_json_retries):
             if self._model_client.model_info.get("structured_output", False):
                 response = await self._model_client.create(
@@ -355,6 +356,7 @@ async def _orchestrate_step(self, cancellation_token: CancellationToken) -> None
                 ]
 
                 key_error = False
+                key_e_msg = None
                 for key in required_keys:
                     if (
                         key not in progress_ledger
@@ -363,14 +365,17 @@ async def _orchestrate_step(self, cancellation_token: CancellationToken) -> None
                         or "reason" not in progress_ledger[key]
                     ):
                         key_error = True
+                        key_e_msg = f"'{key}' does not exist in progress ledger or the corresponding value is invalid"
                         break
 
                 # Validate the next speaker if the task is not yet complete
                 if (
-                    not progress_ledger["is_request_satisfied"]["answer"]
+                    not key_error
+                    and not progress_ledger["is_request_satisfied"]["answer"]
                     and progress_ledger["next_speaker"]["answer"] not in self._participant_names
                 ):
                     key_error = True
+                    key_e_msg = f"next speaker ({progress_ledger['next_speaker']['answer']}) does not exist"
                     break
 
                 if not key_error:
@@ -378,10 +383,11 @@ async def _orchestrate_step(self, cancellation_token: CancellationToken) -> None
                 await self._log_message(f"Failed to parse ledger information, retrying: {ledger_str}")
             except (json.JSONDecodeError, TypeError):
                 key_error = True
+                key_e_msg = "invalid ledger format"
                 await self._log_message("Invalid ledger format encountered, retrying...")
                 continue
         if key_error:
-            raise ValueError("Failed to parse ledger information after multiple retries.")
+            raise ValueError(f"Failed to parse ledger information after multiple retries: {key_e_msg}")
         await self._log_message(f"Progress Ledger: {progress_ledger}")
 
         # Check for task completion

From b0191ffc25f36f4205dbb30f913691308499a7d7 Mon Sep 17 00:00:00 2001
From: Yuanmiao Gui
Date: Thu, 9 Oct 2025 16:49:24 +0800
Subject: [PATCH 2/3] fix type checking

---
 .../_group_chat/_magentic_one/_magentic_one_orchestrator.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py
index b8fe5c2ddfa0..827306b17e25 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py
@@ -2,7 +2,7 @@
 import json
 import logging
 import re
-from typing import Any, Dict, List, Mapping, Sequence
+from typing import Any, Dict, List, Mapping, Optional, Sequence
 
 from autogen_core import AgentId, CancellationToken, DefaultTopicId, MessageContext, event, rpc
 from autogen_core.models import (
@@ -315,7 +315,7 @@ async def _orchestrate_step(self, cancellation_token: CancellationToken) -> None
         progress_ledger: Dict[str, Any] = {}
         assert self._max_json_retries > 0
         key_error: bool = False
-        key_e_msg: str = None
+        key_e_msg: Optional[str] = None
         for _ in range(self._max_json_retries):
             if self._model_client.model_info.get("structured_output", False):
                 response = await self._model_client.create(

From 4b3b95cb2faa0a123647cd3f1ec18ce515567c4b Mon Sep 17 00:00:00 2001
From: Yuanmiao Gui
Date: Thu, 16 Oct 2025 20:25:15 +0800
Subject: [PATCH 3/3] fix test

---
 .../tests/test_magentic_one_group_chat.py | 55 +++++++++++++++++++
 1 file changed, 55 insertions(+)

diff --git a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py
index d217d54763c7..35bc0da77e57 100644
--- a/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py
+++ b/python/packages/autogen-agentchat/tests/test_magentic_one_group_chat.py
@@ -219,3 +219,58 @@ async def test_magentic_one_group_chat_with_stalls(runtime: AgentRuntime | None)
     assert isinstance(result.messages[4], TextMessage)
     assert result.messages[4].content.startswith("\nWe are working to address the following user request:")
     assert result.stop_reason is not None and result.stop_reason == "test"
+
+
+@pytest.mark.asyncio
+async def test_magentic_one_group_chat_response_key_error(runtime: AgentRuntime | None) -> None:
+    agent_1 = _EchoAgent("agent_1", description="echo agent 1")
+    agent_2 = _EchoAgent("agent_2", description="echo agent 2")
+    agent_3 = _EchoAgent("agent_3", description="echo agent 3")
+    agent_4 = _EchoAgent("agent_4", description="echo agent 4")
+
+    async def mock_response(data: str) -> None:
+        model_client = ReplayChatCompletionClient(
+            chat_completions=["test", "test", *[json.dumps({})] * 9, data],
+        )
+        team = MagenticOneGroupChat(
+            participants=[agent_1, agent_2, agent_3, agent_4], model_client=model_client, runtime=runtime
+        )
+        await team.run(task="Write a program that prints 'Hello, world!'")
+
+    with pytest.raises(ValueError) as error_info:
+        await mock_response(json.dumps({}))
+    assert "is_request_satisfied" in str(error_info.value)
+
+    with pytest.raises(ValueError) as error_info:
+        await mock_response(json.dumps({"is_request_satisfied": ""}))
+    assert "is_request_satisfied" in str(error_info.value)
+
+    with pytest.raises(ValueError) as error_info:
+        await mock_response(json.dumps({"is_request_satisfied": {"answer": True}}))
+    assert "is_request_satisfied" in str(error_info.value)
+
+    with pytest.raises(ValueError) as error_info:
+        await mock_response(json.dumps({"is_request_satisfied": {"reason": "test"}}))
+    assert "is_request_satisfied" in str(error_info.value)
+
+    with pytest.raises(ValueError) as error_info:
+        await mock_response(json.dumps({"is_request_satisfied": {"answer": True, "reason": "test"}}))
+    assert "is_progress_being_made" in str(error_info.value)
+
+    with pytest.raises(ValueError) as error_info:
+        await mock_response(
+            json.dumps(
+                {
+                    "is_request_satisfied": {"answer": False, "reason": "test"},
+                    "is_progress_being_made": {"answer": False, "reason": "test"},
+                    "is_in_loop": {"answer": True, "reason": "test"},
+                    "instruction_or_question": {"answer": "Stalling", "reason": "test"},
+                    "next_speaker": {"answer": "agent_0", "reason": "test"},
+                }
+            )
+        )
+    assert "agent_0" in str(error_info.value)
+
+    with pytest.raises(ValueError) as error_info:
+        await mock_response(json.dumps('{"is_request_satisfied":}'))
+    assert "invalid ledger format" in str(error_info.value)