Normalize TeamManager task results before wrapping #9
Conversation
Important: Review skipped. Auto reviews are disabled on base/target branches other than the default branch. Please check the settings in the CodeRabbit UI. ✅ Copyable unit test edits generated.
Note: Unit test generation is an Early Access feature. Expect some limitations and changes as we gather feedback and continue to improve it. Generating unit tests... This may take up to 20 minutes.
Caution: CodeRabbit will not generate a local commit with generated unit tests for a closed pull request. Try using copyable edits or generating a pull request.
Caution: The CodeRabbit agent failed during execution: Server error: no LLM provider could handle the message.
Here are the copyable unit test edits:

Copyable Edits

coderabbit.markdownlint-cli2.jsonc (new file)

python/packages/autogen-studio/tests/test_team_manager_additional.py
@@ -286,4 +286,502 @@
team_manager = TeamManager()
with pytest.raises(ValueError, match="Unsupported team_config type"):
- await team_manager._create_team(12345) # Invalid type
+ await team_manager._create_team(12345) # Invalid type
+
+class TestNormalizeTaskResult:
+ """Comprehensive tests for the _normalize_task_result static method"""
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_valid_task_result(self):
+ """Test that a valid TaskResult is returned as-is"""
+ from autogen_agentchat.base import TaskResult
+ from autogen_agentchat.messages import TextMessage
+
+ original_message = TextMessage(content="Test", source="agent1")
+ original_result = TaskResult(messages=[original_message], stop_reason="completed")
+
+ normalized = TeamManager._normalize_task_result(original_result)
+
+ assert normalized is original_result
+ assert len(normalized.messages) == 1
+ assert normalized.stop_reason == "completed"
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_valid_dict(self):
+ """Test normalization from a valid dict representation"""
+ from autogen_agentchat.messages import TextMessage
+
+ result_dict = {
+ "messages": [],
+ "stop_reason": "max_turns"
+ }
+
+ normalized = TeamManager._normalize_task_result(result_dict)
+
+ from autogen_agentchat.base import TaskResult
+ assert isinstance(normalized, TaskResult)
+ assert normalized.stop_reason == "max_turns"
+ assert len(normalized.messages) == 0
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_invalid_dict(self):
+ """Test normalization with dict that can't be validated as TaskResult"""
+ result_dict = {
+ "invalid_field": "value",
+ "another_invalid": 123
+ }
+
+ normalized = TeamManager._normalize_task_result(result_dict)
+
+ from autogen_agentchat.base import TaskResult
+ assert isinstance(normalized, TaskResult)
+ # Should fall back to extracting what it can
+ assert isinstance(normalized.messages, list)
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_object_having_messages(self):
+ """Test normalization from object with messages attribute"""
+ from autogen_agentchat.messages import TextMessage
+
+ class MockResult:
+ def __init__(self):
+ self.messages = [
+ TextMessage(content="msg1", source="agent1"),
+ TextMessage(content="msg2", source="agent2"),
+ ]
+ self.stop_reason = "test_stop"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ from autogen_agentchat.base import TaskResult
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 2
+ assert normalized.stop_reason == "test_stop"
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_filters_invalid_messages(self):
+ """Test that invalid messages are filtered out"""
+ from autogen_agentchat.messages import TextMessage
+
+ class MockResult:
+ def __init__(self):
+ self.messages = [
+ TextMessage(content="valid", source="agent1"),
+ "invalid_string_message",
+ 123,
+ {"invalid": "dict"},
+ TextMessage(content="valid2", source="agent2"),
+ ]
+ self.stop_reason = "filtered"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ # Only valid messages should remain
+ assert len(normalized.messages) == 2
+ assert all(hasattr(msg, "content") for msg in normalized.messages)
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_non_string_stop_reason(self):
+ """Test that non-string stop_reason is converted to string"""
+ class MockResult:
+ def __init__(self):
+ self.messages = []
+ self.stop_reason = 12345 # Not a string
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert normalized.stop_reason == "12345"
+ assert isinstance(normalized.stop_reason, str)
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_none_stop_reason(self):
+ """Test that None stop_reason is handled correctly"""
+ class MockResult:
+ def __init__(self):
+ self.messages = []
+ self.stop_reason = None
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert normalized.stop_reason is None
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_string_messages_attribute(self):
+ """Test that string messages attribute (not a sequence) is ignored"""
+ class MockResult:
+ def __init__(self):
+ self.messages = "this is a string, not a list"
+ self.stop_reason = "test"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ # Should result in empty messages since string is not a valid sequence
+ assert len(normalized.messages) == 0
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_bytes_messages_attribute(self):
+ """Test that bytes messages attribute is ignored"""
+ class MockResult:
+ def __init__(self):
+ self.messages = b"bytes data"
+ self.stop_reason = "test"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ # Bytes should be ignored, resulting in empty messages
+ assert len(normalized.messages) == 0
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_without_messages_attribute(self):
+ """Test normalization when object has no messages attribute"""
+ class MockResult:
+ def __init__(self):
+ self.stop_reason = "no_messages"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert len(normalized.messages) == 0
+ assert normalized.stop_reason == "no_messages"
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_empty_messages_list(self):
+ """Test normalization with empty messages list"""
+ class MockResult:
+ def __init__(self):
+ self.messages = []
+ self.stop_reason = "empty"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert len(normalized.messages) == 0
+ assert normalized.stop_reason == "empty"
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_falls_back_on_validation_error(self):
+ """Test that validation errors result in fallback to empty TaskResult"""
+ class ProblematicResult:
+ def __init__(self):
+ # This will cause issues during TaskResult creation
+ self.messages = [object()] # Invalid message type
+ self.stop_reason = None
+
+ problematic = ProblematicResult()
+ normalized = TeamManager._normalize_task_result(problematic)
+
+ from autogen_agentchat.base import TaskResult
+ assert isinstance(normalized, TaskResult)
+ # Should fall back to empty result
+ assert len(normalized.messages) == 0
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_complex_stop_reason_object(self):
+ """Test that complex objects as stop_reason are converted to string"""
+ class CustomStopReason:
+ def __str__(self):
+ return "custom_stop_representation"
+
+ class MockResult:
+ def __init__(self):
+ self.messages = []
+ self.stop_reason = CustomStopReason()
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert normalized.stop_reason == "custom_stop_representation"
+
+ @pytest.mark.asyncio
+ async def test_normalize_task_result_with_mixed_valid_invalid_messages(self):
+ """Test filtering with mixed valid BaseAgentEvent and BaseChatMessage"""
+ from autogen_agentchat.messages import TextMessage
+
+ class MockAgentEvent:
+ """Mock representing BaseAgentEvent"""
+ pass
+
+ # We need to use actual message types since the code checks isinstance
+ class MockResult:
+ def __init__(self):
+ self.messages = [
+ TextMessage(content="msg1", source="agent1"),
+ None, # Invalid
+ TextMessage(content="msg2", source="agent2"),
+ ]
+ self.stop_reason = "mixed"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ # Should only keep valid messages
+ assert len(normalized.messages) == 2
+
+
+class TestTeamManagerRunWithNormalization:
+ """Test that run and run_stream properly use _normalize_task_result"""
+
+ @pytest.mark.asyncio
+ async def test_run_normalizes_non_task_result_output(self, sample_config):
+ """Test that run() normalizes non-TaskResult outputs"""
+ team_manager = TeamManager()
+
+ # Create a mock result that's not a TaskResult
+ class MockTeamResult:
+ def __init__(self):
+ from autogen_agentchat.messages import TextMessage
+ self.messages = [TextMessage(content="test", source="agent")]
+ self.stop_reason = "done"
+
+ with patch.object(team_manager, "_create_team") as mock_create:
+ mock_team = MagicMock()
+
+ async def mock_run(*args, **kwargs):
+ return MockTeamResult()
+
+ mock_team.run = mock_run
+ mock_team._participants = []
+ mock_create.return_value = mock_team
+
+ result = await team_manager.run(task="test", team_config=sample_config)
+
+ assert isinstance(result, TeamResult)
+ from autogen_agentchat.base import TaskResult
+ assert isinstance(result.task_result, TaskResult)
+ assert len(result.task_result.messages) == 1
+
+ @pytest.mark.asyncio
+ async def test_run_stream_normalizes_task_result_messages(self, sample_config):
+ """Test that run_stream() normalizes TaskResult messages"""
+ team_manager = TeamManager()
+
+ from autogen_agentchat.base import TaskResult
+ from autogen_agentchat.messages import TextMessage
+
+ # Create a mock TaskResult
+ mock_task_result = TaskResult(
+ messages=[TextMessage(content="stream_test", source="agent")],
+ stop_reason="completed"
+ )
+
+ with patch.object(team_manager, "_create_team") as mock_create:
+ mock_team = MagicMock()
+
+ async def mock_run_stream(*args, **kwargs):
+ yield TextMessage(content="msg1", source="agent1")
+ yield mock_task_result
+
+ mock_team.run_stream = mock_run_stream
+ mock_team._participants = []
+ mock_create.return_value = mock_team
+
+ messages = []
+ async for message in team_manager.run_stream(task="test", team_config=sample_config):
+ messages.append(message)
+
+ # Should have text message and TeamResult
+ assert len(messages) >= 2
+ # Last message should be TeamResult with normalized task_result
+ assert isinstance(messages[-1], TeamResult)
+ assert isinstance(messages[-1].task_result, TaskResult)
+
+ @pytest.mark.asyncio
+ async def test_run_preserves_already_normalized_result(self, sample_config):
+ """Test that run() doesn't double-normalize already valid TaskResults"""
+ team_manager = TeamManager()
+
+ from autogen_agentchat.base import TaskResult
+ from autogen_agentchat.messages import TextMessage
+
+ original_task_result = TaskResult(
+ messages=[TextMessage(content="original", source="agent")],
+ stop_reason="original_stop"
+ )
+
+ with patch.object(team_manager, "_create_team") as mock_create:
+ mock_team = MagicMock()
+
+ async def mock_run(*args, **kwargs):
+ return original_task_result
+
+ mock_team.run = mock_run
+ mock_team._participants = []
+ mock_create.return_value = mock_team
+
+ result = await team_manager.run(task="test", team_config=sample_config)
+
+ # Should be the same object since it's already a TaskResult
+ assert result.task_result is original_task_result
+
+
+class TestWebAppRoutesRemoval:
+ """Test that removed routes are no longer included in the app"""
+
+ def test_analytics_routes_not_included(self):
+ """Test that analytics routes are not included in app"""
+ # Import the app module
+ from autogenstudio.web import app as app_module
+
+ # Check that analytics import doesn't exist in the routes
+ with open("python/packages/autogen-studio/autogenstudio/web/app.py") as f:
+ app_content = f.read()
+ assert "from .routes import" in app_content
+ # Should not import analytics, export, or streaming
+ assert "analytics" not in app_content.split("from .routes import")[1].split("\n")[0]
+
+ def test_export_routes_not_included(self):
+ """Test that export routes are not included in app"""
+ with open("python/packages/autogen-studio/autogenstudio/web/app.py") as f:
+ app_content = f.read()
+ # Should not include export router
+ assert "export_routes.router" not in app_content
+ assert 'prefix="/export"' not in app_content
+
+ def test_streaming_routes_not_included(self):
+ """Test that streaming routes are not included in app"""
+ with open("python/packages/autogen-studio/autogenstudio/web/app.py") as f:
+ app_content = f.read()
+ # Should not include streaming router
+ assert "streaming.router" not in app_content
+ assert 'prefix="/streaming"' not in app_content
+
+ def test_routes_init_is_empty(self):
+ """Test that routes __init__.py is empty"""
+ with open("python/packages/autogen-studio/autogenstudio/web/routes/__init__.py") as f:
+ content = f.read().strip()
+ # Should be empty or only contain whitespace/comments
+ assert len(content) == 0 or content.startswith("#")
+
+
+# Additional edge case tests for Docker executor property removal
+class TestDockerExecutorPropertyRemoval:
+ """Test that the removed delete_tmp_files property doesn't break functionality"""
+
+ @pytest.mark.asyncio
+ async def test_docker_executor_without_delete_tmp_files_property(self):
+ """Test that executor works without the delete_tmp_files property"""
+ from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutorConfig
+
+ # The property was removed but the config still has the field
+ config = DockerCommandLineCodeExecutorConfig(
+ image="python:3-slim",
+ timeout=60,
+ delete_tmp_files=True
+ )
+
+ # Should be able to create config with delete_tmp_files
+ assert config.delete_tmp_files is True
+
+ # Create another with False
+ config2 = DockerCommandLineCodeExecutorConfig(
+ image="python:3-slim",
+ timeout=60,
+ delete_tmp_files=False
+ )
+ assert config2.delete_tmp_files is False
+
+ @pytest.mark.asyncio
+ async def test_docker_executor_config_serialization_still_works(self):
+ """Test that config serialization works without the property"""
+ from autogen_ext.code_executors.docker import (
+ DockerCommandLineCodeExecutor,
+ DockerCommandLineCodeExecutorConfig
+ )
+
+ config = DockerCommandLineCodeExecutorConfig(
+ image="python:3.11-slim",
+ timeout=90,
+ delete_tmp_files=True
+ )
+
+ # Should be able to create executor from config
+ executor = DockerCommandLineCodeExecutor._from_config(config)
+
+ # Should be able to serialize back to config
+ new_config = executor._to_config()
+
+ # delete_tmp_files should still be in the config
+ assert hasattr(new_config, "delete_tmp_files")
+ assert new_config.delete_tmp_files == config.delete_tmp_files
+
+
+# Integration-style tests for complete workflows
+class TestTeamManagerIntegrationWithNormalization:
+ """Integration tests for TeamManager with normalization"""
+
+ @pytest.mark.asyncio
+ async def test_complete_run_workflow_with_normalization(self, sample_config):
+ """Test complete run workflow ensuring normalization happens"""
+ team_manager = TeamManager()
+
+ # Simulate a complete workflow
+ class ComplexResult:
+ def __init__(self):
+ from autogen_agentchat.messages import TextMessage
+ self.messages = [
+ TextMessage(content="Step 1", source="agent1"),
+ TextMessage(content="Step 2", source="agent2"),
+ "invalid", # This should be filtered
+ ]
+ self.stop_reason = 999 # Should be converted to string
+
+ with patch.object(team_manager, "_create_team") as mock_create:
+ mock_team = MagicMock()
+
+ async def mock_run(*args, **kwargs):
+ return ComplexResult()
+
+ mock_team.run = mock_run
+ mock_team._participants = []
+ mock_create.return_value = mock_team
+
+ result = await team_manager.run(task="complex task", team_config=sample_config)
+
+ # Verify normalization happened
+ assert isinstance(result, TeamResult)
+ assert len(result.task_result.messages) == 2 # Invalid filtered
+ assert result.task_result.stop_reason == "999" # Converted to string
+ assert result.duration >= 0
+
+ @pytest.mark.asyncio
+ async def test_run_stream_workflow_with_multiple_message_types(self, sample_config):
+ """Test run_stream with various message types and normalization"""
+ team_manager = TeamManager()
+
+ from autogen_agentchat.messages import TextMessage
+ from autogen_agentchat.base import TaskResult
+
+ class CustomResult:
+ def __init__(self):
+ self.messages = [TextMessage(content="final", source="agent")]
+ self.stop_reason = "custom_end"
+
+ with patch.object(team_manager, "_create_team") as mock_create:
+ mock_team = MagicMock()
+
+ async def mock_run_stream(*args, **kwargs):
+ yield TextMessage(content="msg1", source="a1")
+ yield TextMessage(content="msg2", source="a2")
+ yield CustomResult() # Non-TaskResult that needs normalization
+
+ mock_team.run_stream = mock_run_stream
+ mock_team._participants = []
+ mock_create.return_value = mock_team
+
+ messages = []
+ async for message in team_manager.run_stream(task="stream test", team_config=sample_config):
+ messages.append(message)
+
+ # Should have 2 text messages + 1 TeamResult
+ assert len(messages) == 3
+ assert isinstance(messages[0], TextMessage)
+ assert isinstance(messages[1], TextMessage)
+ assert isinstance(messages[2], TeamResult)
+ # The CustomResult should be normalized
+ assert len(messages[2].task_result.messages) == 1python/packages/autogen-studio/tests/test_web_app_routes.pyThis is a new file. ruff.tomlThis is a new file. TEST_GENERATION_SUMMARY.mdThis is a new file. |
Here are the copyable unit test edits:

Copyable Edits

coderabbit.markdownlint-cli2.jsonc (new file)
COMPREHENSIVE_TEST_REPORT.md (new file)

python/packages/autogen-ext/tests/code_executors/test_docker_commandline_code_executor.py
@@ -636,4 +636,72 @@
assert new_config.extra_volumes == test_config.extra_volumes
assert new_config.extra_hosts == test_config.extra_hosts
assert new_config.init_command == test_config.init_command
- assert new_config.delete_tmp_files == test_config.delete_tmp_files
+ assert new_config.delete_tmp_files == test_config.delete_tmp_files
+
+@pytest.mark.asyncio
+async def test_delete_tmp_files_property_removed() -> None:
+ """Test that delete_tmp_files property is no longer accessible"""
+ if not docker_tests_enabled():
+ pytest.skip("Docker tests are disabled")
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ async with DockerCommandLineCodeExecutor(
+ work_dir=temp_dir,
+ delete_tmp_files=True
+ ) as executor:
+ # Verify the property no longer exists
+ assert not hasattr(executor, 'delete_tmp_files')
+
+ # Verify accessing it raises AttributeError
+ with pytest.raises(AttributeError):
+ _ = executor.delete_tmp_files
+
+
+@pytest.mark.asyncio
+async def test_docker_executor_still_respects_delete_tmp_files_config() -> None:
+ """Test that delete_tmp_files config parameter still works internally"""
+ if not docker_tests_enabled():
+ pytest.skip("Docker tests are disabled")
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # Test with delete_tmp_files=True in constructor
+ async with DockerCommandLineCodeExecutor(
+ work_dir=temp_dir,
+ delete_tmp_files=True
+ ) as executor:
+ code = """
+import os
+print("Temp files test")
+"""
+ code_blocks = [CodeBlock(code=code, language="python")]
+ result = await executor.execute_code_blocks(
+ code_blocks,
+ cancellation_token=CancellationToken()
+ )
+
+ # Should execute successfully even though property is removed
+ assert result.exit_code == 0
+ assert "Temp files test" in result.output
+
+
+@pytest.mark.asyncio
+async def test_docker_executor_config_delete_tmp_files_accessible() -> None:
+ """Test that delete_tmp_files is still accessible via config"""
+ if not docker_tests_enabled():
+ pytest.skip("Docker tests are disabled")
+
+ config = DockerCommandLineCodeExecutorConfig(
+ image="python:3-slim",
+ delete_tmp_files=True
+ )
+
+ # Config should still have the attribute
+ assert hasattr(config, 'delete_tmp_files')
+ assert config.delete_tmp_files is True
+
+ # Test with False
+ config2 = DockerCommandLineCodeExecutorConfig(
+ image="python:3-slim",
+ delete_tmp_files=False
+ )
+ assert config2.delete_tmp_files is False

python/packages/autogen-studio/tests/test_removed_app_routes.py (new file)

python/packages/autogen-studio/tests/test_team_manager_additional.py
@@ -10,6 +10,7 @@
from autogenstudio.teammanager import TeamManager
from autogenstudio.datamodel.types import TeamResult, EnvironmentVariable
from autogen_core import CancellationToken
+from autogen_agentchat.base import TaskResult
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_ext.models.openai import OpenAIChatCompletionClient
@@ -153,7 +154,6 @@
mock_team = MagicMock()
# Create mock messages of different types
- from autogen_agentchat.base import TaskResult
from autogen_agentchat.messages import TextMessage
mock_text_message = TextMessage(content="Test message", source="agent")
@@ -286,4 +286,371 @@
team_manager = TeamManager()
with pytest.raises(ValueError, match="Unsupported team_config type"):
- await team_manager._create_team(12345) # Invalid type
+ await team_manager._create_team(12345) # Invalid type
+
+class TestNormalizeTaskResult:
+ """Comprehensive tests for the _normalize_task_result static method"""
+
+ def test_normalize_task_result_already_taskresult(self):
+ """Test that TaskResult instances are returned as-is"""
+ from autogen_agentchat.messages import TextMessage
+
+ msg = TextMessage(content="test", source="agent")
+ original_result = TaskResult(messages=[msg], stop_reason="completed")
+
+ normalized = TeamManager._normalize_task_result(original_result)
+
+ assert normalized is original_result
+ assert normalized.stop_reason == "completed"
+ assert len(normalized.messages) == 1
+
+ def test_normalize_task_result_from_valid_dict(self):
+ """Test normalization from a valid dict that matches TaskResult schema"""
+ from autogen_agentchat.messages import TextMessage
+
+ msg = TextMessage(content="test", source="agent")
+ result_dict = {
+ "messages": [msg],
+ "stop_reason": "max_turns"
+ }
+
+ normalized = TeamManager._normalize_task_result(result_dict)
+
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 1
+ assert normalized.stop_reason == "max_turns"
+
+ def test_normalize_task_result_from_invalid_dict(self):
+ """Test normalization from dict that fails validation"""
+ # Dict that doesn't match TaskResult schema
+ invalid_dict = {
+ "some_field": "value",
+ "another_field": 123
+ }
+
+ normalized = TeamManager._normalize_task_result(invalid_dict)
+
+ # Should fall back to creating TaskResult with messages from object attributes
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 0
+
+ def test_normalize_task_result_with_messages_attribute(self):
+ """Test normalization from object with messages attribute"""
+ from autogen_agentchat.messages import TextMessage
+
+ class MockResult:
+ def __init__(self):
+ self.messages = [
+ TextMessage(content="msg1", source="agent1"),
+ TextMessage(content="msg2", source="agent2")
+ ]
+ self.stop_reason = "completed"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 2
+ assert normalized.stop_reason == "completed"
+
+ def test_normalize_task_result_filters_invalid_messages(self):
+ """Test that non-message objects are filtered out"""
+ from autogen_agentchat.messages import TextMessage
+
+ class MockResult:
+ def __init__(self):
+ self.messages = [
+ TextMessage(content="valid", source="agent"),
+ "invalid_string",
+ 123,
+ {"invalid": "dict"},
+ TextMessage(content="also_valid", source="agent2")
+ ]
+ self.stop_reason = None
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ # Should only have the 2 valid TextMessage objects
+ assert len(normalized.messages) == 2
+
+ def test_normalize_task_result_string_messages_ignored(self):
+ """Test that string sequences are not treated as message lists"""
+ class MockResult:
+ def __init__(self):
+ self.messages = "This is a string, not a list"
+ self.stop_reason = None
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ # String should be ignored (it's a Sequence but excluded)
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 0
+
+ def test_normalize_task_result_bytes_messages_ignored(self):
+ """Test that bytes sequences are not treated as message lists"""
+ class MockResult:
+ def __init__(self):
+ self.messages = b"bytes data"
+ self.stop_reason = None
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ # Bytes should be ignored (it's a Sequence but excluded)
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 0
+
+ def test_normalize_task_result_no_messages_attribute(self):
+ """Test normalization when object has no messages attribute"""
+ class MockResult:
+ def __init__(self):
+ self.stop_reason = "completed"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 0
+ assert normalized.stop_reason == "completed"
+
+ def test_normalize_task_result_non_string_stop_reason(self):
+ """Test that non-string stop_reason is converted to string"""
+ class MockResult:
+ def __init__(self):
+ self.messages = []
+ self.stop_reason = 12345 # Integer stop reason
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ assert normalized.stop_reason == "12345"
+ assert isinstance(normalized.stop_reason, str)
+
+ def test_normalize_task_result_none_stop_reason(self):
+ """Test that None stop_reason is preserved as None"""
+ class MockResult:
+ def __init__(self):
+ self.messages = []
+ self.stop_reason = None
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ assert normalized.stop_reason is None
+
+ def test_normalize_task_result_no_stop_reason_attribute(self):
+ """Test normalization when object has no stop_reason attribute"""
+ from autogen_agentchat.messages import TextMessage
+
+ class MockResult:
+ def __init__(self):
+ self.messages = [TextMessage(content="msg", source="agent")]
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ assert normalized.stop_reason is None
+
+ def test_normalize_task_result_fallback_to_empty(self):
+ """Test fallback when TaskResult construction fails"""
+ # Create an object that will cause validation to fail
+ class ProblematicResult:
+ def __init__(self):
+ # Create a messages list that looks valid but might cause issues
+ self.messages = None
+ self.stop_reason = None
+
+ result = ProblematicResult()
+ normalized = TeamManager._normalize_task_result(result)
+
+ # Should fall back to empty TaskResult
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 0
+
+ def test_normalize_task_result_with_base_agent_events(self):
+ """Test normalization with BaseAgentEvent objects"""
+ from autogen_agentchat.messages import TextMessage
+
+ class MockResult:
+ def __init__(self):
+ # Mix of message types
+ self.messages = [
+ TextMessage(content="text", source="agent"),
+ ]
+ self.stop_reason = "test"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) >= 1
+
+ def test_normalize_task_result_empty_messages_list(self):
+ """Test normalization with empty messages list"""
+ class MockResult:
+ def __init__(self):
+ self.messages = []
+ self.stop_reason = "no_messages"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 0
+ assert normalized.stop_reason == "no_messages"
+
+ def test_normalize_task_result_tuple_messages(self):
+ """Test normalization with messages as tuple (also a Sequence)"""
+ from autogen_agentchat.messages import TextMessage
+
+ class MockResult:
+ def __init__(self):
+ self.messages = (
+ TextMessage(content="msg1", source="agent1"),
+ TextMessage(content="msg2", source="agent2"),
+ )
+ self.stop_reason = "tuple_test"
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 2
+
+ def test_normalize_task_result_complex_stop_reason_object(self):
+ """Test normalization with complex object as stop_reason"""
+ class StopReasonObj:
+ def __str__(self):
+ return "custom_stop_reason"
+
+ class MockResult:
+ def __init__(self):
+ self.messages = []
+ self.stop_reason = StopReasonObj()
+
+ mock_result = MockResult()
+ normalized = TeamManager._normalize_task_result(mock_result)
+
+ assert isinstance(normalized, TaskResult)
+ assert normalized.stop_reason == "custom_stop_reason"
+
+ def test_normalize_task_result_integration_with_run(self):
+ """Test _normalize_task_result integration in run method"""
+ team_manager = TeamManager()
+
+ with patch.object(team_manager, "_create_team") as mock_create:
+ mock_team = MagicMock()
+
+ # Return a non-TaskResult object
+ class CustomResult:
+ def __init__(self):
+ self.messages = []
+ self.stop_reason = "custom"
+
+ async def mock_run(*args, **kwargs):
+ return CustomResult()
+
+ mock_team.run = mock_run
+ mock_create.return_value = mock_team
+
+ import asyncio
+ result = asyncio.run(team_manager.run(
+ task="test",
+ team_config={"test": "config"}
+ ))
+
+ # Result should be normalized to TeamResult
+ assert isinstance(result, TeamResult)
+ assert hasattr(result, "task_result")
+ assert hasattr(result, "duration")
+
+ def test_normalize_task_result_with_dict_empty_messages(self):
+ """Test dict normalization with empty messages"""
+ result_dict = {
+ "messages": [],
+ "stop_reason": "early_stop"
+ }
+
+ normalized = TeamManager._normalize_task_result(result_dict)
+
+ assert isinstance(normalized, TaskResult)
+ assert len(normalized.messages) == 0
+ assert normalized.stop_reason == "early_stop"
+
+
+class TestTeamManagerRunStreamNormalization:
+ """Test _normalize_task_result integration with run_stream"""
+
+ @pytest.mark.asyncio
+ async def test_run_stream_normalizes_task_result(self):
+ """Test that run_stream normalizes TaskResult messages"""
+ from autogen_agentchat.messages import TextMessage
+
+ team_manager = TeamManager()
+
+ with patch.object(team_manager, "_create_team") as mock_create:
+ mock_team = MagicMock()
+
+ # Create a custom result that needs normalization
+ class CustomTaskResult:
+ def __init__(self):
+ self.messages = [TextMessage(content="test", source="agent")]
+ self.stop_reason = 999 # Non-string
+
+ custom_result = CustomTaskResult()
+
+ async def mock_run_stream(*args, **kwargs):
+ yield TextMessage(content="streaming", source="agent")
+ yield custom_result
+
+ mock_team.run_stream = mock_run_stream
+ mock_create.return_value = mock_team
+
+ messages = []
+ async for message in team_manager.run_stream(
+ task="test",
+ team_config={"test": "config"}
+ ):
+ messages.append(message)
+
+ # Last message should be TeamResult with normalized task_result
+ assert len(messages) >= 2
+ last_msg = messages[-1]
+ assert isinstance(last_msg, TeamResult)
+
+ @pytest.mark.asyncio
+ async def test_run_stream_handles_taskresult_directly(self):
+ """Test run_stream when TaskResult is already proper type"""
+ from autogen_agentchat.messages import TextMessage
+
+ team_manager = TeamManager()
+
+ with patch.object(team_manager, "_create_team") as mock_create:
+ mock_team = MagicMock()
+
+ proper_result = TaskResult(
+ messages=[TextMessage(content="done", source="agent")],
+ stop_reason="completed"
+ )
+
+ async def mock_run_stream(*args, **kwargs):
+ yield proper_result
+
+ mock_team.run_stream = mock_run_stream
+ mock_create.return_value = mock_team
+
+ messages = []
+ async for message in team_manager.run_stream(
+ task="test",
+ team_config={"test": "config"}
+ ):
+ messages.append(message)
+
+ assert len(messages) == 1
+ assert isinstance(messages[0], TeamResult)

ruff.toml (new file)
TEST_GENERATION_SUMMARY.md (new file)
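The generated tests above pin down the expected behavior of TeamManager._normalize_task_result without showing its body. The following is a minimal sketch consistent with those expectations, assuming TaskResult, BaseAgentEvent, and BaseChatMessage import as in the tests; it is written as a standalone function for illustration (the PR implements it as a static method on TeamManager) and is not the merged implementation.

```python
# Hypothetical sketch of the normalization behavior the tests above exercise.
from collections.abc import Sequence
from typing import Any

from autogen_agentchat.base import TaskResult
from autogen_agentchat.messages import BaseAgentEvent, BaseChatMessage


def _normalize_task_result(result: Any) -> TaskResult:
    # Already a TaskResult: return it unchanged (the tests assert identity).
    if isinstance(result, TaskResult):
        return result

    # Dict input: try to build a TaskResult directly; fall through on failure.
    if isinstance(result, dict):
        try:
            return TaskResult(**result)
        except Exception:
            pass  # fall back to attribute-based extraction below

    try:
        # Accept messages only from real (non-str/bytes) sequences and keep
        # only genuine agent events / chat messages; everything else is dropped.
        raw = getattr(result, "messages", None)
        messages: list[BaseAgentEvent | BaseChatMessage] = []
        if isinstance(raw, Sequence) and not isinstance(raw, (str, bytes)):
            messages = [m for m in raw if isinstance(m, (BaseAgentEvent, BaseChatMessage))]

        # Coerce any non-string stop reason to str; preserve None.
        stop_reason = getattr(result, "stop_reason", None)
        if stop_reason is not None and not isinstance(stop_reason, str):
            stop_reason = str(stop_reason)

        return TaskResult(messages=messages, stop_reason=stop_reason)
    except Exception:
        # Last-resort fallback: an empty result rather than a crash.
        return TaskResult(messages=[], stop_reason=None)
```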
Summary
TeamManager normalizes results to TaskResult objects before building a TeamResult

Testing
https://chatgpt.com/codex/tasks/task_b_68f47dbb30e8832e8d5074a364dbb367