diff --git a/examples/basic/simple_gpt_5.py b/examples/basic/simple_gpt_5.py
new file mode 100644
index 000000000..0bf4b4dc8
--- /dev/null
+++ b/examples/basic/simple_gpt_5.py
@@ -0,0 +1,30 @@
+import asyncio
+
+from openai.types.shared import Reasoning
+
+from agents import Agent, ModelSettings, Runner
+
+# If you have a specific reason to use Chat Completions, you can configure the model this way,
+# and then pass chat_completions_model to the Agent constructor.
+# from openai import AsyncOpenAI
+# client = AsyncOpenAI()
+# from agents import OpenAIChatCompletionsModel
+# chat_completions_model = OpenAIChatCompletionsModel(model="gpt-5", openai_client=client)
+
+
+async def main():
+    agent = Agent(
+        name="Knowledgeable GPT-5 Assistant",
+        instructions="You're a knowledgeable assistant. You always provide an interesting answer.",
+        model="gpt-5",
+        model_settings=ModelSettings(
+            reasoning=Reasoning(effort="minimal"),  # "minimal", "low", "medium", "high"
+            verbosity="low",  # "low", "medium", "high"
+        ),
+    )
+    result = await Runner.run(agent, "Tell me something about recursion in programming.")
+    print(result.final_output)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py
index 71e66ed84..039030314 100644
--- a/src/agents/model_settings.py
+++ b/src/agents/model_settings.py
@@ -102,6 +102,10 @@ class ModelSettings:
     [reasoning models](https://platform.openai.com/docs/guides/reasoning).
     """
 
+    verbosity: Literal["low", "medium", "high"] | None = None
+    """Constrains the verbosity of the model's response.
+    """
+
     metadata: dict[str, str] | None = None
     """Metadata to include with the model response call."""
 
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 292636cab..c6d1d7d22 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -287,6 +287,7 @@ async def _fetch_response(
             stream_options=self._non_null_or_not_given(stream_options),
             store=self._non_null_or_not_given(store),
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
+            verbosity=self._non_null_or_not_given(model_settings.verbosity),
             top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
             extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 50b2bed1a..4352c99c7 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -270,6 +270,11 @@ async def _fetch_response(
         extra_args = dict(model_settings.extra_args or {})
         if model_settings.top_logprobs is not None:
             extra_args["top_logprobs"] = model_settings.top_logprobs
+        if model_settings.verbosity is not None:
+            if response_format != NOT_GIVEN:
+                response_format["verbosity"] = model_settings.verbosity  # type: ignore [index]
+            else:
+                response_format = {"verbosity": model_settings.verbosity}
 
         return await self._client.responses.create(
             previous_response_id=self._non_null_or_not_given(previous_response_id),
diff --git a/tests/model_settings/test_serialization.py b/tests/model_settings/test_serialization.py
index 16def4cad..f099a1a31 100644
--- a/tests/model_settings/test_serialization.py
+++ b/tests/model_settings/test_serialization.py
@@ -59,6 +59,7 @@ def test_all_fields_serialization() -> None:
         include_usage=False,
         response_include=["reasoning.encrypted_content"],
         top_logprobs=1,
+        verbosity="low",
         extra_query={"foo": "bar"},
         extra_body={"foo": "bar"},
         extra_headers={"foo": "bar"},