Skip to content

Commit ab61d45

Browse files
feat(api): add new text parameters, expiration options
1 parent 572fe98 commit ab61d45

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

49 files changed

+1188
-204
lines changed

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 109
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9cadfad609f94f20ebf74fdc06a80302f1a324dc69700a309a8056aabca82fd2.yml
3-
openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
4-
config_hash: 68337b532875626269c304372a669f67
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml
3+
openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15
4+
config_hash: ed87b9139ac595a04a2162d754df2fed

lib/openai/models/batch_create_params.rb

Lines changed: 38 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,14 @@ class BatchCreateParams < OpenAI::Internal::Type::BaseModel
4848
# @return [Hash{Symbol=>String}, nil]
4949
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
5050

51-
# @!method initialize(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {})
51+
# @!attribute output_expires_after
52+
# The expiration policy for the output and/or error file that are generated for a
53+
# batch.
54+
#
55+
# @return [OpenAI::Models::BatchCreateParams::OutputExpiresAfter, nil]
56+
optional :output_expires_after, -> { OpenAI::BatchCreateParams::OutputExpiresAfter }
57+
58+
# @!method initialize(completion_window:, endpoint:, input_file_id:, metadata: nil, output_expires_after: nil, request_options: {})
5259
# Some parameter documentations has been truncated, see
5360
# {OpenAI::Models::BatchCreateParams} for more details.
5461
#
@@ -60,6 +67,8 @@ class BatchCreateParams < OpenAI::Internal::Type::BaseModel
6067
#
6168
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
6269
#
70+
# @param output_expires_after [OpenAI::Models::BatchCreateParams::OutputExpiresAfter] The expiration policy for the output and/or error file that are generated for a
71+
#
6372
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
6473

6574
# The time frame within which the batch should be processed. Currently only `24h`
@@ -88,6 +97,34 @@ module Endpoint
8897
# @!method self.values
8998
# @return [Array<Symbol>]
9099
end
100+
101+
class OutputExpiresAfter < OpenAI::Internal::Type::BaseModel
102+
# @!attribute anchor
103+
# Anchor timestamp after which the expiration policy applies. Supported anchors:
104+
# `created_at`. Note that the anchor is the file creation time, not the time the
105+
# batch is created.
106+
#
107+
# @return [Symbol, :created_at]
108+
required :anchor, const: :created_at
109+
110+
# @!attribute seconds
111+
# The number of seconds after the anchor time that the file will expire. Must be
112+
# between 3600 (1 hour) and 2592000 (30 days).
113+
#
114+
# @return [Integer]
115+
required :seconds, Integer
116+
117+
# @!method initialize(seconds:, anchor: :created_at)
118+
# Some parameter documentations has been truncated, see
119+
# {OpenAI::Models::BatchCreateParams::OutputExpiresAfter} for more details.
120+
#
121+
# The expiration policy for the output and/or error file that are generated for a
122+
# batch.
123+
#
124+
# @param seconds [Integer] The number of seconds after the anchor time that the file will expire. Must be b
125+
#
126+
# @param anchor [Symbol, :created_at] Anchor timestamp after which the expiration policy applies. Supported anchors: `
127+
end
91128
end
92129
end
93130
end

lib/openai/models/beta/thread_create_and_run_params.rb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel
157157

158158
# @!attribute truncation_strategy
159159
# Controls for how a thread will be truncated prior to the run. Use this to
160-
# control the intial context window of the run.
160+
# control the initial context window of the run.
161161
#
162162
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil]
163163
optional :truncation_strategy,
@@ -694,7 +694,7 @@ class TruncationStrategy < OpenAI::Internal::Type::BaseModel
694694
# details.
695695
#
696696
# Controls for how a thread will be truncated prior to the run. Use this to
697-
# control the intial context window of the run.
697+
# control the initial context window of the run.
698698
#
699699
# @param type [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
700700
#

lib/openai/models/beta/threads/run.rb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -195,7 +195,7 @@ class Run < OpenAI::Internal::Type::BaseModel
195195

196196
# @!attribute truncation_strategy
197197
# Controls for how a thread will be truncated prior to the run. Use this to
198-
# control the intial context window of the run.
198+
# control the initial context window of the run.
199199
#
200200
# @return [OpenAI::Models::Beta::Threads::Run::TruncationStrategy, nil]
201201
required :truncation_strategy, -> { OpenAI::Beta::Threads::Run::TruncationStrategy }, nil?: true
@@ -415,7 +415,7 @@ class TruncationStrategy < OpenAI::Internal::Type::BaseModel
415415
# {OpenAI::Models::Beta::Threads::Run::TruncationStrategy} for more details.
416416
#
417417
# Controls for how a thread will be truncated prior to the run. Use this to
418-
# control the intial context window of the run.
418+
# control the initial context window of the run.
419419
#
420420
# @param type [Symbol, OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
421421
#

lib/openai/models/beta/threads/run_create_params.rb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -184,7 +184,7 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel
184184

185185
# @!attribute truncation_strategy
186186
# Controls for how a thread will be truncated prior to the run. Use this to
187-
# control the intial context window of the run.
187+
# control the initial context window of the run.
188188
#
189189
# @return [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil]
190190
optional :truncation_strategy,
@@ -413,7 +413,7 @@ class TruncationStrategy < OpenAI::Internal::Type::BaseModel
413413
# details.
414414
#
415415
# Controls for how a thread will be truncated prior to the run. Use this to
416-
# control the intial context window of the run.
416+
# control the initial context window of the run.
417417
#
418418
# @param type [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
419419
#

lib/openai/models/chat/chat_completion.rb

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -47,9 +47,8 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel
4747
# - If set to 'default', then the request will be processed with the standard
4848
# pricing and performance for the selected model.
4949
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
50-
# 'priority', then the request will be processed with the corresponding service
51-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
52-
# Priority processing.
50+
# '[priority](https://openai.com/api-priority-processing/)', then the request
51+
# will be processed with the corresponding service tier.
5352
# - When not set, the default behavior is 'auto'.
5453
#
5554
# When the `service_tier` parameter is set, the response body will include the
@@ -61,6 +60,8 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel
6160
optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletion::ServiceTier }, nil?: true
6261

6362
# @!attribute system_fingerprint
63+
# @deprecated
64+
#
6465
# This fingerprint represents the backend configuration that the model runs with.
6566
#
6667
# Can be used in conjunction with the `seed` request parameter to understand when
@@ -196,9 +197,8 @@ class Logprobs < OpenAI::Internal::Type::BaseModel
196197
# - If set to 'default', then the request will be processed with the standard
197198
# pricing and performance for the selected model.
198199
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
199-
# 'priority', then the request will be processed with the corresponding service
200-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
201-
# Priority processing.
200+
# '[priority](https://openai.com/api-priority-processing/)', then the request
201+
# will be processed with the corresponding service tier.
202202
# - When not set, the default behavior is 'auto'.
203203
#
204204
# When the `service_tier` parameter is set, the response body will include the

lib/openai/models/chat/chat_completion_chunk.rb

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -46,9 +46,8 @@ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel
4646
# - If set to 'default', then the request will be processed with the standard
4747
# pricing and performance for the selected model.
4848
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
49-
# 'priority', then the request will be processed with the corresponding service
50-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
51-
# Priority processing.
49+
# '[priority](https://openai.com/api-priority-processing/)', then the request
50+
# will be processed with the corresponding service tier.
5251
# - When not set, the default behavior is 'auto'.
5352
#
5453
# When the `service_tier` parameter is set, the response body will include the
@@ -60,6 +59,8 @@ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel
6059
optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletionChunk::ServiceTier }, nil?: true
6160

6261
# @!attribute system_fingerprint
62+
# @deprecated
63+
#
6364
# This fingerprint represents the backend configuration that the model runs with.
6465
# Can be used in conjunction with the `seed` request parameter to understand when
6566
# backend changes have been made that might impact determinism.
@@ -379,9 +380,8 @@ class Logprobs < OpenAI::Internal::Type::BaseModel
379380
# - If set to 'default', then the request will be processed with the standard
380381
# pricing and performance for the selected model.
381382
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
382-
# 'priority', then the request will be processed with the corresponding service
383-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
384-
# Priority processing.
383+
# '[priority](https://openai.com/api-priority-processing/)', then the request
384+
# will be processed with the corresponding service tier.
385385
# - When not set, the default behavior is 'auto'.
386386
#
387387
# When the `service_tier` parameter is set, the response body will include the

lib/openai/models/chat/completion_create_params.rb

Lines changed: 46 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -226,6 +226,8 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
226226
optional :safety_identifier, String
227227

228228
# @!attribute seed
229+
# @deprecated
230+
#
229231
# This feature is in Beta. If specified, our system will make a best effort to
230232
# sample deterministically, such that repeated requests with the same `seed` and
231233
# parameters should return the same result. Determinism is not guaranteed, and you
@@ -244,9 +246,8 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
244246
# - If set to 'default', then the request will be processed with the standard
245247
# pricing and performance for the selected model.
246248
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
247-
# 'priority', then the request will be processed with the corresponding service
248-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
249-
# Priority processing.
249+
# '[priority](https://openai.com/api-priority-processing/)', then the request
250+
# will be processed with the corresponding service tier.
250251
# - When not set, the default behavior is 'auto'.
251252
#
252253
# When the `service_tier` parameter is set, the response body will include the
@@ -291,6 +292,11 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
291292
# @return [Float, nil]
292293
optional :temperature, Float, nil?: true
293294

295+
# @!attribute text
296+
#
297+
# @return [OpenAI::Models::Chat::CompletionCreateParams::Text, nil]
298+
optional :text, -> { OpenAI::Chat::CompletionCreateParams::Text }
299+
294300
# @!attribute tool_choice
295301
# Controls which (if any) tool is called by the model. `none` means the model will
296302
# not call any tool and instead generates a message. `auto` means the model can
@@ -364,7 +370,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
364370
# @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
365371
optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions }
366372

367-
# @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
373+
# @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
368374
# Some parameter documentations has been truncated, see
369375
# {OpenAI::Models::Chat::CompletionCreateParams} for more details.
370376
#
@@ -420,6 +426,8 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
420426
#
421427
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
422428
#
429+
# @param text [OpenAI::Models::Chat::CompletionCreateParams::Text]
430+
#
423431
# @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model.
424432
#
425433
# @param tools [Array<OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Chat::ChatCompletionFunctionTool, OpenAI::Models::Chat::ChatCompletionCustomTool>] A list of tools the model may call. You can provide either
@@ -591,9 +599,8 @@ module ResponseFormat
591599
# - If set to 'default', then the request will be processed with the standard
592600
# pricing and performance for the selected model.
593601
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
594-
# 'priority', then the request will be processed with the corresponding service
595-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
596-
# Priority processing.
602+
# '[priority](https://openai.com/api-priority-processing/)', then the request
603+
# will be processed with the corresponding service tier.
597604
# - When not set, the default behavior is 'auto'.
598605
#
599606
# When the `service_tier` parameter is set, the response body will include the
@@ -631,6 +638,38 @@ module Stop
631638
StringArray = OpenAI::Internal::Type::ArrayOf[String]
632639
end
633640

641+
class Text < OpenAI::Internal::Type::BaseModel
642+
# @!attribute verbosity
643+
# Constrains the verbosity of the model's response. Lower values will result in
644+
# more concise responses, while higher values will result in more verbose
645+
# responses. Currently supported values are `low`, `medium`, and `high`.
646+
#
647+
# @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Text::Verbosity, nil]
648+
optional :verbosity, enum: -> { OpenAI::Chat::CompletionCreateParams::Text::Verbosity }, nil?: true
649+
650+
# @!method initialize(verbosity: nil)
651+
# Some parameter documentations has been truncated, see
652+
# {OpenAI::Models::Chat::CompletionCreateParams::Text} for more details.
653+
#
654+
# @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Text::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
655+
656+
# Constrains the verbosity of the model's response. Lower values will result in
657+
# more concise responses, while higher values will result in more verbose
658+
# responses. Currently supported values are `low`, `medium`, and `high`.
659+
#
660+
# @see OpenAI::Models::Chat::CompletionCreateParams::Text#verbosity
661+
module Verbosity
662+
extend OpenAI::Internal::Type::Enum
663+
664+
LOW = :low
665+
MEDIUM = :medium
666+
HIGH = :high
667+
668+
# @!method self.values
669+
# @return [Array<Symbol>]
670+
end
671+
end
672+
634673
# Constrains the verbosity of the model's response. Lower values will result in
635674
# more concise responses, while higher values will result in more verbose
636675
# responses. Currently supported values are `low`, `medium`, and `high`.

0 commit comments

Comments
 (0)