 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::BatchCreateParams} for more details.
 #
@@ -60,6 +67,8 @@ class BatchCreateParams < OpenAI::Internal::Type::BaseModel
 #
 # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
 #
+# @param output_expires_after [OpenAI::Models::BatchCreateParams::OutputExpiresAfter] The expiration policy for the output and/or error file that are generated for a
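For orientation, here is a hedged sketch of how the new `output_expires_after` parameter could be passed through the Ruby client. Only the parameter name and its `OutputExpiresAfter` type come from the hunk above; the `anchor`/`seconds` fields mirror the file expiration policy used elsewhere in the API and are an assumption, as are the placeholder IDs.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Assumed usage: expire the generated output/error files two weeks after creation.
# The anchor/seconds shape is an assumption, not confirmed by the diff.
batch = client.batches.create(
  input_file_id: "file-abc123",      # placeholder file ID
  endpoint: "/v1/chat/completions",
  completion_window: "24h",
  output_expires_after: {anchor: :created_at, seconds: 14 * 24 * 60 * 60}
)
puts batch.id
```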
@@ -694,7 +694,7 @@ class TruncationStrategy < OpenAI::Internal::Type::BaseModel
 # details.
 #
 # Controls for how a thread will be truncated prior to the run. Use this to
-# control the intial context window of the run.
+# control the initial context window of the run.
 #
 # @param type [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
@@ -415,7 +415,7 @@ class TruncationStrategy < OpenAI::Internal::Type::BaseModel
 # {OpenAI::Models::Beta::Threads::Run::TruncationStrategy} for more details.
 #
 # Controls for how a thread will be truncated prior to the run. Use this to
-# control the intial context window of the run.
+# control the initial context window of the run.
 #
 # @param type [Symbol, OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
@@ -413,7 +413,7 @@ class TruncationStrategy < OpenAI::Internal::Type::BaseModel
 # details.
 #
 # Controls for how a thread will be truncated prior to the run. Use this to
-# control the intial context window of the run.
+# control the initial context window of the run.
 #
 # @param type [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
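The three hunks above fix the same typo in the truncation strategy docs. As a reference point, a minimal sketch of what that strategy controls when creating a run: the `last_messages` field and the exact positional/keyword split of `runs.create` are assumptions based on the public Assistants API, and the IDs are placeholders.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Assumed call shape: keep only the last 10 messages of the thread as the
# initial context window for this run, instead of the default `auto` strategy.
run = client.beta.threads.runs.create(
  "thread_abc123",                                       # placeholder thread ID
  assistant_id: "asst_abc123",                           # placeholder assistant ID
  truncation_strategy: {type: :last_messages, last_messages: 10}
)
puts run.status
```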
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Chat::CompletionCreateParams} for more details.
 #
@@ -420,6 +426,8 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
 #
 # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
 #
+# @param text [OpenAI::Models::Chat::CompletionCreateParams::Text]
+#
 # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model.
 #
 # @param tools [Array<OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Chat::ChatCompletionFunctionTool, OpenAI::Models::Chat::ChatCompletionCustomTool>] A list of tools the model may call. You can provide either
@@ -591,9 +599,8 @@ module ResponseFormat
 # - If set to 'default', then the request will be processed with the standard
 #   pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-#   'priority', then the request will be processed with the corresponding service
-#   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-#   Priority processing.
+#   '[priority](https://openai.com/api-priority-processing/)', then the request
+#   will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
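A hedged sketch of the `service_tier` behavior the updated comment describes: the tier values ('auto', 'default', 'flex', 'priority') come from the text above, while the model name and which models each tier actually supports are assumptions.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

completion = client.chat.completions.create(
  model: "o4-mini",                                  # assumed flex-capable model
  messages: [{role: "user", content: "Summarize this ticket in one sentence."}],
  service_tier: :flex                                # or :priority; omit for the default 'auto'
)

# Per the comment above, the response echoes the tier that served the request.
puts completion.service_tier
```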
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Chat::CompletionCreateParams::Text} for more details.
+#
+# @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Text::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
+
+# Constrains the verbosity of the model's response. Lower values will result in
+# more concise responses, while higher values will result in more verbose
+# responses. Currently supported values are `low`, `medium`, and `high`.
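Finally, a sketch of the new `text.verbosity` control: nesting `verbosity` under `text:` follows the `CompletionCreateParams::Text` model introduced in this diff, the allowed values (`low`, `medium`, `high`) are quoted from the comment above, and the model name is an assumption.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

completion = client.chat.completions.create(
  model: "gpt-5",                                # assumed model; verbosity support is model-dependent
  messages: [{role: "user", content: "Explain HTTP caching."}],
  text: {verbosity: :low}                        # ask for a more concise answer
)
puts completion.choices.first.message.content
```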