
Commit 6ee3392

feat(api): Add tools and structured outputs to evals
1 parent ac8bf11 commit 6ee3392

19 files changed: +1759, -8 lines

.stats.yml

Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 109
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0205acb1015d29b2312a48526734c0399f93026d4fe2dff5c7768f566e333fd2.yml
-openapi_spec_hash: 1772cc9056c2f6dfb2a4e9cb77ee6343
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4865dda2b62927bd141cbc85f81be3d88602f103e2c581e15eb1caded3e3aaa2.yml
+openapi_spec_hash: 7d14a9b23ef4ac93ea46d629601b6f6b
 config_hash: ed1e6b3c5f93d12b80d31167f55c557c

lib/openai/models/evals/create_eval_completions_run_data_source.rb

Lines changed: 67 additions & 1 deletion
@@ -432,6 +432,24 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
 # @return [Integer, nil]
 optional :max_completion_tokens, Integer

+# @!attribute response_format
+# An object specifying the format that the model must output.
+#
+# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+# Outputs which ensures the model will match your supplied JSON schema. Learn more
+# in the
+# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+#
+# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+# ensures the message the model generates is valid JSON. Using `json_schema` is
+# preferred for models that support it.
+#
+# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject, nil]
+optional :response_format,
+  union: -> {
+    OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::ResponseFormat
+  }
+
 # @!attribute seed
 # A seed value to initialize the randomness, during sampling.
 #
@@ -444,20 +462,68 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
 # @return [Float, nil]
 optional :temperature, Float

+# @!attribute tools
+# A list of tools the model may call. Currently, only functions are supported as a
+# tool. Use this to provide a list of functions the model may generate JSON inputs
+# for. A max of 128 functions are supported.
+#
+# @return [Array<OpenAI::Models::Chat::ChatCompletionTool>, nil]
+optional :tools, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTool] }
+
 # @!attribute top_p
 # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
 #
 # @return [Float, nil]
 optional :top_p, Float

-# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, top_p: nil)
+# @!method initialize(max_completion_tokens: nil, response_format: nil, seed: nil, temperature: nil, tools: nil, top_p: nil)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams} for
+# more details.
+#
 # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
 #
+# @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
+#
 # @param seed [Integer] A seed value to initialize the randomness, during sampling.
 #
 # @param temperature [Float] A higher temperature increases randomness in the outputs.
 #
+# @param tools [Array<OpenAI::Models::Chat::ChatCompletionTool>] A list of tools the model may call. Currently, only functions are supported as a
+#
 # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+
+# An object specifying the format that the model must output.
+#
+# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+# Outputs which ensures the model will match your supplied JSON schema. Learn more
+# in the
+# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+#
+# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+# ensures the message the model generates is valid JSON. Using `json_schema` is
+# preferred for models that support it.
+#
+# @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams#response_format
+module ResponseFormat
+extend OpenAI::Internal::Type::Union
+
+# Default response format. Used to generate text responses.
+variant -> { OpenAI::ResponseFormatText }
+
+# JSON Schema response format. Used to generate structured JSON responses.
+# Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
+variant -> { OpenAI::ResponseFormatJSONSchema }
+
+# JSON object response format. An older method of generating JSON responses.
+# Using `json_schema` is recommended for models that support it. Note that the
+# model will not generate JSON without a system or user message instructing it
+# to do so.
+variant -> { OpenAI::ResponseFormatJSONObject }
+
+# @!method self.variants
+# @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)]
+end
 end
 end
 end
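
Taken together, these additions let a completions-based eval run constrain sampled output with Structured Outputs and offer function tools to the sampled model. Below is a minimal sketch of how the new `sampling_params` fields might be supplied when creating a run; the eval ID, file ID, model name, and the surrounding `client.evals.runs.create` call are illustrative assumptions, not part of this diff.

    require "openai"

    client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

    # "eval_123" and "file-abc123" are placeholder IDs; the data-source wrapper
    # fields are sketched for illustration. Only `response_format` and `tools`
    # inside sampling_params come from this commit.
    run = client.evals.runs.create(
      "eval_123",
      name: "completions-with-structured-output",
      data_source: {
        type: :completions,
        source: {type: :file_id, id: "file-abc123"},
        model: "gpt-4.1",
        sampling_params: {
          max_completion_tokens: 256,
          # Structured Outputs for the sampled completions.
          response_format: {
            type: :json_schema,
            json_schema: {
              name: "verdict",
              strict: true,
              schema: {
                type: "object",
                properties: {answer: {type: "string"}},
                required: ["answer"],
                additionalProperties: false
              }
            }
          },
          # Function tools the sampled model may call (up to 128, per the docstring above).
          tools: [
            {
              type: :function,
              function: {
                name: "lookup_order",
                description: "Fetch an order by ID",
                parameters: {
                  type: "object",
                  properties: {order_id: {type: "string"}},
                  required: ["order_id"]
                }
              }
            }
          ]
        }
      }
    )

The plain hashes should be coerced by the SDK into the SamplingParams model and the ResponseFormat union shown above.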

lib/openai/models/evals/run_cancel_response.rb

Lines changed: 77 additions & 1 deletion
@@ -616,20 +616,96 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
 # @return [Float, nil]
 optional :temperature, Float

+# @!attribute text
+# Configuration options for a text response from the model. Can be plain text or
+# structured JSON data. Learn more:
+#
+# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+#
+# @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, nil]
+optional :text,
+  -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text }
+
+# @!attribute tools
+# An array of tools the model may call while generating a response. You can
+# specify which tool to use by setting the `tool_choice` parameter.
+#
+# The two categories of tools you can provide the model are:
+#
+# - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+# capabilities, like
+# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+# [file search](https://platform.openai.com/docs/guides/tools-file-search).
+# Learn more about
+# [built-in tools](https://platform.openai.com/docs/guides/tools).
+# - **Function calls (custom tools)**: Functions that are defined by you, enabling
+# the model to call your own code. Learn more about
+# [function calling](https://platform.openai.com/docs/guides/function-calling).
+#
+# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
+optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
+
 # @!attribute top_p
 # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
 #
 # @return [Float, nil]
 optional :top_p, Float

-# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, top_p: nil)
+# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams}
+# for more details.
+#
 # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
 #
 # @param seed [Integer] A seed value to initialize the randomness, during sampling.
 #
 # @param temperature [Float] A higher temperature increases randomness in the outputs.
 #
+# @param text [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
+#
+# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
+#
 # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+
+# @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams#text
+class Text < OpenAI::Internal::Type::BaseModel
+# @!attribute format_
+# An object specifying the format that the model must output.
+#
+# Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+# ensures the model will match your supplied JSON schema. Learn more in the
+# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+#
+# The default format is `{ "type": "text" }` with no additional options.
+#
+# **Not recommended for gpt-4o and newer models:**
+#
+# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+# ensures the message the model generates is valid JSON. Using `json_schema` is
+# preferred for models that support it.
+#
+# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
+optional :format_,
+  union: -> {
+    OpenAI::Responses::ResponseFormatTextConfig
+  },
+  api_name: :format
+
+# @!method initialize(format_: nil)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text}
+# for more details.
+#
+# Configuration options for a text response from the model. Can be plain text or
+# structured JSON data. Learn more:
+#
+# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+#
+# @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
+end
 end
 end

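
RunCancelResponse is a read model, so the new `text` and `tools` attributes are what you see when inspecting a cancelled run whose data source used the Responses API. A rough sketch of reading them back; the IDs are placeholders, and the exact cancel signature (run ID positional, eval ID keyword) is assumed from the SDK's usual resource layout rather than shown in this diff.

    require "openai"

    client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

    # Placeholder IDs for illustration only.
    run = client.evals.runs.cancel("evalrun_456", eval_id: "eval_123")

    sampling = run.data_source.sampling_params
    if sampling && sampling.respond_to?(:text)
      # `format_` is the Ruby name for the wire field `format` (see api_name: :format above).
      case sampling.text&.format_
      when OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig
        puts "run sampled with Structured Outputs"
      when OpenAI::Models::ResponseFormatJSONObject
        puts "run sampled in legacy JSON mode"
      else
        puts "run sampled plain text"
      end

      # Tools is nil when the run did not offer any tools to the model.
      Array(sampling.tools).each { |tool| puts "tool: #{tool.class.name}" }
    end
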

lib/openai/models/evals/run_create_params.rb

Lines changed: 79 additions & 1 deletion
@@ -576,20 +576,98 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
 # @return [Float, nil]
 optional :temperature, Float

+# @!attribute text
+# Configuration options for a text response from the model. Can be plain text or
+# structured JSON data. Learn more:
+#
+# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+#
+# @return [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, nil]
+optional :text,
+  -> {
+    OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text
+  }
+
+# @!attribute tools
+# An array of tools the model may call while generating a response. You can
+# specify which tool to use by setting the `tool_choice` parameter.
+#
+# The two categories of tools you can provide the model are:
+#
+# - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+# capabilities, like
+# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+# [file search](https://platform.openai.com/docs/guides/tools-file-search).
+# Learn more about
+# [built-in tools](https://platform.openai.com/docs/guides/tools).
+# - **Function calls (custom tools)**: Functions that are defined by you, enabling
+# the model to call your own code. Learn more about
+# [function calling](https://platform.openai.com/docs/guides/function-calling).
+#
+# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
+optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
+
 # @!attribute top_p
 # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
 #
 # @return [Float, nil]
 optional :top_p, Float

-# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, top_p: nil)
+# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams}
+# for more details.
+#
 # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
 #
 # @param seed [Integer] A seed value to initialize the randomness, during sampling.
 #
 # @param temperature [Float] A higher temperature increases randomness in the outputs.
 #
+# @param text [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
+#
+# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
+#
 # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+
+# @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams#text
+class Text < OpenAI::Internal::Type::BaseModel
+# @!attribute format_
+# An object specifying the format that the model must output.
+#
+# Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+# ensures the model will match your supplied JSON schema. Learn more in the
+# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+#
+# The default format is `{ "type": "text" }` with no additional options.
+#
+# **Not recommended for gpt-4o and newer models:**
+#
+# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+# ensures the message the model generates is valid JSON. Using `json_schema` is
+# preferred for models that support it.
+#
+# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
+optional :format_,
+  union: -> {
+    OpenAI::Responses::ResponseFormatTextConfig
+  },
+  api_name: :format
+
+# @!method initialize(format_: nil)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text}
+# for more details.
+#
+# Configuration options for a text response from the model. Can be plain text or
+# structured JSON data. Learn more:
+#
+# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+#
+# @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
+end
 end
 end

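
On the request side, RunCreateParams now accepts the same `text` and `tools` settings for the responses data source. A hedged sketch of a create call follows; the IDs, model, and the wrapper fields around `sampling_params` (type, source, name) are assumptions for illustration, while `text.format_` and `tools` follow the attributes added above (note that `format_` serializes to `format`, per `api_name: :format`).

    require "openai"

    client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

    # Placeholder IDs; only the `text` and `tools` keys inside sampling_params
    # are taken from this commit, the rest is an assumed wrapper.
    run = client.evals.runs.create(
      "eval_123",
      name: "responses-with-tools",
      data_source: {
        type: :responses,
        source: {type: :file_id, id: "file-abc123"},
        model: "gpt-4.1",
        sampling_params: {
          # `format_` is the Ruby attribute name; it is sent as `format` on the wire.
          text: {
            format_: {
              type: :json_schema,
              name: "verdict",
              strict: true,
              schema: {
                type: "object",
                properties: {answer: {type: "string"}},
                required: ["answer"],
                additionalProperties: false
              }
            }
          },
          # Responses-style function tool: flattened shape, no nested `function` key.
          tools: [
            {
              type: :function,
              name: "lookup_order",
              description: "Fetch an order by ID",
              parameters: {
                type: "object",
                properties: {order_id: {type: "string"}},
                required: ["order_id"]
              }
            }
          ]
        }
      }
    )
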
