From f7f4b05882e8cb253896c7e2f2635074784b409e Mon Sep 17 00:00:00 2001 From: karpetrosyan Date: Fri, 4 Jul 2025 16:15:28 +0400 Subject: [PATCH] Sync openapi --- openapi.yaml | 12438 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 9218 insertions(+), 3220 deletions(-) diff --git a/openapi.yaml b/openapi.yaml index 2e3e33fa..6fba2be4 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -1,4 +1,4 @@ -openapi: 3.0.0 +openapi: 3.1.0 info: title: OpenAI API description: The OpenAI REST API. Please see @@ -34,6 +34,8 @@ tags: description: Manage and run evals in the OpenAI platform. - name: Fine-tuning description: Manage fine-tuning jobs to tailor a model to your specific training data. + - name: Graders + description: Manage and run graders in the OpenAI platform. - name: Batch description: Create large batches of API requests to run asynchronously. - name: Files @@ -587,7 +589,7 @@ paths: async function main() { - const response = await openai.beta.assistants.del("asst_abc123"); + const response = await openai.beta.assistants.delete("asst_abc123"); console.log(response); } @@ -624,77 +626,94 @@ paths: schema: type: string format: binary + text/event-stream: + schema: + $ref: "#/components/schemas/CreateSpeechResponseStreamEvent" x-oaiMeta: name: Create speech group: audio - returns: The audio file content. + returns: The audio file content or a [stream of audio + events](/docs/api-reference/audio/speech-audio-delta-event). 
examples: - request: - curl: | - curl https://api.openai.com/v1/audio/speech \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "model": "gpt-4o-mini-tts", - "input": "The quick brown fox jumped over the lazy dog.", - "voice": "alloy" - }' \ - --output speech.mp3 - python: | - from pathlib import Path - import openai - - speech_file_path = Path(__file__).parent / "speech.mp3" - with openai.audio.speech.with_streaming_response.create( - model="gpt-4o-mini-tts", - voice="alloy", - input="The quick brown fox jumped over the lazy dog." - ) as response: - response.stream_to_file(speech_file_path) - javascript: > - import fs from "fs"; + - title: Default + request: + curl: | + curl https://api.openai.com/v1/audio/speech \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "model": "gpt-4o-mini-tts", + "input": "The quick brown fox jumped over the lazy dog.", + "voice": "alloy" + }' \ + --output speech.mp3 + python: | + from pathlib import Path + import openai + + speech_file_path = Path(__file__).parent / "speech.mp3" + with openai.audio.speech.with_streaming_response.create( + model="gpt-4o-mini-tts", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." 
+ ) as response: + response.stream_to_file(speech_file_path) + javascript: > + import fs from "fs"; - import path from "path"; + import path from "path"; - import OpenAI from "openai"; + import OpenAI from "openai"; - const openai = new OpenAI(); + const openai = new OpenAI(); - const speechFile = path.resolve("./speech.mp3"); + const speechFile = path.resolve("./speech.mp3"); - async function main() { - const mp3 = await openai.audio.speech.create({ - model: "gpt-4o-mini-tts", - voice: "alloy", - input: "Today is a wonderful day to build something people love!", - }); - console.log(speechFile); - const buffer = Buffer.from(await mp3.arrayBuffer()); - await fs.promises.writeFile(speechFile, buffer); - } + async function main() { + const mp3 = await openai.audio.speech.create({ + model: "gpt-4o-mini-tts", + voice: "alloy", + input: "Today is a wonderful day to build something people love!", + }); + console.log(speechFile); + const buffer = Buffer.from(await mp3.arrayBuffer()); + await fs.promises.writeFile(speechFile, buffer); + } - main(); - csharp: | - using System; - using System.IO; + main(); + csharp: | + using System; + using System.IO; - using OpenAI.Audio; + using OpenAI.Audio; - AudioClient client = new( - model: "gpt-4o-mini-tts", - apiKey: Environment.GetEnvironmentVariable("OPENAI_API_KEY") - ); + AudioClient client = new( + model: "gpt-4o-mini-tts", + apiKey: Environment.GetEnvironmentVariable("OPENAI_API_KEY") + ); - BinaryData speech = client.GenerateSpeech( - text: "The quick brown fox jumped over the lazy dog.", - voice: GeneratedSpeechVoice.Alloy - ); + BinaryData speech = client.GenerateSpeech( + text: "The quick brown fox jumped over the lazy dog.", + voice: GeneratedSpeechVoice.Alloy + ); - using FileStream stream = File.OpenWrite("speech.mp3"); - speech.ToStream().CopyTo(stream); + using FileStream stream = File.OpenWrite("speech.mp3"); + speech.ToStream().CopyTo(stream); + - title: SSE Stream Format + request: + curl: | + curl 
https://api.openai.com/v1/audio/speech \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "model": "gpt-4o-mini-tts", + "input": "The quick brown fox jumped over the lazy dog.", + "voice": "alloy", + "stream_format": "sse" + }' /audio/transcriptions: post: operationId: createTranscription @@ -786,7 +805,17 @@ paths: Console.WriteLine($"{transcription.Text}"); response: > { - "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." + "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that.", + "usage": { + "type": "tokens", + "input_tokens": 14, + "input_token_details": { + "text_tokens": 0, + "audio_tokens": 14 + }, + "output_tokens": 45, + "total_tokens": 59 + } } - title: Streaming request: @@ -888,7 +917,7 @@ paths: data: {"type":"transcript.text.delta","delta":".","logprobs":[{"token":".","logprob":-0.014231676,"bytes":[46]}]} - data: {"type":"transcript.text.done","text":"I see skies of blue and clouds of white, the bright blessed days, the dark sacred nights, and I think to myself, what a wonderful world.","logprobs":[{"token":"I","logprob":-0.00007588794,"bytes":[73]},{"token":" see","logprob":-3.1281633e-7,"bytes":[32,115,101,101]},{"token":" skies","logprob":-2.3392786e-6,"bytes":[32,115,107,105,101,115]},{"token":" of","logprob":-3.1281633e-7,"bytes":[32,111,102]},{"token":" blue","logprob":-1.0280384e-6,"bytes":[32,98,108,117,101]},{"token":" and","logprob":-0.0005108566,"bytes":[32,97,110,100]},{"token":" clouds","logprob":-1.9361265e-7,"bytes":[32,99,108,111,117,100,115]},{"token":" of","logprob":-1.9361265e-7,"bytes":[32,111,102]},{"token":" 
white","logprob":-7.89631e-7,"bytes":[32,119,104,105,116,101]},{"token":",","logprob":-0.0014890312,"bytes":[44]},{"token":" the","logprob":-0.0110956915,"bytes":[32,116,104,101]},{"token":" bright","logprob":0.0,"bytes":[32,98,114,105,103,104,116]},{"token":" blessed","logprob":-0.000045848617,"bytes":[32,98,108,101,115,115,101,100]},{"token":" days","logprob":-0.000010802739,"bytes":[32,100,97,121,115]},{"token":",","logprob":-0.00001700133,"bytes":[44]},{"token":" the","logprob":-0.0000118755715,"bytes":[32,116,104,101]},{"token":" dark","logprob":-5.5122365e-7,"bytes":[32,100,97,114,107]},{"token":" sacred","logprob":-5.4385737e-6,"bytes":[32,115,97,99,114,101,100]},{"token":" nights","logprob":-4.00813e-6,"bytes":[32,110,105,103,104,116,115]},{"token":",","logprob":-0.0036910512,"bytes":[44]},{"token":" and","logprob":-0.0031903093,"bytes":[32,97,110,100]},{"token":" I","logprob":-1.504853e-6,"bytes":[32,73]},{"token":" think","logprob":-4.3202e-7,"bytes":[32,116,104,105,110,107]},{"token":" to","logprob":-1.9361265e-7,"bytes":[32,116,111]},{"token":" myself","logprob":-1.7432603e-6,"bytes":[32,109,121,115,101,108,102]},{"token":",","logprob":-0.29254505,"bytes":[44]},{"token":" what","logprob":-0.016815351,"bytes":[32,119,104,97,116]},{"token":" a","logprob":-3.1281633e-7,"bytes":[32,97]},{"token":" wonderful","logprob":-2.1008714e-6,"bytes":[32,119,111,110,100,101,114,102,117,108]},{"token":" world","logprob":-8.180258e-6,"bytes":[32,119,111,114,108,100]},{"token":".","logprob":-0.014231676,"bytes":[46]}]} + data: {"type":"transcript.text.done","text":"I see skies of blue and clouds of white, the bright blessed days, the dark sacred nights, and I think to myself, what a wonderful world.","logprobs":[{"token":"I","logprob":-0.00007588794,"bytes":[73]},{"token":" see","logprob":-3.1281633e-7,"bytes":[32,115,101,101]},{"token":" skies","logprob":-2.3392786e-6,"bytes":[32,115,107,105,101,115]},{"token":" 
of","logprob":-3.1281633e-7,"bytes":[32,111,102]},{"token":" blue","logprob":-1.0280384e-6,"bytes":[32,98,108,117,101]},{"token":" and","logprob":-0.0005108566,"bytes":[32,97,110,100]},{"token":" clouds","logprob":-1.9361265e-7,"bytes":[32,99,108,111,117,100,115]},{"token":" of","logprob":-1.9361265e-7,"bytes":[32,111,102]},{"token":" white","logprob":-7.89631e-7,"bytes":[32,119,104,105,116,101]},{"token":",","logprob":-0.0014890312,"bytes":[44]},{"token":" the","logprob":-0.0110956915,"bytes":[32,116,104,101]},{"token":" bright","logprob":0.0,"bytes":[32,98,114,105,103,104,116]},{"token":" blessed","logprob":-0.000045848617,"bytes":[32,98,108,101,115,115,101,100]},{"token":" days","logprob":-0.000010802739,"bytes":[32,100,97,121,115]},{"token":",","logprob":-0.00001700133,"bytes":[44]},{"token":" the","logprob":-0.0000118755715,"bytes":[32,116,104,101]},{"token":" dark","logprob":-5.5122365e-7,"bytes":[32,100,97,114,107]},{"token":" sacred","logprob":-5.4385737e-6,"bytes":[32,115,97,99,114,101,100]},{"token":" nights","logprob":-4.00813e-6,"bytes":[32,110,105,103,104,116,115]},{"token":",","logprob":-0.0036910512,"bytes":[44]},{"token":" and","logprob":-0.0031903093,"bytes":[32,97,110,100]},{"token":" I","logprob":-1.504853e-6,"bytes":[32,73]},{"token":" think","logprob":-4.3202e-7,"bytes":[32,116,104,105,110,107]},{"token":" to","logprob":-1.9361265e-7,"bytes":[32,116,111]},{"token":" myself","logprob":-1.7432603e-6,"bytes":[32,109,121,115,101,108,102]},{"token":",","logprob":-0.29254505,"bytes":[44]},{"token":" what","logprob":-0.016815351,"bytes":[32,119,104,97,116]},{"token":" a","logprob":-3.1281633e-7,"bytes":[32,97]},{"token":" wonderful","logprob":-2.1008714e-6,"bytes":[32,119,111,110,100,101,114,102,117,108]},{"token":" world","logprob":-8.180258e-6,"bytes":[32,119,111,114,108,100]},{"token":".","logprob":-0.014231676,"bytes":[46]}],"usage":{"input_tokens":14,"input_token_details":{"text_tokens":0,"audio_tokens":14},"output_tokens":45,"total_tokens":59}} 
- title: Logprobs request: curl: | @@ -977,7 +1006,17 @@ paths: "bytes": [32, 105, 100, 101, 97, 108, 108, 121] }, { "token": ".", "logprob": -0.00011093382, "bytes": [46] } - ] + ], + "usage": { + "type": "tokens", + "input_tokens": 14, + "input_token_details": { + "text_tokens": 0, + "audio_tokens": 14 + }, + "output_tokens": 45, + "total_tokens": 59 + } } - title: Word timestamps request: @@ -1070,7 +1109,11 @@ paths: "start": 7.400000095367432, "end": 7.900000095367432 } - ] + ], + "usage": { + "type": "duration", + "seconds": 9 + } } - title: Segment timestamps request: @@ -1167,7 +1210,11 @@ paths: "no_speech_prob": 0.00985979475080967 }, ... - ] + ], + "usage": { + "type": "duration", + "seconds": 9 + } } /audio/translations: post: @@ -3148,6 +3195,432 @@ paths: "model": "gpt-3.5-turbo-instruct" "system_fingerprint": "fp_44709d6fcb", } + /containers: + get: + summary: List Containers + description: Lists containers. + operationId: ListContainers + parameters: + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. 
+ schema: + type: string + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ContainerListResource" + x-oaiMeta: + name: List containers + group: containers + returns: a list of [container](/docs/api-reference/containers/object) objects. + path: get + examples: + request: + curl: | + curl https://api.openai.com/v1/containers \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: > + { + "object": "list", + "data": [ + { + "id": "cntr_682dfebaacac8198bbfe9c2474fb6f4a085685cbe3cb5863", + "object": "container", + "created_at": 1747844794, + "status": "running", + "expires_after": { + "anchor": "last_active_at", + "minutes": 20 + }, + "last_active_at": 1747844794, + "name": "My Container" + } + ], + "first_id": "container_123", + "last_id": "container_123", + "has_more": false + } + post: + summary: Create Container + description: Creates a container. + operationId: CreateContainer + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CreateContainerBody" + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ContainerResource" + x-oaiMeta: + name: Create container + group: containers + returns: The created [container](/docs/api-reference/containers/object) object. + path: post + examples: + request: + curl: | + curl https://api.openai.com/v1/containers \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "My Container" + }' + response: | + { + "id": "cntr_682e30645a488191b6363a0cbefc0f0a025ec61b66250591", + "object": "container", + "created_at": 1747857508, + "status": "running", + "expires_after": { + "anchor": "last_active_at", + "minutes": 20 + }, + "last_active_at": 1747857508, + "name": "My Container" + } + /containers/{container_id}: + get: + summary: Retrieve Container + description: Retrieves a container. 
+ operationId: RetrieveContainer + parameters: + - name: container_id + in: path + required: true + schema: + type: string + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ContainerResource" + x-oaiMeta: + name: Retrieve container + group: containers + returns: The [container](/docs/api-reference/containers/object) object. + path: get + examples: + request: + curl: | + curl https://api.openai.com/v1/containers/cntr_682dfebaacac8198bbfe9c2474fb6f4a085685cbe3cb5863 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: | + { + "id": "cntr_682dfebaacac8198bbfe9c2474fb6f4a085685cbe3cb5863", + "object": "container", + "created_at": 1747844794, + "status": "running", + "expires_after": { + "anchor": "last_active_at", + "minutes": 20 + }, + "last_active_at": 1747844794, + "name": "My Container" + } + delete: + operationId: DeleteContainer + summary: Delete Container + description: Delete a container. + parameters: + - name: container_id + in: path + description: The ID of the container to delete. + required: true + schema: + type: string + responses: + "200": + description: OK + x-oaiMeta: + name: Delete a container + group: containers + returns: Deletion Status + path: delete + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/containers/cntr_682dfebaacac8198bbfe9c2474fb6f4a085685cbe3cb5863 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: | + { + "id": "cntr_682dfebaacac8198bbfe9c2474fb6f4a085685cbe3cb5863", + "object": "container.deleted", + "deleted": true + } + /containers/{container_id}/files: + post: + summary: > + Create a Container File + + + You can send either a multipart/form-data request with the raw file + content, or a JSON request with a file ID. + description: | + Creates a container file. 
+ operationId: CreateContainerFile + parameters: + - name: container_id + in: path + required: true + schema: + type: string + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateContainerFileBody" + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ContainerFileResource" + x-oaiMeta: + name: Create container file + group: containers + returns: The created [container + file](/docs/api-reference/container-files/object) object. + path: post + examples: + request: + curl: | + curl https://api.openai.com/v1/containers/cntr_682e0e7318108198aa783fd921ff305e08e78805b9fdbb04/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F file="@example.txt" + response: > + { + "id": "cfile_682e0e8a43c88191a7978f477a09bdf5", + "object": "container.file", + "created_at": 1747848842, + "bytes": 880, + "container_id": "cntr_682e0e7318108198aa783fd921ff305e08e78805b9fdbb04", + "path": "/mnt/data/88e12fa445d32636f190a0b33daed6cb-tsconfig.json", + "source": "user" + } + get: + summary: List Container files + description: Lists container files. + operationId: ListContainerFiles + parameters: + - name: container_id + in: path + required: true + schema: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. 
For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ContainerFileListResource" + x-oaiMeta: + name: List container files + group: containers + returns: a list of [container file](/docs/api-reference/container-files/object) + objects. + path: get + examples: + request: + curl: | + curl https://api.openai.com/v1/containers/cntr_682e0e7318108198aa783fd921ff305e08e78805b9fdbb04/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: > + { + "object": "list", + "data": [ + { + "id": "cfile_682e0e8a43c88191a7978f477a09bdf5", + "object": "container.file", + "created_at": 1747848842, + "bytes": 880, + "container_id": "cntr_682e0e7318108198aa783fd921ff305e08e78805b9fdbb04", + "path": "/mnt/data/88e12fa445d32636f190a0b33daed6cb-tsconfig.json", + "source": "user" + } + ], + "first_id": "cfile_682e0e8a43c88191a7978f477a09bdf5", + "has_more": false, + "last_id": "cfile_682e0e8a43c88191a7978f477a09bdf5" + } + /containers/{container_id}/files/{file_id}: + get: + summary: Retrieve Container File + description: Retrieves a container file. + operationId: RetrieveContainerFile + parameters: + - name: container_id + in: path + required: true + schema: + type: string + - name: file_id + in: path + required: true + schema: + type: string + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ContainerFileResource" + x-oaiMeta: + name: Retrieve container file + group: containers + returns: The [container file](/docs/api-reference/container-files/object) + object. 
+ path: get + examples: + request: + curl: > + curl + https://api.openai.com/v1/containers/container_123/files/file_456 + \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: > + { + "id": "cfile_682e0e8a43c88191a7978f477a09bdf5", + "object": "container.file", + "created_at": 1747848842, + "bytes": 880, + "container_id": "cntr_682e0e7318108198aa783fd921ff305e08e78805b9fdbb04", + "path": "/mnt/data/88e12fa445d32636f190a0b33daed6cb-tsconfig.json", + "source": "user" + } + delete: + operationId: DeleteContainerFile + summary: Delete Container File + description: Delete a container file. + parameters: + - name: container_id + in: path + required: true + schema: + type: string + - name: file_id + in: path + required: true + schema: + type: string + responses: + "200": + description: OK + x-oaiMeta: + name: Delete a container file + group: containers + returns: Deletion Status + path: delete + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/containers/cntr_682dfebaacac8198bbfe9c2474fb6f4a085685cbe3cb5863/files/cfile_682e0e8a43c88191a7978f477a09bdf5 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: | + { + "id": "cfile_682e0e8a43c88191a7978f477a09bdf5", + "object": "container.file.deleted", + "deleted": true + } + /containers/{container_id}/files/{file_id}/content: + get: + summary: Retrieve Container File Content + description: Retrieves a container file content. + operationId: RetrieveContainerFileContent + parameters: + - name: container_id + in: path + required: true + schema: + type: string + - name: file_id + in: path + required: true + schema: + type: string + responses: + "200": + description: Success + x-oaiMeta: + name: Retrieve container file content + group: containers + returns: The contents of the container file. 
+ path: get + examples: + request: + curl: | + curl https://api.openai.com/v1/containers/container_123/files/cfile_456/content \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: | + /embeddings: post: operationId: createEmbedding @@ -3317,6 +3790,19 @@ paths: curl https://api.openai.com/v1/evals?limit=1 \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" + python: | + from openai import OpenAI + client = OpenAI() + + evals = client.evals.list(limit=1) + print(evals) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const evals = await openai.evals.list({ limit: 1 }); + console.log(evals); response: > { "object": "list", @@ -3398,9 +3884,10 @@ paths: Create the structure of an evaluation that can be used to test a model's performance. - An evaluation is a set of testing criteria and a datasource. After - creating an evaluation, you can run it on different models and model - parameters. We support several types of graders and datasources. + An evaluation is a set of testing criteria and the config for a data + source, which dictates the schema of the data used in the evaluation. + After creating an evaluation, you can run it on different models and + model parameters. We support several types of graders and datasources. For more information, see the [Evals guide](/docs/guides/evals). 
requestBody: @@ -3461,6 +3948,63 @@ paths: } ] }' + python: > + from openai import OpenAI + + client = OpenAI() + + + eval_obj = client.evals.create( + name="Sentiment", + data_source_config={ + "type": "stored_completions", + "metadata": {"usecase": "chatbot"} + }, + testing_criteria=[ + { + "type": "label_model", + "model": "o3-mini", + "input": [ + {"role": "developer", "content": "Classify the sentiment of the following statement as one of 'positive', 'neutral', or 'negative'"}, + {"role": "user", "content": "Statement: {{item.input}}"} + ], + "passing_labels": ["positive"], + "labels": ["positive", "neutral", "negative"], + "name": "Example label grader" + } + ] + ) + + print(eval_obj) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const evalObj = await openai.evals.create({ + name: "Sentiment", + data_source_config: { + type: "stored_completions", + metadata: { usecase: "chatbot" } + }, + testing_criteria: [ + { + type: "label_model", + model: "o3-mini", + input: [ + { role: "developer", content: "Classify the sentiment of the following statement as one of 'positive', 'neutral', or 'negative'" }, + { role: "user", content: "Statement: {{item.input}}" } + ], + passing_labels: ["positive"], + labels: ["positive", "neutral", "negative"], + name: "Example label grader" + } + ] + }); + + console.log(evalObj); response: > { "object": "eval", @@ -3557,6 +4101,27 @@ paths: curl https://api.openai.com/v1/evals/eval_67abd54d9b0081909a86353f6fb9317a \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" + python: > + from openai import OpenAI + + client = OpenAI() + + + eval_obj = + client.evals.retrieve("eval_67abd54d9b0081909a86353f6fb9317a") + + print(eval_obj) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const evalObj = await + openai.evals.retrieve("eval_67abd54d9b0081909a86353f6fb9317a"); + + console.log(evalObj); response: | { "object": "eval", @@ -3647,6 
+4212,29 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" \ -d '{"name": "Updated Eval", "metadata": {"description": "Updated description"}}' + python: | + from openai import OpenAI + client = OpenAI() + + updated_eval = client.evals.update( + "eval_67abd54d9b0081909a86353f6fb9317a", + name="Updated Eval", + metadata={"description": "Updated description"} + ) + print(updated_eval) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const updatedEval = await openai.evals.update( + "eval_67abd54d9b0081909a86353f6fb9317a", + { + name: "Updated Eval", + metadata: { description: "Updated description" } + } + ); + console.log(updatedEval); response: | { "object": "eval", @@ -3741,6 +4329,19 @@ paths: curl https://api.openai.com/v1/evals/eval_abc123 \ -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + deleted = client.evals.delete("eval_abc123") + print(deleted) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const deleted = await openai.evals.delete("eval_abc123"); + console.log(deleted); response: | { "object": "eval.deleted", @@ -3817,6 +4418,27 @@ paths: curl https://api.openai.com/v1/evals/egroup_67abd54d9b0081909a86353f6fb9317a/runs \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" + python: > + from openai import OpenAI + + client = OpenAI() + + + runs = + client.evals.runs.list("egroup_67abd54d9b0081909a86353f6fb9317a") + + print(runs) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const runs = await + openai.evals.runs.list("egroup_67abd54d9b0081909a86353f6fb9317a"); + + console.log(runs); response: > { "object": "list", @@ -3902,8 +4524,9 @@ paths: tags: - Evals summary: > - Create a new evaluation run. This is the endpoint that will kick off - grading. 
+ Kicks off a new run for a given evaluation, specifying the data source, + and what model configuration to use to test. The datasource will be + validated against the schema specified in the config of the evaluation. parameters: - in: path name: eval_id @@ -3942,7 +4565,102 @@ paths: -X POST \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" \ - -d '{"name":"gpt-4o-mini","data_source":{"type":"completions","input_messages":{"type":"template","template":[{"role":"developer","content":"Categorize a given news headline into one of the following topics: Technology, Markets, World, Business, or Sports.\n\n# Steps\n\n1. Analyze the content of the news headline to understand its primary focus.\n2. Extract the subject matter, identifying any key indicators or keywords.\n3. Use the identified indicators to determine the most suitable category out of the five options: Technology, Markets, World, Business, or Sports.\n4. Ensure only one category is selected per headline.\n\n# Output Format\n\nRespond with the chosen category as a single word. 
For instance: \"Technology\", \"Markets\", \"World\", \"Business\", or \"Sports\".\n\n# Examples\n\n**Input**: \"Apple Unveils New iPhone Model, Featuring Advanced AI Features\" \n**Output**: \"Technology\"\n\n**Input**: \"Global Stocks Mixed as Investors Await Central Bank Decisions\" \n**Output**: \"Markets\"\n\n**Input**: \"War in Ukraine: Latest Updates on Negotiation Status\" \n**Output**: \"World\"\n\n**Input**: \"Microsoft in Talks to Acquire Gaming Company for $2 Billion\" \n**Output**: \"Business\"\n\n**Input**: \"Manchester United Secures Win in Premier League Football Match\" \n**Output**: \"Sports\" \n\n# Notes\n\n- If the headline appears to fit into more than one category, choose the most dominant theme.\n- Keywords or phrases such as \"stocks\", \"company acquisition\", \"match\", or technological brands can be good indicators for classification.\n"} , {"role":"user","content":"{{item.input}}"}]},"sampling_params":{"temperature":1,"max_completions_tokens":2048,"top_p":1,"seed":42},"model":"gpt-4o-mini","source":{"type":"file_content","content":[{"item":{"input":"Tech Company Launches Advanced Artificial Intelligence Platform","ground_truth":"Technology"}}]}}' + -d '{"name":"gpt-4o-mini","data_source":{"type":"completions","input_messages":{"type":"template","template":[{"role":"developer","content":"Categorize a given news headline into one of the following topics: Technology, Markets, World, Business, or Sports.\n\n# Steps\n\n1. Analyze the content of the news headline to understand its primary focus.\n2. Extract the subject matter, identifying any key indicators or keywords.\n3. Use the identified indicators to determine the most suitable category out of the five options: Technology, Markets, World, Business, or Sports.\n4. Ensure only one category is selected per headline.\n\n# Output Format\n\nRespond with the chosen category as a single word. 
For instance: \"Technology\", \"Markets\", \"World\", \"Business\", or \"Sports\".\n\n# Examples\n\n**Input**: \"Apple Unveils New iPhone Model, Featuring Advanced AI Features\" \n**Output**: \"Technology\"\n\n**Input**: \"Global Stocks Mixed as Investors Await Central Bank Decisions\" \n**Output**: \"Markets\"\n\n**Input**: \"War in Ukraine: Latest Updates on Negotiation Status\" \n**Output**: \"World\"\n\n**Input**: \"Microsoft in Talks to Acquire Gaming Company for $2 Billion\" \n**Output**: \"Business\"\n\n**Input**: \"Manchester United Secures Win in Premier League Football Match\" \n**Output**: \"Sports\" \n\n# Notes\n\n- If the headline appears to fit into more than one category, choose the most dominant theme.\n- Keywords or phrases such as \"stocks\", \"company acquisition\", \"match\", or technological brands can be good indicators for classification.\n"} , {"role":"user","content":"{{item.input}}"}]} ,"sampling_params":{"temperature":1,"max_completions_tokens":2048,"top_p":1,"seed":42},"model":"gpt-4o-mini","source":{"type":"file_content","content":[{"item":{"input":"Tech Company Launches Advanced Artificial Intelligence Platform","ground_truth":"Technology"}}]}}' + python: > + from openai import OpenAI + + client = OpenAI() + + + run = client.evals.runs.create( + "eval_67e579652b548190aaa83ada4b125f47", + name="gpt-4o-mini", + data_source={ + "type": "completions", + "input_messages": { + "type": "template", + "template": [ + { + "role": "developer", + "content": "Categorize a given news headline into one of the following topics: Technology, Markets, World, Business, or Sports.\n\n# Steps\n\n1. Analyze the content of the news headline to understand its primary focus.\n2. Extract the subject matter, identifying any key indicators or keywords.\n3. Use the identified indicators to determine the most suitable category out of the five options: Technology, Markets, World, Business, or Sports.\n4. 
Ensure only one category is selected per headline.\n\n# Output Format\n\nRespond with the chosen category as a single word. For instance: \"Technology\", \"Markets\", \"World\", \"Business\", or \"Sports\".\n\n# Examples\n\n**Input**: \"Apple Unveils New iPhone Model, Featuring Advanced AI Features\" \n**Output**: \"Technology\"\n\n**Input**: \"Global Stocks Mixed as Investors Await Central Bank Decisions\" \n**Output**: \"Markets\"\n\n**Input**: \"War in Ukraine: Latest Updates on Negotiation Status\" \n**Output**: \"World\"\n\n**Input**: \"Microsoft in Talks to Acquire Gaming Company for $2 Billion\" \n**Output**: \"Business\"\n\n**Input**: \"Manchester United Secures Win in Premier League Football Match\" \n**Output**: \"Sports\" \n\n# Notes\n\n- If the headline appears to fit into more than one category, choose the most dominant theme.\n- Keywords or phrases such as \"stocks\", \"company acquisition\", \"match\", or technological brands can be good indicators for classification.\n" + }, + { + "role": "user", + "content": "{{item.input}}" + } + ] + }, + "sampling_params": { + "temperature": 1, + "max_completions_tokens": 2048, + "top_p": 1, + "seed": 42 + }, + "model": "gpt-4o-mini", + "source": { + "type": "file_content", + "content": [ + { + "item": { + "input": "Tech Company Launches Advanced Artificial Intelligence Platform", + "ground_truth": "Technology" + } + } + ] + } + } + ) + + print(run) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const run = await openai.evals.runs.create( + "eval_67e579652b548190aaa83ada4b125f47", + { + name: "gpt-4o-mini", + data_source: { + type: "completions", + input_messages: { + type: "template", + template: [ + { + role: "developer", + content: "Categorize a given news headline into one of the following topics: Technology, Markets, World, Business, or Sports.\n\n# Steps\n\n1. Analyze the content of the news headline to understand its primary focus.\n2. 
Extract the subject matter, identifying any key indicators or keywords.\n3. Use the identified indicators to determine the most suitable category out of the five options: Technology, Markets, World, Business, or Sports.\n4. Ensure only one category is selected per headline.\n\n# Output Format\n\nRespond with the chosen category as a single word. For instance: \"Technology\", \"Markets\", \"World\", \"Business\", or \"Sports\".\n\n# Examples\n\n**Input**: \"Apple Unveils New iPhone Model, Featuring Advanced AI Features\" \n**Output**: \"Technology\"\n\n**Input**: \"Global Stocks Mixed as Investors Await Central Bank Decisions\" \n**Output**: \"Markets\"\n\n**Input**: \"War in Ukraine: Latest Updates on Negotiation Status\" \n**Output**: \"World\"\n\n**Input**: \"Microsoft in Talks to Acquire Gaming Company for $2 Billion\" \n**Output**: \"Business\"\n\n**Input**: \"Manchester United Secures Win in Premier League Football Match\" \n**Output**: \"Sports\" \n\n# Notes\n\n- If the headline appears to fit into more than one category, choose the most dominant theme.\n- Keywords or phrases such as \"stocks\", \"company acquisition\", \"match\", or technological brands can be good indicators for classification.\n" + }, + { + role: "user", + content: "{{item.input}}" + } + ] + }, + sampling_params: { + temperature: 1, + max_completions_tokens: 2048, + top_p: 1, + seed: 42 + }, + model: "gpt-4o-mini", + source: { + type: "file_content", + content: [ + { + item: { + input: "Tech Company Launches Advanced Artificial Intelligence Platform", + ground_truth: "Technology" + } + } + ] + } + } + } + ); + + console.log(run); response: > { "object": "eval.run", @@ -4045,6 +4763,25 @@ paths: curl https://api.openai.com/v1/evals/eval_67abd54d9b0081909a86353f6fb9317a/runs/evalrun_67abd54d60ec8190832b46859da808f7 \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" + python: | + from openai import OpenAI + client = OpenAI() + + run = 
client.evals.runs.retrieve( + "eval_67abd54d9b0081909a86353f6fb9317a", + "evalrun_67abd54d60ec8190832b46859da808f7" + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const run = await openai.evals.runs.retrieve( + "evalrun_67abd54d60ec8190832b46859da808f7", + { eval_id: "eval_67abd54d9b0081909a86353f6fb9317a" } + ); + console.log(run); response: > { "object": "eval.run", @@ -4231,6 +4968,25 @@ paths: -X POST \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" + python: | + from openai import OpenAI + client = OpenAI() + + canceled_run = client.evals.runs.cancel( + "eval_67abd54d9b0081909a86353f6fb9317a", + "evalrun_67abd54d60ec8190832b46859da808f7" + ) + print(canceled_run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const canceledRun = await openai.evals.runs.cancel( + "evalrun_67abd54d60ec8190832b46859da808f7", + { eval_id: "eval_67abd54d9b0081909a86353f6fb9317a" } + ); + console.log(canceledRun); response: > { "object": "eval.run", @@ -4433,6 +5189,25 @@ paths: -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" + python: | + from openai import OpenAI + client = OpenAI() + + deleted = client.evals.runs.delete( + "eval_123abc", + "evalrun_abc456" + ) + print(deleted) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const deleted = await openai.evals.runs.delete( + "eval_123abc", + "evalrun_abc456" + ); + console.log(deleted); response: | { "object": "eval.run.deleted", @@ -4517,6 +5292,25 @@ paths: curl https://api.openai.com/v1/evals/egroup_67abd54d9b0081909a86353f6fb9317a/runs/erun_67abd54d60ec8190832b46859da808f7/output_items \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" + python: | + from openai import OpenAI + client = OpenAI() + + output_items = client.evals.runs.output_items.list( + "egroup_67abd54d9b0081909a86353f6fb9317a", + 
"erun_67abd54d60ec8190832b46859da808f7" + ) + print(output_items) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const outputItems = await openai.evals.runs.outputItems.list( + "egroup_67abd54d9b0081909a86353f6fb9317a", + "erun_67abd54d60ec8190832b46859da808f7" + ); + console.log(outputItems); response: > { "object": "list", @@ -4633,6 +5427,29 @@ paths: curl https://api.openai.com/v1/evals/eval_67abd54d9b0081909a86353f6fb9317a/runs/evalrun_67abd54d60ec8190832b46859da808f7/output_items/outputitem_67abd55eb6548190bb580745d5644a33 \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -H "Content-Type: application/json" + python: | + from openai import OpenAI + client = OpenAI() + + output_item = client.evals.runs.output_items.retrieve( + "eval_67abd54d9b0081909a86353f6fb9317a", + "evalrun_67abd54d60ec8190832b46859da808f7", + "outputitem_67abd55eb6548190bb580745d5644a33" + ) + print(output_item) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const outputItem = await openai.evals.runs.outputItems.retrieve( + "outputitem_67abd55eb6548190bb580745d5644a33", + { + eval_id: "eval_67abd54d9b0081909a86353f6fb9317a", + run_id: "evalrun_67abd54d60ec8190832b46859da808f7", + } + ); + console.log(outputItem); response: > { "object": "eval.run.output_item", @@ -4920,7 +5737,7 @@ paths: const openai = new OpenAI(); async function main() { - const file = await openai.files.del("file-abc123"); + const file = await openai.files.delete("file-abc123"); console.log(file); } @@ -5033,6 +5850,153 @@ paths: } main(); + /fine_tuning/alpha/graders/run: + post: + operationId: runGrader + tags: + - Fine-tuning + summary: | + Run a grader. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/RunGraderRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunGraderResponse" + x-oaiMeta: + name: Run grader + beta: true + group: graders + returns: The results from the grader run. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/fine_tuning/alpha/graders/run \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "grader": { + "type": "score_model", + "name": "Example score model grader", + "input": [ + { + "role": "user", + "content": "Score how close the reference answer is to the model answer. Score 1.0 if they are the same and 0.0 if they are different. Return just a floating point score\n\nReference answer: {{item.reference_answer}}\n\nModel answer: {{sample.output_text}}" + } + ], + "model": "gpt-4o-2024-08-06", + "sampling_params": { + "temperature": 1, + "top_p": 1, + "seed": 42 + } + }, + "item": { + "reference_answer": "fuzzy wuzzy was a bear" + }, + "model_sample": "fuzzy wuzzy was a bear" + }' + response: | + { + "reward": 1.0, + "metadata": { + "name": "Example score model grader", + "type": "score_model", + "errors": { + "formula_parse_error": false, + "sample_parse_error": false, + "truncated_observation_error": false, + "unresponsive_reward_error": false, + "invalid_variable_error": false, + "other_error": false, + "python_grader_server_error": false, + "python_grader_server_error_type": null, + "python_grader_runtime_error": false, + "python_grader_runtime_error_details": null, + "model_grader_server_error": false, + "model_grader_refusal_error": false, + "model_grader_parse_error": false, + "model_grader_server_error_details": null + }, + "execution_time": 4.365238428115845, + "scores": {}, + "token_usage": { + "prompt_tokens": 190, + "total_tokens": 324, + "completion_tokens": 134, + "cached_tokens": 0 + 
}, + "sampled_model_name": "gpt-4o-2024-08-06" + }, + "sub_rewards": {}, + "model_grader_token_usage_per_model": { + "gpt-4o-2024-08-06": { + "prompt_tokens": 190, + "total_tokens": 324, + "completion_tokens": 134, + "cached_tokens": 0 + } + } + } + /fine_tuning/alpha/graders/validate: + post: + operationId: validateGrader + tags: + - Fine-tuning + summary: | + Validate a grader. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ValidateGraderRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ValidateGraderResponse" + x-oaiMeta: + name: Validate grader + beta: true + group: graders + returns: The validated grader object. + examples: + request: + curl: > + curl https://api.openai.com/v1/fine_tuning/alpha/graders/validate + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "grader": { + "type": "string_check", + "name": "Example string check grader", + "input": "{{sample.output_text}}", + "reference": "{{item.label}}", + "operation": "eq" + } + }' + response: | + { + "grader": { + "type": "string_check", + "name": "Example string check grader", + "input": "{{sample.output_text}}", + "reference": "{{item.label}}", + "operation": "eq" + } + } /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions: get: operationId: listFineTuningCheckpointPermissions @@ -5247,7 +6211,7 @@ paths: the name of the fine-tuned models once complete. 
- [Learn more about fine-tuning](/docs/guides/fine-tuning) + [Learn more about fine-tuning](/docs/guides/model-optimization) requestBody: required: true content: @@ -5340,27 +6304,38 @@ paths: } } }' - python: | + python: > from openai import OpenAI + + from openai.types.fine_tuning import SupervisedMethod, + SupervisedHyperparameters + + client = OpenAI() + client.fine_tuning.jobs.create( training_file="file-abc123", model="gpt-4o-mini", method={ "type": "supervised", - "supervised": { - "hyperparameters": { - "n_epochs": 2 - } - } + "supervised": SupervisedMethod( + hyperparameters=SupervisedHyperparameters( + n_epochs=2 + ) + ) } ) - node.js: | + node.js: > import OpenAI from "openai"; + import { SupervisedMethod, SupervisedHyperparameters } from + "openai/resources/fine-tuning/methods"; + + const openai = new OpenAI(); + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", @@ -5378,12 +6353,13 @@ paths: console.log(fineTune); } + main(); response: | { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-4o-mini-2024-07-18", + "model": "gpt-4o-mini", "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", @@ -5391,20 +6367,37 @@ paths: "status": "queued", "validation_file": null, "training_file": "file-abc123", - "hyperparameters": {"n_epochs": 2}, + "hyperparameters": { + "batch_size": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": 2 + }, "method": { "type": "supervised", "supervised": { "hyperparameters": { "batch_size": "auto", "learning_rate_multiplier": "auto", - "n_epochs": 2, + "n_epochs": 2 } } }, - "metadata": null + "metadata": null, + "error": { + "code": null, + "message": null, + "param": null + }, + "finished_at": null, + "seed": 683058546, + "trained_tokens": null, + "estimated_finish": null, + "integrations": [], + "user_provided_suffix": null, + "usage_metrics": null, + "shared_with_openai": false } - - title: Validation file + - title: 
DPO request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs \ @@ -5413,57 +6406,177 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-4o-mini" + "model": "gpt-4o-mini", + "method": { + "type": "dpo", + "dpo": { + "hyperparameters": { + "beta": 0.1 + } + } + } }' - python: | + python: | + from openai import OpenAI + from openai.types.fine_tuning import DpoMethod, DpoHyperparameters + + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc", + validation_file="file-123", + model="gpt-4o-mini", + method={ + "type": "dpo", + "dpo": DpoMethod( + hyperparameters=DpoHyperparameters(beta=0.1) + ) + } + ) + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc", + "model": "gpt-4o-mini", + "created_at": 1746130590, + "fine_tuned_model": null, + "organization_id": "org-abc", + "result_files": [], + "status": "queued", + "validation_file": "file-123", + "training_file": "file-abc", + "method": { + "type": "dpo", + "dpo": { + "hyperparameters": { + "beta": 0.1, + "batch_size": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto" + } + } + }, + "metadata": null, + "error": { + "code": null, + "message": null, + "param": null + }, + "finished_at": null, + "hyperparameters": null, + "seed": 1036326793, + "estimated_finish": null, + "integrations": [], + "user_provided_suffix": null, + "usage_metrics": null, + "shared_with_openai": false + } + - title: Reinforcement + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc", + "validation_file": "file-123", + "model": "o4-mini", + "method": { + "type": "reinforcement", + "reinforcement": { + "grader": { + "type": "string_check", + "name": "Example string check grader", + "input": "{{sample.output_text}}", + "reference": "{{item.label}}", + "operation": "eq" + }, + "hyperparameters": { + 
"reasoning_effort": "medium" + } + } + } + }' + python: > from openai import OpenAI - client = OpenAI() - client.fine_tuning.jobs.create( - training_file="file-abc123", - validation_file="file-def456", - model="gpt-4o-mini" - ) - node.js: | - import OpenAI from "openai"; + from openai.types.fine_tuning import ReinforcementMethod, + ReinforcementHyperparameters - const openai = new OpenAI(); + from openai.types.graders import StringCheckGrader - async function main() { - const fineTune = await openai.fineTuning.jobs.create({ - training_file: "file-abc123", - validation_file: "file-abc123" - }); - console.log(fineTune); - } + client = OpenAI() - main(); - response: | + + client.fine_tuning.jobs.create( + training_file="file-abc", + validation_file="file-123", + model="o4-mini", + method={ + "type": "reinforcement", + "reinforcement": ReinforcementMethod( + grader=StringCheckGrader( + name="Example string check grader", + type="string_check", + input="{{item.label}}", + operation="eq", + reference="{{sample.output_text}}" + ), + hyperparameters=ReinforcementHyperparameters( + reasoning_effort="medium", + ) + ) + }, + seed=42, + ) + response: |+ { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-4o-mini-2024-07-18", + "model": "o4-mini", "created_at": 1721764800, + "finished_at": null, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], - "status": "queued", - "validation_file": "file-abc123", - "training_file": "file-abc123", + "status": "validating_files", + "validation_file": "file-123", + "training_file": "file-abc", + "trained_tokens": null, + "error": {}, + "user_provided_suffix": null, + "seed": 950189191, + "estimated_finish": null, + "integrations": [], "method": { - "type": "supervised", - "supervised": { + "type": "reinforcement", + "reinforcement": { "hyperparameters": { "batch_size": "auto", "learning_rate_multiplier": "auto", "n_epochs": "auto", - } + "eval_interval": "auto", + "eval_samples": "auto", + 
"compute_multiplier": "auto", + "reasoning_effort": "medium" + }, + "grader": { + "type": "string_check", + "name": "Example string check grader", + "input": "{{sample.output_text}}", + "reference": "{{item.label}}", + "operation": "eq" + }, + "response_format": null } }, - "metadata": null + "metadata": null, + "usage_metrics": null, + "shared_with_openai": false } - - title: DPO + + - title: Validation file request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs \ @@ -5472,16 +6585,32 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-4o-mini", - "method": { - "type": "dpo", - "dpo": { - "hyperparameters": { - "beta": 0.1, - } - } - } + "model": "gpt-4o-mini" }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + validation_file="file-def456", + model="gpt-4o-mini" + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + validation_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); response: | { "object": "fine_tuning.job", @@ -5495,10 +6624,9 @@ paths: "validation_file": "file-abc123", "training_file": "file-abc123", "method": { - "type": "dpo", - "dpo": { + "type": "supervised", + "supervised": { "hyperparameters": { - "beta": 0.1, "batch_size": "auto", "learning_rate_multiplier": "auto", "n_epochs": "auto", @@ -5664,7 +6792,7 @@ paths: summary: | Get info about a fine-tuning job. 
- [Learn more about fine-tuning](/docs/guides/fine-tuning) + [Learn more about fine-tuning](/docs/guides/model-optimization) parameters: - in: path name: fine_tuning_job_id @@ -5862,7 +6990,7 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" response: > { - "object": "list" + "object": "list", "data": [ { "object": "fine_tuning.job.checkpoint", @@ -5874,7 +7002,7 @@ paths: "full_valid_mean_token_accuracy": 0.874 }, "fine_tuning_job_id": "ftjob-abc123", - "step_number": 2000, + "step_number": 2000 }, { "object": "fine_tuning.job.checkpoint", @@ -5886,8 +7014,8 @@ paths: "full_valid_mean_token_accuracy": 0.781 }, "fine_tuning_job_id": "ftjob-abc123", - "step_number": 1000, - }, + "step_number": 1000 + } ], "first_id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", "last_id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", @@ -5989,6 +7117,138 @@ paths: ], "has_more": true } + /fine_tuning/jobs/{fine_tuning_job_id}/pause: + post: + operationId: pauseFineTuningJob + tags: + - Fine-tuning + summary: | + Pause a fine-tune job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to pause. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Pause fine-tuning + group: fine-tuning + returns: The paused [fine-tuning](/docs/api-reference/fine-tuning/object) + object. 
+ examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/pause \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.pause("ftjob-abc123") + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const fineTune = await openai.fineTuning.jobs.pause("ftjob-abc123"); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "paused", + "validation_file": "file-abc123", + "training_file": "file-abc123" + } + /fine_tuning/jobs/{fine_tuning_job_id}/resume: + post: + operationId: resumeFineTuningJob + tags: + - Fine-tuning + summary: | + Resume a fine-tune job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to resume. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Resume fine-tuning + group: fine-tuning + returns: The resumed [fine-tuning](/docs/api-reference/fine-tuning/object) + object. 
+ examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/resume \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.resume("ftjob-abc123") + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const fineTune = await openai.fineTuning.jobs.resume("ftjob-abc123"); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123" + } /images/edits: post: operationId: createImageEdit @@ -6485,8 +7745,8 @@ paths: async function main() { - const model = await openai.models.del("ft:gpt-4o-mini:acemeco:suffix:abc123"); - + const model = await openai.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123"); + console.log(model); } @@ -7460,7 +8720,7 @@ paths: tags: - Certificates parameters: - - name: cert_id + - name: certificate_id in: path description: Unique ID of the certificate to retrieve. required: true @@ -8325,6 +9585,12 @@ paths: tags: - Certificates parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string - name: limit in: query description: > @@ -8403,6 +9669,13 @@ paths: operationId: activateProjectCertificates tags: - Certificates + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string requestBody: description: The certificate activation payload. required: true @@ -8461,15 +9734,19 @@ paths: } /organization/projects/{project_id}/certificates/deactivate: post: - summary: > - Deactivate certificates at the project level. 
- - - You can atomically and idempotently deactivate up to 10 certificates at - a time. + summary: | + Deactivate certificates at the project level. You can atomically and + idempotently deactivate up to 10 certificates at a time. operationId: deactivateProjectCertificates tags: - Certificates + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string requestBody: description: The certificate deactivation payload. required: true @@ -10520,6 +11797,8 @@ paths: "tool_choice": "none", "temperature": 0.7, "max_response_output_tokens": 200, + "speed": 1.1, + "tracing": "auto", "client_secret": { "value": "ek_abc123", "expires_at": 1234567890 @@ -11777,6 +13056,21 @@ paths: description: | Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + - in: query + name: stream + schema: + type: boolean + description: | + If set to true, the model response data will be streamed to the client + as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the [Streaming section below](/docs/api-reference/responses-streaming) + for more information. + - in: query + name: starting_after + schema: + type: integer + description: | + The sequence number of the event after which to start streaming. 
responses: "200": description: OK @@ -11803,7 +13097,7 @@ paths: const client = new OpenAI(); const response = await client.responses.retrieve("resp_123"); - console.log(response); + console.log(response); python: | from openai import OpenAI client = OpenAI() @@ -11905,13 +13199,13 @@ paths: import OpenAI from "openai"; const client = new OpenAI(); - const response = await client.responses.del("resp_123"); - console.log(response); + const response = await client.responses.delete("resp_123"); + console.log(response); python: | from openai import OpenAI client = OpenAI() - response = client.responses.del("resp_123") + response = client.responses.delete("resp_123") print(response) response: | { @@ -11919,6 +13213,116 @@ paths: "object": "response", "deleted": true } + /responses/{response_id}/cancel: + post: + operationId: cancelResponse + tags: + - Responses + summary: | + Cancels a model response with the given ID. Only responses created with + the `background` parameter set to `true` can be cancelled. + [Learn more](/docs/guides/background). + parameters: + - in: path + name: response_id + required: true + schema: + type: string + example: resp_677efb5139a88190b512bc3fef8e535d + description: The ID of the response to cancel. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Response" + "404": + description: Not Found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + x-oaiMeta: + name: Cancel a response + group: responses + returns: | + A [Response](/docs/api-reference/responses/object) object. 
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/responses/resp_123/cancel \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" + javascript: | + import OpenAI from "openai"; + const client = new OpenAI(); + + const response = await client.responses.cancel("resp_123"); + console.log(response); + python: | + from openai import OpenAI + client = OpenAI() + + response = client.responses.cancel("resp_123") + print(response) + response: > + { + "id": "resp_67cb71b351908190a308f3859487620d06981a8637e6bc44", + "object": "response", + "created_at": 1741386163, + "status": "completed", + "error": null, + "incomplete_details": null, + "instructions": null, + "max_output_tokens": null, + "model": "gpt-4o-2024-08-06", + "output": [ + { + "type": "message", + "id": "msg_67cb71b3c2b0819084d481baaaf148f206981a8637e6bc44", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "output_text", + "text": "Silent circuits hum, \nThoughts emerge in data streams— \nDigital dawn breaks.", + "annotations": [] + } + ] + } + ], + "parallel_tool_calls": true, + "previous_response_id": null, + "reasoning": { + "effort": null, + "summary": null + }, + "store": true, + "temperature": 1.0, + "text": { + "format": { + "type": "text" + } + }, + "tool_choice": "auto", + "tools": [], + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 32, + "input_tokens_details": { + "cached_tokens": 0 + }, + "output_tokens": 18, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 50 + }, + "user": null, + "metadata": {} + } /responses/{response_id}/input_items: get: operationId: listInputItems @@ -11951,7 +13355,7 @@ paths: - asc - desc description: | - The order to return the input items in. Default is `asc`. + The order to return the input items in. Default is `desc`. - `asc`: Return the input items in ascending order. - `desc`: Return the input items in descending order. 
- in: query @@ -12725,16 +14129,19 @@ paths: response = client.beta.threads.delete("thread_abc123") print(response) - node.js: |- + node.js: >- import OpenAI from "openai"; + const openai = new OpenAI(); + async function main() { - const response = await openai.beta.threads.del("thread_abc123"); + const response = await openai.beta.threads.delete("thread_abc123"); console.log(response); } + main(); response: | { @@ -13040,8 +14447,8 @@ paths: async function main() { const message = await openai.beta.threads.messages.retrieve( - "thread_abc123", - "msg_abc123" + "msg_abc123", + { thread_id: "thread_abc123" } ); console.log(message); @@ -13217,15 +14624,17 @@ paths: thread_id="thread_abc123", ) print(deleted_message) - node.js: |- + node.js: >- import OpenAI from "openai"; + const openai = new OpenAI(); + async function main() { - const deletedMessage = await openai.beta.threads.messages.del( - "thread_abc123", - "msg_abc123" + const deletedMessage = await openai.beta.threads.messages.delete( + "msg_abc123", + { thread_id: "thread_abc123" } ); console.log(deletedMessage); @@ -13849,129 +15258,129 @@ paths: async function main() { const run = await openai.beta.threads.runs.retrieve( - "thread_abc123", - "run_abc123" - ); - - console.log(run); - } - - main(); - response: | - { - "id": "run_abc123", - "object": "thread.run", - "created_at": 1699075072, - "assistant_id": "asst_abc123", - "thread_id": "thread_abc123", - "status": "completed", - "started_at": 1699075072, - "expires_at": null, - "cancelled_at": null, - "failed_at": null, - "completed_at": 1699075073, - "last_error": null, - "model": "gpt-4o", - "instructions": null, - "incomplete_details": null, - "tools": [ - { - "type": "code_interpreter" - } - ], - "metadata": {}, - "usage": { - "prompt_tokens": 123, - "completion_tokens": 456, - "total_tokens": 579 - }, - "temperature": 1.0, - "top_p": 1.0, - "max_prompt_tokens": 1000, - "max_completion_tokens": 1000, - "truncation_strategy": { - "type": "auto", - 
"last_messages": null - }, - "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true - } - post: - operationId: modifyRun - tags: - - Assistants - summary: Modifies a run. - parameters: - - in: path - name: thread_id - required: true - schema: - type: string - description: The ID of the [thread](/docs/api-reference/threads) that was run. - - in: path - name: run_id - required: true - schema: - type: string - description: The ID of the run to modify. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ModifyRunRequest" - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "#/components/schemas/RunObject" - x-oaiMeta: - name: Modify run - group: threads - beta: true - returns: The modified [run](/docs/api-reference/runs/object) object matching the - specified ID. - examples: - request: - curl: > - curl - https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: application/json" \ - -H "OpenAI-Beta: assistants=v2" \ - -d '{ - "metadata": { - "user_id": "user_abc123" - } - }' - python: | - from openai import OpenAI - client = OpenAI() - - run = client.beta.threads.runs.update( - thread_id="thread_abc123", - run_id="run_abc123", - metadata={"user_id": "user_abc123"}, - ) - - print(run) - node.js: | - import OpenAI from "openai"; - - const openai = new OpenAI(); - - async function main() { - const run = await openai.beta.threads.runs.update( - "thread_abc123", "run_abc123", - { - metadata: { - user_id: "user_abc123", - }, - } + { thread_id: "thread_abc123" } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + 
"completed_at": 1699075073, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + post: + operationId: modifyRun + tags: + - Assistants + summary: Modifies a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Modify run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the + specified ID. 
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "metadata": { + "user_id": "user_abc123" + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.update( + thread_id="thread_abc123", + run_id="run_abc123", + metadata={"user_id": "user_abc123"}, + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.update( + "run_abc123", + { + thread_id: "thread_abc123", + metadata: { + user_id: "user_abc123", + }, + } ); console.log(run); @@ -14084,8 +15493,8 @@ paths: async function main() { const run = await openai.beta.threads.runs.cancel( - "thread_abc123", - "run_abc123" + "run_abc123", + { thread_id: "thread_abc123" } ); console.log(run); @@ -14232,8 +15641,8 @@ paths: async function main() { const runStep = await openai.beta.threads.runs.steps.list( - "thread_abc123", - "run_abc123" + "run_abc123", + { thread_id: "thread_abc123" } ); console.log(runStep); } @@ -14348,9 +15757,8 @@ paths: async function main() { const runStep = await openai.beta.threads.runs.steps.retrieve( - "thread_abc123", - "run_abc123", - "step_abc123" + "step_abc123", + { thread_id: "thread_abc123", run_id: "run_abc123" } ); console.log(runStep); } @@ -14465,9 +15873,9 @@ paths: async function main() { const run = await openai.beta.threads.runs.submitToolOutputs( - "thread_123", "run_123", { + thread_id: "thread_123", tool_outputs: [ { tool_call_id: "call_001", @@ -14577,9 +15985,9 @@ paths: async function main() { const stream = await openai.beta.threads.runs.submitToolOutputs( - "thread_123", "run_123", { + thread_id: "thread_123", tool_outputs: [ { tool_call_id: "call_001", @@ -15268,7 +16676,7 @@ paths: const openai = new OpenAI(); async function 
main() { - const deletedVectorStore = await openai.vectorStores.del( + const deletedVectorStore = await openai.vectorStores.delete( "vs_abc123" ); console.log(deletedVectorStore); @@ -15434,8 +16842,8 @@ paths: async function main() { const vectorStoreFileBatch = await openai.vectorStores.fileBatches.retrieve( - "vs_abc123", - "vsfb_abc123" + "vsfb_abc123", + { vector_store_id: "vs_abc123" } ); console.log(vectorStoreFileBatch); } @@ -15517,8 +16925,8 @@ paths: async function main() { const deletedVectorStoreFileBatch = await openai.vectorStores.fileBatches.cancel( - "vs_abc123", - "vsfb_abc123" + "vsfb_abc123", + { vector_store_id: "vs_abc123" } ); console.log(deletedVectorStoreFileBatch); } @@ -15645,8 +17053,8 @@ paths: async function main() { const vectorStoreFiles = await openai.vectorStores.fileBatches.listFiles( - "vs_abc123", - "vsfb_abc123" + "vsfb_abc123", + { vector_store_id: "vs_abc123" } ); console.log(vectorStoreFiles); } @@ -15935,8 +17343,8 @@ paths: async function main() { const vectorStoreFile = await openai.vectorStores.files.retrieve( - "vs_abc123", - "file-abc123" + "file-abc123", + { vector_store_id: "vs_abc123" } ); console.log(vectorStoreFile); } @@ -16007,9 +17415,9 @@ paths: async function main() { - const deletedVectorStoreFile = await openai.vectorStores.files.del( - "vs_abc123", - "file-abc123" + const deletedVectorStoreFile = await openai.vectorStores.files.delete( + "file-abc123", + { vector_store_id: "vs_abc123" } ); console.log(deletedVectorStoreFile); } @@ -16206,6 +17614,245 @@ paths: "has_more": false, "next_page": null } +webhooks: + batch_cancelled: + post: + description: | + Sent when a batch has been cancelled. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookBatchCancelled" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. 
+ batch_completed: + post: + description: | + Sent when a batch has completed processing. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookBatchCompleted" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + batch_expired: + post: + description: | + Sent when a batch has expired before completion. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookBatchExpired" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + batch_failed: + post: + description: | + Sent when a batch has failed. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookBatchFailed" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + eval_run_canceled: + post: + description: | + Sent when an eval run has been canceled. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookEvalRunCanceled" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + eval_run_failed: + post: + description: | + Sent when an eval run has failed. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookEvalRunFailed" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. 
+ eval_run_succeeded: + post: + description: | + Sent when an eval run has succeeded. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookEvalRunSucceeded" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + fine_tuning_job_cancelled: + post: + description: | + Sent when a fine-tuning job has been cancelled. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookFineTuningJobCancelled" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + fine_tuning_job_failed: + post: + description: | + Sent when a fine-tuning job has failed. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookFineTuningJobFailed" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + fine_tuning_job_succeeded: + post: + description: | + Sent when a fine-tuning job has succeeded. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookFineTuningJobSucceeded" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + response_cancelled: + post: + description: | + Sent when a background response has been cancelled. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookResponseCancelled" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. 
+ Non-200 + + status codes will be retried. + response_completed: + post: + description: | + Sent when a background response has completed successfully. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookResponseCompleted" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + response_failed: + post: + description: | + Sent when a background response has failed. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookResponseFailed" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. + response_incomplete: + post: + description: | + Sent when a background response is incomplete. + requestBody: + description: The event payload sent by the API. + content: + application/json: + schema: + $ref: "#/components/schemas/WebhookResponseIncomplete" + responses: + "200": + description: > + Return a 200 status code to acknowledge receipt of the event. + Non-200 + + status codes will be retried. components: schemas: AddUploadPartRequest: @@ -16905,18 +18552,24 @@ components: name: type: string description: The organization name. - settings: - type: object - properties: - threads_ui_visibility: - type: string - description: Visibility of the threads page which shows messages created with - the Assistants API and Playground. One of `ANY_ROLE`, - `OWNERS`, or `NONE`. - usage_dashboard_visibility: - type: string - description: Visibility of the usage dashboard which shows activity and costs - for your organization. One of `ANY_ROLE` or `OWNERS`. + threads_ui_visibility: + type: string + description: Visibility of the threads page which shows messages created with + the Assistants API and Playground. 
One of `ANY_ROLE`, + `OWNERS`, or `NONE`. + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs + for your organization. One of `ANY_ROLE` or `OWNERS`. + api_call_logging: + type: string + description: How your organization logs data from supported API calls. One of + `disabled`, `enabled_per_call`, `enabled_for_all_projects`, + or `enabled_for_selected_projects` + api_call_logging_project_ids: + type: string + description: The list of project ids if api_call_logging is set to + `enabled_for_selected_projects` project.created: type: object description: The details for events with this `type`. @@ -18633,6 +20286,44 @@ components: required: - type - files + CodeInterpreterOutputImage: + type: object + title: Code interpreter output image + description: | + The image output from the code interpreter. + properties: + type: + type: string + enum: + - image + default: image + x-stainless-const: true + description: The type of the output. Always 'image'. + url: + type: string + description: The URL of the image output from the code interpreter. + required: + - type + - url + CodeInterpreterOutputLogs: + type: object + title: Code interpreter output logs + description: | + The logs output from the code interpreter. + properties: + type: + type: string + enum: + - logs + default: logs + x-stainless-const: true + description: The type of the output. Always 'logs'. + logs: + type: string + description: The logs output from the code interpreter. + required: + - type + - logs CodeInterpreterTextOutput: type: object title: Code interpreter text output @@ -18653,52 +20344,114 @@ components: required: - type - logs + CodeInterpreterTool: + type: object + title: Code interpreter + description: | + A tool that runs Python code to help generate a response to a prompt. + properties: + type: + type: string + enum: + - code_interpreter + description: | + The type of the code interpreter tool. 
Always `code_interpreter`. + x-stainless-const: true + container: + description: > + The code interpreter container. Can be a container ID or an object + that + + specifies uploaded file IDs to make available to your code. + oneOf: + - type: string + description: The container ID. + - $ref: "#/components/schemas/CodeInterpreterToolAuto" + required: + - type + - container + CodeInterpreterToolAuto: + type: object + title: CodeInterpreterContainerAuto + description: > + Configuration for a code interpreter container. Optionally specify the + IDs + + of the files to run the code on. + required: + - type + properties: + type: + type: string + enum: + - auto + description: Always `auto`. + x-stainless-const: true + file_ids: + type: array + items: + type: string + description: | + An optional list of uploaded files to make available to your code. CodeInterpreterToolCall: type: object title: Code interpreter tool call description: | A tool call to run code. properties: - id: - type: string - description: | - The unique ID of the code interpreter tool call. type: type: string enum: - code_interpreter_call + default: code_interpreter_call + x-stainless-const: true description: > The type of the code interpreter tool call. Always `code_interpreter_call`. - x-stainless-const: true - code: + id: type: string description: | - The code to run. + The unique ID of the code interpreter tool call. status: type: string enum: - in_progress - - interpreting - completed + - incomplete + - interpreting + - failed description: | The status of the code interpreter tool call. - results: + container_id: + type: string + description: | + The ID of the container used to run the code. + code: + type: string + nullable: true + description: | + The code to run, or null if not available. + outputs: type: array items: - $ref: "#/components/schemas/CodeInterpreterToolOutput" - description: | - The results of the code interpreter tool call. 
+ oneOf: + - $ref: "#/components/schemas/CodeInterpreterOutputLogs" + - $ref: "#/components/schemas/CodeInterpreterOutputImage" + discriminator: + propertyName: type + nullable: true + description: > + The outputs generated by the code interpreter, such as logs or + images. + + Can be null if no outputs are available. required: - - id - type - - code + - id - status - - results - CodeInterpreterToolOutput: - oneOf: - - $ref: "#/components/schemas/CodeInterpreterTextOutput" - - $ref: "#/components/schemas/CodeInterpreterFileOutput" + - container_id + - code + - outputs ComparisonFilter: type: object additionalProperties: false @@ -19014,6 +20767,170 @@ components: - id - code - message + ContainerFileListResource: + type: object + properties: + object: + type: string + enum: + - list + description: The type of object returned, must be 'list'. + data: + type: array + description: A list of container files. + items: + $ref: "#/components/schemas/ContainerFileResource" + first_id: + type: string + description: The ID of the first file in the list. + last_id: + type: string + description: The ID of the last file in the list. + has_more: + type: boolean + description: Whether there are more files available. + required: + - object + - data + - first_id + - last_id + - has_more + ContainerFileResource: + type: object + title: The container file object + properties: + id: + type: string + description: Unique identifier for the file. + object: + type: string + description: The type of this object (`container.file`). + container_id: + type: string + description: The container this file belongs to. + created_at: + type: integer + description: Unix timestamp (in seconds) when the file was created. + bytes: + type: integer + description: Size of the file in bytes. + path: + type: string + description: Path of the file in the container. + source: + type: string + description: Source of the file (e.g., `user`, `assistant`). 
+ required: + - id + - object + - created_at + - bytes + - container_id + - path + - source + x-oaiMeta: + name: The container file object + example: > + { + "id": "cfile_682e0e8a43c88191a7978f477a09bdf5", + "object": "container.file", + "created_at": 1747848842, + "bytes": 880, + "container_id": "cntr_682e0e7318108198aa783fd921ff305e08e78805b9fdbb04", + "path": "/mnt/data/88e12fa445d32636f190a0b33daed6cb-tsconfig.json", + "source": "user" + } + ContainerListResource: + type: object + properties: + object: + type: string + enum: + - list + description: The type of object returned, must be 'list'. + data: + type: array + description: A list of containers. + items: + $ref: "#/components/schemas/ContainerResource" + first_id: + type: string + description: The ID of the first container in the list. + last_id: + type: string + description: The ID of the last container in the list. + has_more: + type: boolean + description: Whether there are more containers available. + required: + - object + - data + - first_id + - last_id + - has_more + ContainerResource: + type: object + title: The container object + properties: + id: + type: string + description: Unique identifier for the container. + object: + type: string + description: The type of this object. + name: + type: string + description: Name of the container. + created_at: + type: integer + description: Unix timestamp (in seconds) when the container was created. + status: + type: string + description: Status of the container (e.g., active, deleted). + expires_after: + type: object + description: > + The container will expire after this time period. + + The anchor is the reference point for the expiration. + + The minutes is the number of minutes after the anchor before the + container expires. + properties: + anchor: + type: string + description: The reference point for the expiration. 
+ enum: + - last_active_at + minutes: + type: integer + description: The number of minutes after the anchor before the container + expires. + required: + - id + - object + - name + - created_at + - status + - id + - name + - created_at + - status + x-oaiMeta: + name: The container object + example: | + { + "id": "cntr_682dfebaacac8198bbfe9c2474fb6f4a085685cbe3cb5863", + "object": "container", + "created_at": 1747844794, + "status": "running", + "expires_after": { + "anchor": "last_active_at", + "minutes": 20 + }, + "last_active_at": 1747844794, + "name": "My Container" + } Content: description: | Multi-modal input and output contents. @@ -19467,7 +21384,11 @@ components: use in our [model distillation](/docs/guides/distillation) or - [evals](/docs/guides/evals) products. + [evals](/docs/guides/evals) products. + + + Supports text and image inputs. Note: image inputs over 10MB + will be dropped. stream: description: | If set to true, the model response data will be streamed to the client @@ -20281,13 +22202,53 @@ components: "total_tokens": 12 } } + CreateContainerBody: + type: object + properties: + name: + type: string + description: Name of the container to create. + file_ids: + type: array + description: IDs of files to copy to the container. + items: + type: string + expires_after: + type: object + description: Container expiration time in seconds relative to the 'anchor' time. + properties: + anchor: + type: string + enum: + - last_active_at + description: Time anchor for the expiration time. Currently only + 'last_active_at' is supported. + minutes: + type: integer + required: + - anchor + - minutes + required: + - name + CreateContainerFileBody: + type: object + properties: + file_id: + type: string + description: Name of the file to create. + file: + description: | + The File object (not file name) to be uploaded. 
+ type: string + format: binary + required: [] CreateEmbeddingRequest: type: object additionalProperties: false properties: input: description: | - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. Some models may also impose a limit on total number of tokens summed across inputs. + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for all embedding models), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. In addition to the per-input token limit, all embedding models enforce a maximum of 300,000 tokens summed across all inputs in a single request. example: The quick brown fox jumped over the lazy dog oneOf: - type: string @@ -20413,6 +22374,10 @@ components: default: completions description: The type of run data source. Always `completions`. input_messages: + description: Used when sampling from a model. Dictates the structure of the + messages passed into the model. Can either be a reference to a + prebuilt trajectory (ie, `item.input_trajectory`), or a template + with variable references to the `item` namespace. oneOf: - type: object title: TemplateInputMessages @@ -20425,7 +22390,7 @@ components: template: type: array description: A list of chat messages forming the prompt or context. 
May include - variable references to the "item" namespace, ie + variable references to the `item` namespace, ie {{item.name}}. items: oneOf: @@ -20444,7 +22409,8 @@ components: description: The type of input messages. Always `item_reference`. item_reference: type: string - description: A reference to a variable in the "item" namespace. Ie, "item.name" + description: A reference to a variable in the `item` namespace. Ie, + "item.input_trajectory" required: - type - item_reference @@ -20467,11 +22433,49 @@ components: type: integer description: A seed value to initialize the randomness, during sampling. default: 42 + response_format: + description: > + An object specifying the format that the model must output. + + + Setting to `{ "type": "json_schema", "json_schema": {...} }` + enables + + Structured Outputs which ensures the model will match your + supplied JSON + + schema. Learn more in the [Structured Outputs + + guide](/docs/guides/structured-outputs). + + + Setting to `{ "type": "json_object" }` enables the older JSON + mode, which + + ensures the message the model generates is valid JSON. Using + `json_schema` + + is preferred for models that support it. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + tools: + type: array + description: > + A list of tools the model may call. Currently, only functions + are supported as a tool. Use this to provide a list of functions + the model may generate JSON inputs for. A max of 128 functions + are supported. + items: + $ref: "#/components/schemas/ChatCompletionTool" model: type: string description: The name of the model to use for generating completions (e.g. "o3-mini"). source: + description: Determines what populates the `item` namespace in this run's data + source. 
oneOf: - $ref: "#/components/schemas/EvalJsonlFileContentSource" - $ref: "#/components/schemas/EvalJsonlFileIdSource" @@ -20558,7 +22562,7 @@ components: CreateEvalItem: title: CreateEvalItem description: A chat message that makes up the prompt or context. May include - variable references to the "item" namespace, ie {{item.name}}. + variable references to the `item` namespace, ie {{item.name}}. type: object oneOf: - type: object @@ -20591,6 +22595,7 @@ components: description: The type of data source. Always `jsonl`. x-stainless-const: true source: + description: Determines what populates the `item` namespace in the data source. oneOf: - $ref: "#/components/schemas/EvalJsonlFileContentSource" - $ref: "#/components/schemas/EvalJsonlFileIdSource" @@ -20632,7 +22637,7 @@ components: input: type: array description: A list of chat messages forming the prompt or context. May include - variable references to the "item" namespace, ie {{item.name}}. + variable references to the `item` namespace, ie {{item.name}}. items: $ref: "#/components/schemas/CreateEvalItem" labels: @@ -20678,8 +22683,8 @@ components: type: object title: LogsDataSourceConfig description: > - A data source config which specifies the metadata property of your - stored completions query. + A data source config which specifies the metadata property of your logs + query. This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. @@ -20723,19 +22728,24 @@ components: data_source_config: type: object description: The configuration for the data source used for the evaluation runs. + Dictates the schema of the data used in the evaluation. oneOf: - $ref: "#/components/schemas/CreateEvalCustomDataSourceConfig" - $ref: "#/components/schemas/CreateEvalLogsDataSourceConfig" + - $ref: "#/components/schemas/CreateEvalStoredCompletionsDataSourceConfig" testing_criteria: type: array - description: A list of graders for all eval runs in this group. 
+ description: A list of graders for all eval runs in this group. Graders can + reference variables in the data source using double curly braces + notation, like `{{item.variable_name}}`. To reference the model's + output, use the `sample` namespace (ie, `{{sample.output_text}}`). items: oneOf: - $ref: "#/components/schemas/CreateEvalLabelModelGrader" - - $ref: "#/components/schemas/EvalStringCheckGrader" - - $ref: "#/components/schemas/EvalTextSimilarityGrader" - - $ref: "#/components/schemas/EvalPythonGrader" - - $ref: "#/components/schemas/EvalScoreModelGrader" + - $ref: "#/components/schemas/EvalGraderStringCheck" + - $ref: "#/components/schemas/EvalGraderTextSimilarity" + - $ref: "#/components/schemas/EvalGraderPython" + - $ref: "#/components/schemas/EvalGraderScoreModel" required: - data_source_config - testing_criteria @@ -20749,12 +22759,17 @@ components: type: type: string enum: - - completions - default: completions - description: The type of run data source. Always `completions`. + - responses + default: responses + description: The type of run data source. Always `responses`. input_messages: + description: Used when sampling from a model. Dictates the structure of the + messages passed into the model. Can either be a reference to a + prebuilt trajectory (ie, `item.input_trajectory`), or a template + with variable references to the `item` namespace. oneOf: - type: object + title: InputMessagesTemplate properties: type: type: string @@ -20764,7 +22779,7 @@ components: template: type: array description: A list of chat messages forming the prompt or context. May include - variable references to the "item" namespace, ie + variable references to the `item` namespace, ie {{item.name}}. items: oneOf: @@ -20785,6 +22800,7 @@ components: - type - template - type: object + title: InputMessagesItemReference properties: type: type: string @@ -20793,7 +22809,7 @@ components: description: The type of input messages. Always `item_reference`. 
item_reference: type: string - description: A reference to a variable in the "item" namespace. Ie, "item.name" + description: A reference to a variable in the `item` namespace. Ie, "item.name" required: - type - item_reference @@ -20816,11 +22832,51 @@ components: type: integer description: A seed value to initialize the randomness, during sampling. default: 42 + tools: + type: array + description: > + An array of tools the model may call while generating a + response. You + + can specify which tool to use by setting the `tool_choice` + parameter. + + + The two categories of tools you can provide the model are: + + + - **Built-in tools**: Tools that are provided by OpenAI that + extend the + model's capabilities, like [web search](/docs/guides/tools-web-search) + or [file search](/docs/guides/tools-file-search). Learn more about + [built-in tools](/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined + by you, + enabling the model to call your own code. Learn more about + [function calling](/docs/guides/function-calling). + items: + $ref: "#/components/schemas/Tool" + text: + type: object + description: > + Configuration options for a text response from the model. Can be + plain + + text or structured JSON data. Learn more: + + - [Text inputs and outputs](/docs/guides/text) + + - [Structured Outputs](/docs/guides/structured-outputs) + properties: + format: + $ref: "#/components/schemas/TextResponseFormatConfiguration" model: type: string description: The name of the model to use for generating completions (e.g. "o3-mini"). source: + description: Determines what populates the `item` namespace in this run's data + source. 
oneOf: - $ref: "#/components/schemas/EvalJsonlFileContentSource" - $ref: "#/components/schemas/EvalJsonlFileIdSource" @@ -20835,14 +22891,14 @@ components: { "name": "gpt-4o-mini-2024-07-18", "data_source": { - "type": "completions", + "type": "responses", "input_messages": { "type": "item_reference", "item_reference": "item.input" }, "model": "gpt-4o-mini-2024-07-18", "source": { - "type": "stored_completions", + "type": "responses", "model": "gpt-4o-mini-2024-07-18" } } @@ -20865,6 +22921,40 @@ components: - $ref: "#/components/schemas/CreateEvalResponsesRunDataSource" required: - data_source + CreateEvalStoredCompletionsDataSourceConfig: + type: object + title: StoredCompletionsDataSourceConfig + description: | + Deprecated in favor of LogsDataSourceConfig. + properties: + type: + type: string + enum: + - stored_completions + default: stored_completions + description: The type of data source. Always `stored_completions`. + x-stainless-const: true + metadata: + type: object + description: Metadata filters for the stored completions data source. + additionalProperties: true + example: | + { + "use_case": "customer_support_agent" + } + required: + - type + deprecated: true + x-oaiMeta: + name: The stored completions data source object for evals + group: evals + example: | + { + "type": "stored_completions", + "metadata": { + "use_case": "customer_support_agent" + } + } CreateFileRequest: type: object additionalProperties: false @@ -20943,8 +23033,8 @@ components: format. - See the [fine-tuning guide](/docs/guides/fine-tuning) for more - details. + See the [fine-tuning guide](/docs/guides/model-optimization) for + more details. type: string example: file-abc123 hyperparameters: @@ -21034,8 +23124,8 @@ components: file with the purpose `fine-tune`. - See the [fine-tuning guide](/docs/guides/fine-tuning) for more - details. + See the [fine-tuning guide](/docs/guides/model-optimization) for + more details. 
type: string nullable: true example: file-abc123 @@ -21143,7 +23233,7 @@ components: For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less - than 25MB. You can provide up to 16 images. + than 50MB. You can provide up to 16 images. For `dall-e-2`, you can only provide one image, and it should be a @@ -21164,6 +23254,32 @@ components: dimensions as `image`. type: string format: binary + background: + type: string + enum: + - transparent + - opaque + - auto + default: auto + example: transparent + nullable: true + description: > + Allows to set transparency for the background of the generated + image(s). + + This parameter is only supported for `gpt-image-1`. Must be one of + + `transparent`, `opaque` or `auto` (default value). When `auto` is + used, the + + model will automatically determine the best background for the + image. + + + If `transparent`, the output format needs to support transparency, + so it + + should be set to either `png` (default value) or `webp`. model: anyOf: - type: string @@ -21216,6 +23332,36 @@ components: image has been generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` will always return base64-encoded images. + output_format: + type: string + enum: + - png + - jpeg + - webp + default: png + example: png + nullable: true + description: > + The format in which the generated images are returned. This + parameter is + + only supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or + `webp`. + + The default value is `png`. + output_compression: + type: integer + default: 100 + example: 100 + nullable: true + description: > + The compression level (0-100%) for the generated images. This + parameter + + is only supported for `gpt-image-1` with the `webp` or `jpeg` + output + + formats, and defaults to 100. 
user: type: string example: user-1234 @@ -21534,6 +23680,18 @@ components: CreateModelResponseProperties: allOf: - $ref: "#/components/schemas/ModelResponseProperties" + - type: object + properties: + top_logprobs: + description: > + An integer between 0 and 20 specifying the number of most likely + tokens to + + return at each token position, each with an associated log + probability. + type: integer + minimum: 0 + maximum: 20 CreateModerationRequest: type: object properties: @@ -22028,13 +24186,13 @@ components: title: Text input description: > A text input to the model, equivalent to a text input with - the + the `user` role. - type: array title: Input item list description: | - A list of one or many input items to the model, containing + A list of one or many input items to the model, containing different content types. items: $ref: "#/components/schemas/InputItem" @@ -22046,13 +24204,26 @@ components: supported values are: + - `code_interpreter_call.outputs`: Includes the outputs of + python code execution + in code interpreter tool call items. + - `computer_call_output.output.image_url`: Include image urls + from the computer call output. + - `file_search_call.results`: Include the search results of the file search tool call. - `message.input_image.image_url`: Include image urls from the input message. - - `computer_call_output.output.image_url`: Include image urls - from the computer call output. + - `message.output_text.logprobs`: Include logprobs with + assistant messages. + + - `reasoning.encrypted_content`: Includes an encrypted version + of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like + when the `store` parameter is set to `false`, or when an organization is + enrolled in the zero data retention program). items: $ref: "#/components/schemas/Includable" nullable: true @@ -22071,6 +24242,21 @@ components: API. 
default: true nullable: true + instructions: + type: string + nullable: true + description: > + A system (or developer) message inserted into the model's + context. + + + When using along with `previous_response_id`, the instructions + from a previous + + response will not be carried over to the next response. This + makes it simple + + to swap out system (or developer) messages in new responses. stream: description: | If set to true, the model response data will be streamed to the client @@ -22080,9 +24266,6 @@ components: type: boolean nullable: true default: false - required: - - model - - input CreateRunRequest: type: object additionalProperties: false @@ -22257,10 +24440,24 @@ components: default: 1 minimum: 0.25 maximum: 4 + stream_format: + description: The format to stream the audio in. Supported formats are `sse` and + `audio`. `sse` is not supported for `tts-1` or `tts-1-hd`. + type: string + default: audio + enum: + - sse + - audio required: - model - input - voice + CreateSpeechResponseStreamEvent: + anyOf: + - $ref: "#/components/schemas/SpeechAudioDeltaEvent" + - $ref: "#/components/schemas/SpeechAudioDoneEvent" + discriminator: + propertyName: type CreateThreadAndRunRequest: type: object additionalProperties: false @@ -22661,6 +24858,25 @@ components: type: boolean nullable: true default: false + chunking_strategy: + description: 'Controls how the audio is cut into chunks. When set to `"auto"`, + the server first normalizes loudness and then uses voice activity + detection (VAD) to choose boundaries. `server_vad` object can be + provided to tweak VAD detection parameters manually. If unset, the + audio is transcribed as a single block. ' + anyOf: + - type: string + enum: + - auto + default: + - auto + description: > + Automatically set chunking parameters based on the audio. Must + be set to `"auto"`. 
+ x-stainless-const: true + - $ref: "#/components/schemas/VadConfig" + nullable: true + x-oaiTypeLabel: string required: - file - model @@ -22694,6 +24910,14 @@ components: items: type: number description: The bytes of the token. + usage: + type: object + description: Token usage statistics for the request. + oneOf: + - $ref: "#/components/schemas/TranscriptTextUsageTokens" + title: Token Usage + - $ref: "#/components/schemas/TranscriptTextUsageDuration" + title: Duration Usage required: - text x-oaiMeta: @@ -22701,7 +24925,17 @@ components: group: audio example: > { - "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." + "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that.", + "usage": { + "type": "tokens", + "input_tokens": 14, + "input_token_details": { + "text_tokens": 10, + "audio_tokens": 4 + }, + "output_tokens": 101, + "total_tokens": 115 + } } CreateTranscriptionResponseStreamEvent: anyOf: @@ -22733,6 +24967,8 @@ components: description: Segments of the transcribed text and their corresponding details. items: $ref: "#/components/schemas/TranscriptionSegment" + usage: + $ref: "#/components/schemas/TranscriptTextUsageDuration" required: - language - duration @@ -22762,7 +24998,11 @@ components: "no_speech_prob": 0.00985979475080967 }, ... - ] + ], + "usage": { + "type": "duration", + "seconds": 9 + } } CreateTranslationRequest: type: object @@ -23242,6 +25482,7 @@ components: guide](/docs/guides/embeddings). items: type: number + format: float object: type: string description: The object type, which is always "embedding". 
@@ -23318,7 +25559,7 @@ components: Like: - Improve the quality of my chatbot - See how well my chatbot handles customer support - - Check if o3-mini is better at my usecase than gpt-4o + - Check if o4-mini is better at my usecase than gpt-4o properties: object: type: string @@ -23339,6 +25580,7 @@ components: description: Configuration of data sources used in runs of the evaluation. oneOf: - $ref: "#/components/schemas/EvalCustomDataSourceConfig" + - $ref: "#/components/schemas/EvalLogsDataSourceConfig" - $ref: "#/components/schemas/EvalStoredCompletionsDataSourceConfig" testing_criteria: default: eval @@ -23346,11 +25588,11 @@ components: type: array items: oneOf: - - $ref: "#/components/schemas/EvalLabelModelGrader" - - $ref: "#/components/schemas/EvalStringCheckGrader" - - $ref: "#/components/schemas/EvalTextSimilarityGrader" - - $ref: "#/components/schemas/EvalPythonGrader" - - $ref: "#/components/schemas/EvalScoreModelGrader" + - $ref: "#/components/schemas/EvalGraderLabelModel" + - $ref: "#/components/schemas/EvalGraderStringCheck" + - $ref: "#/components/schemas/EvalGraderTextSimilarity" + - $ref: "#/components/schemas/EvalGraderPython" + - $ref: "#/components/schemas/EvalGraderScoreModel" created_at: type: integer description: The Unix timestamp (in seconds) for when the eval was created. @@ -23483,6 +25725,79 @@ components: "required": ["item"] } } + EvalGraderLabelModel: + type: object + title: LabelModelGrader + allOf: + - $ref: "#/components/schemas/GraderLabelModel" + EvalGraderPython: + type: object + title: PythonGrader + allOf: + - $ref: "#/components/schemas/GraderPython" + - type: object + properties: + pass_threshold: + type: number + description: The threshold for the score. 
+ x-oaiMeta: + name: Eval Python Grader + group: graders + example: | + { + "type": "python", + "name": "Example python grader", + "image_tag": "2025-05-08", + "source": """ + def grade(sample: dict, item: dict) -> float: + \""" + Returns 1.0 if `output_text` equals `label`, otherwise 0.0. + \""" + output = sample.get("output_text") + label = item.get("label") + return 1.0 if output == label else 0.0 + """, + "pass_threshold": 0.8 + } + EvalGraderScoreModel: + type: object + title: ScoreModelGrader + allOf: + - $ref: "#/components/schemas/GraderScoreModel" + - type: object + properties: + pass_threshold: + type: number + description: The threshold for the score. + EvalGraderStringCheck: + type: object + title: StringCheckGrader + allOf: + - $ref: "#/components/schemas/GraderStringCheck" + EvalGraderTextSimilarity: + type: object + title: TextSimilarityGrader + allOf: + - $ref: "#/components/schemas/GraderTextSimilarity" + - type: object + properties: + pass_threshold: + type: number + description: The threshold for the score. + required: + - pass_threshold + x-oaiMeta: + name: Text Similarity Grader + group: graders + example: | + { + "type": "text_similarity", + "name": "Example text similarity grader", + "input": "{{sample.output_text}}", + "reference": "{{item.label}}", + "pass_threshold": 0.8, + "evaluation_metric": "fuzzy_match" + } EvalItem: type: object title: Eval message object @@ -23595,84 +25910,6 @@ components: required: - type - id - EvalLabelModelGrader: - type: object - title: LabelModelGrader - description: > - A LabelModelGrader object which uses a model to assign labels to each - item - - in the evaluation. - properties: - type: - description: The object type, which is always `label_model`. - type: string - enum: - - label_model - x-stainless-const: true - name: - type: string - description: The name of the grader. - model: - type: string - description: The model to use for the evaluation. Must support structured outputs. 
- input: - type: array - items: - $ref: "#/components/schemas/EvalItem" - labels: - type: array - items: - type: string - description: The labels to assign to each item in the evaluation. - passing_labels: - type: array - items: - type: string - description: The labels that indicate a passing result. Must be a subset of - labels. - required: - - type - - model - - input - - passing_labels - - labels - - name - x-oaiMeta: - name: The eval label model grader object - group: evals - example: > - { - "name": "First label grader", - "type": "label_model", - "model": "gpt-4o-2024-08-06", - "input": [ - { - "type": "message", - "role": "system", - "content": { - "type": "input_text", - "text": "Classify the sentiment of the following statement as one of positive, neutral, or negative" - } - }, - { - "type": "message", - "role": "user", - "content": { - "type": "input_text", - "text": "Statement: {{item.response}}" - } - } - ], - "passing_labels": [ - "positive" - ], - "labels": [ - "positive", - "neutral", - "negative" - ] - } EvalList: type: object title: EvalList @@ -23763,44 +26000,62 @@ components: "last_id": "eval_67abd54d9b0081909a86353f6fb9317a", "has_more": true } - EvalPythonGrader: + EvalLogsDataSourceConfig: type: object - title: PythonGrader - description: | - A PythonGrader object that runs a python script on the input. + title: LogsDataSourceConfig + description: > + A LogsDataSourceConfig which specifies the metadata property of your + logs query. + + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, + etc. + + The schema returned by this data source config is used to defined what + variables are available in your evals. + + `item` and `sample` are both defined when using this data source config. properties: type: type: string enum: - - python - description: The object type, which is always `python`. + - logs + default: logs + description: The type of data source. Always `logs`. 
x-stainless-const: true - name: - type: string - description: The name of the grader. - source: - type: string - description: The source code of the python script. - pass_threshold: - type: number - description: The threshold for the score. - image_tag: - type: string - description: The image tag to use for the python script. + metadata: + $ref: "#/components/schemas/Metadata" + schema: + type: object + description: | + The json schema for the run data source items. + Learn how to build JSON schemas [here](https://json-schema.org/). + additionalProperties: true required: - type - - name - - source + - schema x-oaiMeta: - name: The eval python grader object + name: The logs data source object for evals group: evals example: | { - "type": "string_check", - "name": "Example string check grader", - "input": "{{sample.output_text}}", - "reference": "{{item.label}}", - "operation": "eq" + "type": "logs", + "metadata": { + "language": "english" + }, + "schema": { + "type": "object", + "properties": { + "item": { + "type": "object" + }, + "sample": { + "type": "object" + } + }, + "required": [ + "item", + "sample" + } } EvalResponsesSource: type: object @@ -23826,8 +26081,8 @@ components: instructions_search: type: string nullable: true - description: Optional search string for instructions. This is a query parameter - used to select responses. + description: Optional string to search the 'instructions' field. This is a query + parameter used to select responses. created_after: type: integer minimum: 0 @@ -23840,11 +26095,6 @@ components: nullable: true description: Only include items created before this timestamp (inclusive). This is a query parameter used to select responses. - has_tool_calls: - type: boolean - nullable: true - description: Whether the response has tool calls. This is a query parameter used - to select responses. 
reasoning_effort: $ref: "#/components/schemas/ReasoningEffort" nullable: true @@ -23867,11 +26117,13 @@ components: nullable: true description: List of user identifiers. This is a query parameter used to select responses. - allow_parallel_tool_calls: - type: boolean + tools: + type: array + items: + type: string nullable: true - description: Whether to allow parallel tool calls. This is a query parameter - used to select responses. + description: List of tool names. This is a query parameter used to select + responses. required: - type x-oaiMeta: @@ -23884,7 +26136,8 @@ components: "temperature": 0.7, "top_p": 1.0, "users": ["user1", "user2"], - "allow_parallel_tool_calls": true + "tools": ["tool1", "tool2"], + "instructions_search": "You are a coding assistant" } EvalRun: type: object @@ -24560,73 +26813,11 @@ components: "last_id": "outputitem_67abd55eb6548190bb580745d5644a33", "has_more": false } - EvalScoreModelGrader: - type: object - title: ScoreModelGrader - description: > - A ScoreModelGrader object that uses a model to assign a score to the - input. - properties: - type: - type: string - enum: - - score_model - description: The object type, which is always `score_model`. - x-stainless-const: true - name: - type: string - description: The name of the grader. - model: - type: string - description: The model to use for the evaluation. - sampling_params: - type: object - description: The sampling parameters for the model. - input: - type: array - items: - $ref: "#/components/schemas/EvalItem" - description: The input text. This may include template strings. - pass_threshold: - type: number - description: The threshold for the score. - range: - type: array - items: - type: number - min_items: 2 - max_items: 2 - description: The range of the score. Defaults to `[0, 1]`. 
- required: - - type - - name - - input - - model - x-oaiMeta: - name: The eval score model grader object - group: evals - example: | - { - "type": "score_model", - "name": "Example score model grader", - "input": "{{sample.output_text}}", - "reference": "{{item.label}}", - "operation": "eq" - } EvalStoredCompletionsDataSourceConfig: type: object title: StoredCompletionsDataSourceConfig - description: > - A StoredCompletionsDataSourceConfig which specifies the metadata - property of your stored completions query. - - This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, - etc. - - The schema returned by this data source config is used to defined what - variables are available in your evals. - - `item` and `sample` are both defined when using this data source config. + description: | + Deprecated in favor of LogsDataSourceConfig. properties: type: type: string @@ -24646,6 +26837,7 @@ components: required: - type - schema + deprecated: true x-oaiMeta: name: The stored completions data source object for evals group: evals @@ -24717,115 +26909,6 @@ components: "limit": 100, "metadata": {} } - EvalStringCheckGrader: - type: object - title: StringCheckGrader - description: > - A StringCheckGrader object that performs a string comparison between - input and reference using a specified operation. - properties: - type: - type: string - enum: - - string_check - description: The object type, which is always `string_check`. - x-stainless-const: true - name: - type: string - description: The name of the grader. - input: - type: string - description: The input text. This may include template strings. - reference: - type: string - description: The reference text. This may include template strings. - operation: - type: string - enum: - - eq - - ne - - like - - ilike - description: The string check operation to perform. One of `eq`, `ne`, `like`, - or `ilike`. 
- required: - - type - - name - - input - - reference - - operation - x-oaiMeta: - name: The eval string check grader object - group: evals - example: | - { - "type": "string_check", - "name": "Example string check grader", - "input": "{{sample.output_text}}", - "reference": "{{item.label}}", - "operation": "eq" - } - EvalTextSimilarityGrader: - type: object - title: TextSimilarityGrader - description: > - A TextSimilarityGrader object which grades text based on similarity - metrics. - properties: - type: - type: string - enum: - - text_similarity - default: text_similarity - description: The type of grader. - x-stainless-const: true - name: - type: string - description: The name of the grader. - input: - type: string - description: The text being graded. - reference: - type: string - description: The text being graded against. - pass_threshold: - type: number - description: A float score where a value greater than or equal indicates a - passing grade. - evaluation_metric: - type: string - enum: - - fuzzy_match - - bleu - - gleu - - meteor - - rouge_1 - - rouge_2 - - rouge_3 - - rouge_4 - - rouge_5 - - rouge_l - description: The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, - `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or - `rouge_l`. 
- required: - - type - - input - - reference - - pass_threshold - - evaluation_metric - x-oaiMeta: - name: The eval text similarity grader object - group: evals - example: | - { - "type": "text_similarity", - "name": "example text similarity grader", - "input": "The graded text", - "reference": "The reference text", - "pass_threshold": 0.8, - "evaluation_metric": "fuzzy_match" - } FilePath: type: object title: File path @@ -24962,8 +27045,14 @@ components: - role FineTuneChatRequestInput: type: object - description: The per-line training example of a fine-tuning input file for chat + description: > + The per-line training example of a fine-tuning input file for chat models using the supervised method. + + Input messages may contain text or image content only. Audio and file + input messages + + are not currently supported for fine-tuning. properties: messages: type: array @@ -25032,105 +27121,100 @@ components: } ] } - FineTuneCompletionRequestInput: + FineTuneDPOHyperparameters: type: object - description: The per-line training example of a fine-tuning input file for - completions models + description: The hyperparameters used for the DPO fine-tuning job. properties: - prompt: - type: string - description: The input prompt for this training example. - completion: - type: string - description: The desired completion for this training example. - x-oaiMeta: - name: Training format for completions models - example: | - { - "prompt": "What is the answer to 2+2", - "completion": "4" - } + beta: + description: > + The beta value for the DPO method. A higher beta value will increase + the weight of the penalty between the policy and reference model. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: number + minimum: 0 + maximum: 2 + exclusiveMinimum: true + default: auto + batch_size: + description: > + Number of examples in each batch. A larger batch size means that + model parameters are updated less frequently, but with lower + variance. 
+ oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: > + Scaling factor for the learning rate. A smaller learning rate may be + useful to avoid overfitting. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: > + The number of epochs to train the model for. An epoch refers to one + full cycle through the training dataset. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: integer + minimum: 1 + maximum: 50 + default: auto FineTuneDPOMethod: type: object description: Configuration for the DPO fine-tuning method. properties: hyperparameters: - type: object - description: The hyperparameters used for the fine-tuning job. - properties: - beta: - description: > - The beta value for the DPO method. A higher beta value will - increase the weight of the penalty between the policy and - reference model. - oneOf: - - type: string - enum: - - auto - x-stainless-const: true - - type: number - minimum: 0 - maximum: 2 - exclusiveMinimum: true - default: auto - batch_size: - description: > - Number of examples in each batch. A larger batch size means that - model parameters are updated less frequently, but with lower - variance. - oneOf: - - type: string - enum: - - auto - x-stainless-const: true - - type: integer - minimum: 1 - maximum: 256 - default: auto - learning_rate_multiplier: - description: > - Scaling factor for the learning rate. A smaller learning rate - may be useful to avoid overfitting. - oneOf: - - type: string - enum: - - auto - x-stainless-const: true - - type: number - minimum: 0 - exclusiveMinimum: true - default: auto - n_epochs: - description: > - The number of epochs to train the model for. An epoch refers to - one full cycle through the training dataset. 
- oneOf: - - type: string - enum: - - auto - x-stainless-const: true - - type: integer - minimum: 1 - maximum: 50 - default: auto + $ref: "#/components/schemas/FineTuneDPOHyperparameters" FineTuneMethod: type: object description: The method used for fine-tuning. properties: type: type: string - description: The type of method. Is either `supervised` or `dpo`. + description: The type of method. Is either `supervised`, `dpo`, or + `reinforcement`. enum: - supervised - dpo + - reinforcement supervised: $ref: "#/components/schemas/FineTuneSupervisedMethod" dpo: $ref: "#/components/schemas/FineTuneDPOMethod" + reinforcement: + $ref: "#/components/schemas/FineTuneReinforcementMethod" + required: + - type FineTunePreferenceRequestInput: type: object - description: The per-line training example of a fine-tuning input file for chat + description: > + The per-line training example of a fine-tuning input file for chat models using the dpo method. + + Input messages may contain text or image content only. Audio and file + input messages + + are not currently supported for fine-tuning. properties: input: type: object @@ -25152,14 +27236,14 @@ components: $ref: "#/components/schemas/ChatCompletionTool" parallel_tool_calls: $ref: "#/components/schemas/ParallelToolCalls" - preferred_completion: + preferred_output: type: array description: The preferred completion message for the output. maxItems: 1 items: oneOf: - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" - non_preferred_completion: + non_preferred_output: type: array description: The non-preferred completion message for the output. maxItems: 1 @@ -25175,67 +27259,224 @@ components: { "role": "user", "content": "What is the weather in San Francisco?" } ] }, - "preferred_completion": [ + "preferred_output": [ { "role": "assistant", "content": "The weather in San Francisco is 70 degrees Fahrenheit." 
} ], - "non_preferred_completion": [ + "non_preferred_output": [ { "role": "assistant", "content": "The weather in San Francisco is 21 degrees Celsius." } ] } + FineTuneReinforcementHyperparameters: + type: object + description: The hyperparameters used for the reinforcement fine-tuning job. + properties: + batch_size: + description: > + Number of examples in each batch. A larger batch size means that + model parameters are updated less frequently, but with lower + variance. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: > + Scaling factor for the learning rate. A smaller learning rate may be + useful to avoid overfitting. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: > + The number of epochs to train the model for. An epoch refers to one + full cycle through the training dataset. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: integer + minimum: 1 + maximum: 50 + default: auto + reasoning_effort: + description: | + Level of reasoning effort. + type: string + enum: + - default + - low + - medium + - high + default: default + compute_multiplier: + description: > + Multiplier on amount of compute used for exploring search space + during training. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: number + minimum: 0.00001 + maximum: 10 + exclusiveMinimum: true + default: auto + eval_interval: + description: | + The number of training steps between evaluation runs. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: integer + minimum: 1 + default: auto + eval_samples: + description: | + Number of evaluation samples to generate per training step. 
+ oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: integer + minimum: 1 + default: auto + FineTuneReinforcementMethod: + type: object + description: Configuration for the reinforcement fine-tuning method. + properties: + grader: + type: object + description: The grader used for the fine-tuning job. + oneOf: + - $ref: "#/components/schemas/GraderStringCheck" + - $ref: "#/components/schemas/GraderTextSimilarity" + - $ref: "#/components/schemas/GraderPython" + - $ref: "#/components/schemas/GraderScoreModel" + - $ref: "#/components/schemas/GraderMulti" + hyperparameters: + $ref: "#/components/schemas/FineTuneReinforcementHyperparameters" + required: + - grader + FineTuneReinforcementRequestInput: + type: object + unevaluatedProperties: true + description: > + Per-line training example for reinforcement fine-tuning. Note that + `messages` and `tools` are the only reserved keywords. + + Any other arbitrary key-value data can be included on training + datapoints and will be available to reference during grading under the + `{{ item.XXX }}` template variable. + + Input messages may contain text or image content only. Audio and file + input messages + + are not currently supported for fine-tuning. + required: + - messages + properties: + messages: + type: array + minItems: 1 + items: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestDeveloperMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + tools: + type: array + description: A list of tools the model may generate JSON inputs for. 
+ items: + $ref: "#/components/schemas/ChatCompletionTool" + x-oaiMeta: + name: Training format for reasoning models using the reinforcement method + example: > + { + "messages": [ + { + "role": "user", + "content": "Your task is to take a chemical in SMILES format and predict the number of hydrobond bond donors and acceptors according to Lipinkski's rule. CCN(CC)CCC(=O)c1sc(N)nc1C" + }, + ], + # Any other JSON data can be inserted into an example and referenced during RFT grading + "reference_answer": { + "donor_bond_counts": 5, + "acceptor_bond_counts": 7 + } + } + FineTuneSupervisedHyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + batch_size: + description: > + Number of examples in each batch. A larger batch size means that + model parameters are updated less frequently, but with lower + variance. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: > + Scaling factor for the learning rate. A smaller learning rate may be + useful to avoid overfitting. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: > + The number of epochs to train the model for. An epoch refers to one + full cycle through the training dataset. + oneOf: + - type: string + enum: + - auto + x-stainless-const: true + - type: integer + minimum: 1 + maximum: 50 + default: auto FineTuneSupervisedMethod: type: object description: Configuration for the supervised fine-tuning method. properties: hyperparameters: - type: object - description: The hyperparameters used for the fine-tuning job. - properties: - batch_size: - description: > - Number of examples in each batch. A larger batch size means that - model parameters are updated less frequently, but with lower - variance. 
- oneOf: - - type: string - enum: - - auto - x-stainless-const: true - - type: integer - minimum: 1 - maximum: 256 - default: auto - learning_rate_multiplier: - description: > - Scaling factor for the learning rate. A smaller learning rate - may be useful to avoid overfitting. - oneOf: - - type: string - enum: - - auto - x-stainless-const: true - - type: number - minimum: 0 - exclusiveMinimum: true - default: auto - n_epochs: - description: > - The number of epochs to train the model for. An epoch refers to - one full cycle through the training dataset. - oneOf: - - type: string - enum: - - auto - x-stainless-const: true - - type: integer - minimum: 1 - maximum: 50 - default: auto + $ref: "#/components/schemas/FineTuneSupervisedHyperparameters" FineTuningCheckpointPermission: type: object title: FineTuningCheckpointPermission @@ -25384,6 +27625,7 @@ components: only be returned when running `supervised` jobs. properties: batch_size: + nullable: true description: > Number of examples in each batch. A larger batch size means that model parameters @@ -25829,749 +28071,998 @@ components: The unique ID of the function tool call. required: - id - Image: + GraderLabelModel: type: object - description: Represents the content or the URL of an image generated by the - OpenAI API. + title: LabelModelGrader + description: > + A LabelModelGrader object which uses a model to assign labels to each + item + + in the evaluation. properties: - b64_json: + type: + description: The object type, which is always `label_model`. type: string - description: The base64-encoded JSON of the generated image. Default value for - `gpt-image-1`, and only present if `response_format` is set to - `b64_json` for `dall-e-2` and `dall-e-3`. - url: + enum: + - label_model + x-stainless-const: true + name: type: string - description: When using `dall-e-2` or `dall-e-3`, the URL of the generated image - if `response_format` is set to `url` (default value). Unsupported - for `gpt-image-1`. 
- revised_prompt: + description: The name of the grader. + model: type: string - description: For `dall-e-3` only, the revised prompt that was used to generate - the image. - ImagesResponse: - type: object - title: Image generation response - description: The response from the image generation endpoint. - properties: - created: - type: integer - description: The Unix timestamp (in seconds) of when the image was created. - data: + description: The model to use for the evaluation. Must support structured outputs. + input: type: array - description: The list of generated images. items: - $ref: "#/components/schemas/Image" - usage: - type: object - description: > - For `gpt-image-1` only, the token usage information for the image - generation. - required: - - total_tokens - - input_tokens - - output_tokens - - input_tokens_details - properties: - total_tokens: - type: integer - description: The total number of tokens (images and text) used for the image - generation. - input_tokens: - type: integer - description: The number of tokens (images and text) in the input prompt. - output_tokens: - type: integer - description: The number of image tokens in the output image. - input_tokens_details: - type: object - description: The input tokens detailed information for the image generation. - required: - - text_tokens - - image_tokens - properties: - text_tokens: - type: integer - description: The number of text tokens in the input prompt. - image_tokens: - type: integer - description: The number of image tokens in the input prompt. + $ref: "#/components/schemas/EvalItem" + labels: + type: array + items: + type: string + description: The labels to assign to each item in the evaluation. + passing_labels: + type: array + items: + type: string + description: The labels that indicate a passing result. Must be a subset of + labels. 
required: - - created + - type + - model + - input + - passing_labels + - labels + - name x-oaiMeta: - name: The image generation response - group: images - example: | + name: Label Model Grader + group: graders + example: > { - "created": 1713833628, - "data": [ + "name": "First label grader", + "type": "label_model", + "model": "gpt-4o-2024-08-06", + "input": [ { - "b64_json": "..." + "type": "message", + "role": "system", + "content": { + "type": "input_text", + "text": "Classify the sentiment of the following statement as one of positive, neutral, or negative" + } + }, + { + "type": "message", + "role": "user", + "content": { + "type": "input_text", + "text": "Statement: {{item.response}}" + } } ], - "usage": { - "total_tokens": 100, - "input_tokens": 50, - "output_tokens": 50, - "input_tokens_details": { - "text_tokens": 10, - "image_tokens": 40 - } - } + "passing_labels": [ + "positive" + ], + "labels": [ + "positive", + "neutral", + "negative" + ] } - Includable: - type: string - description: > - Specify additional output data to include in the model response. - Currently - - supported values are: - - - `file_search_call.results`: Include the search results of - the file search tool call. - - `message.input_image.image_url`: Include image urls from the input - message. - - - `computer_call_output.output.image_url`: Include image urls from the - computer call output. - enum: - - file_search_call.results - - message.input_image.image_url - - computer_call_output.output.image_url - InputAudio: + GraderMulti: type: object - title: Audio input - description: | - An audio input to the model. + title: MultiGrader + description: A MultiGrader object combines the output of multiple graders to + produce a single score. properties: type: type: string - description: | - The type of the input item. Always `input_audio`. enum: - - input_audio + - multi + default: multi + description: The object type, which is always `multi`. 
x-stainless-const: true - data: + name: type: string - description: | - Base64-encoded audio data. - format: + description: The name of the grader. + graders: + oneOf: + - $ref: "#/components/schemas/GraderStringCheck" + - $ref: "#/components/schemas/GraderTextSimilarity" + - $ref: "#/components/schemas/GraderPython" + - $ref: "#/components/schemas/GraderScoreModel" + - $ref: "#/components/schemas/GraderLabelModel" + calculate_output: type: string - description: > - The format of the audio data. Currently supported formats are `mp3` - and - - `wav`. - enum: - - mp3 - - wav + description: A formula to calculate the output based on grader results. required: + - name - type - - data - - format - InputContent: - oneOf: - - $ref: "#/components/schemas/InputTextContent" - - $ref: "#/components/schemas/InputImageContent" - - $ref: "#/components/schemas/InputFileContent" - InputItem: - oneOf: - - $ref: "#/components/schemas/EasyInputMessage" - - type: object - title: Item - description: | - An item representing part of the context for the response to be - generated by the model. Can contain text, images, and audio inputs, - as well as previous assistant responses and tool call outputs. 
- $ref: "#/components/schemas/Item" - - $ref: "#/components/schemas/ItemReferenceParam" - discriminator: - propertyName: type - InputMessage: + - graders + - calculate_output + x-oaiMeta: + name: Multi Grader + group: graders + example: > + { + "type": "multi", + "name": "example multi grader", + "graders": [ + { + "type": "text_similarity", + "name": "example text similarity grader", + "input": "The graded text", + "reference": "The reference text", + "evaluation_metric": "fuzzy_match" + }, + { + "type": "string_check", + "name": "Example string check grader", + "input": "{{sample.output_text}}", + "reference": "{{item.label}}", + "operation": "eq" + } + ], + "calculate_output": "0.5 * text_similarity_score + 0.5 * string_check_score)" + } + GraderPython: type: object - title: Input message - description: > - A message input to the model with a role indicating instruction - following - - hierarchy. Instructions given with the `developer` or `system` role take - - precedence over instructions given with the `user` role. + title: PythonGrader + description: | + A PythonGrader object that runs a python script on the input. properties: type: type: string - description: | - The type of the message input. Always set to `message`. enum: - - message + - python + description: The object type, which is always `python`. x-stainless-const: true - role: + name: type: string - description: > - The role of the message input. One of `user`, `system`, or - `developer`. - enum: - - user - - system - - developer - status: + description: The name of the grader. + source: type: string - description: | - The status of item. One of `in_progress`, `completed`, or - `incomplete`. Populated when items are returned via API. - enum: - - in_progress - - completed - - incomplete - content: - $ref: "#/components/schemas/InputMessageContentList" + description: The source code of the python script. + image_tag: + type: string + description: The image tag to use for the python script. 
required: - - role - - content - InputMessageContentList: - type: array - title: Input item content list - description: > - A list of one or many input items to the model, containing different - content - - types. - items: - $ref: "#/components/schemas/InputContent" - InputMessageResource: - allOf: - - $ref: "#/components/schemas/InputMessage" - - type: object - properties: - id: - type: string - description: | - The unique ID of the message input. - required: - - id - Invite: + - type + - name + - source + x-oaiMeta: + name: Python Grader + group: graders + example: | + { + "type": "python", + "name": "Example python grader", + "image_tag": "2025-05-08", + "source": """ + def grade(sample: dict, item: dict) -> float: + \""" + Returns 1.0 if `output_text` equals `label`, otherwise 0.0. + \""" + output = sample.get("output_text") + label = item.get("label") + return 1.0 if output == label else 0.0 + """, + } + GraderScoreModel: type: object - description: Represents an individual `invite` to the organization. + title: ScoreModelGrader + description: > + A ScoreModelGrader object that uses a model to assign a score to the + input. properties: - object: + type: type: string enum: - - organization.invite - description: The object type, which is always `organization.invite` + - score_model + description: The object type, which is always `score_model`. x-stainless-const: true - id: - type: string - description: The identifier, which can be referenced in API endpoints - email: - type: string - description: The email address of the individual to whom the invite was sent - role: + name: type: string - enum: - - owner - - reader - description: "`owner` or `reader`" - status: + description: The name of the grader. + model: type: string - enum: - - accepted - - expired - - pending - description: "`accepted`,`expired`, or `pending`" - invited_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was sent. 
- expires_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite expires. - accepted_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was accepted. - projects: + description: The model to use for the evaluation. + sampling_params: + type: object + description: The sampling parameters for the model. + input: type: array - description: The projects that were granted membership upon acceptance of the - invite. items: - type: object - properties: - id: - type: string - description: Project's public ID - role: - type: string - enum: - - member - - owner - description: Project membership role - required: - - object - - id - - email - - role - - status - - invited_at - - expires_at + $ref: "#/components/schemas/EvalItem" + description: The input text. This may include template strings. + range: + type: array + items: + type: number + min_items: 2 + max_items: 2 + description: The range of the score. Defaults to `[0, 1]`. + required: + - type + - name + - input + - model x-oaiMeta: - name: The invite object - example: | + name: Score Model Grader + group: graders + example: > { - "object": "organization.invite", - "id": "invite-abc", - "email": "user@example.com", - "role": "owner", - "status": "accepted", - "invited_at": 1711471533, - "expires_at": 1711471533, - "accepted_at": 1711471533, - "projects": [ - { - "id": "project-xyz", - "role": "member" - } - ] + "type": "score_model", + "name": "Example score model grader", + "input": [ + { + "role": "user", + "content": ( + "Score how close the reference answer is to the model answer. Score 1.0 if they are the same and 0.0 if they are different." 
+ " Return just a floating point score\n\n" + " Reference answer: {{item.label}}\n\n" + " Model answer: {{sample.output_text}}" + ), + } + ], + "model": "gpt-4o-2024-08-06", + "sampling_params": { + "temperature": 1, + "top_p": 1, + "seed": 42, + }, } - InviteDeleteResponse: - type: object - properties: - object: - type: string - enum: - - organization.invite.deleted - description: The object type, which is always `organization.invite.deleted` - x-stainless-const: true - id: - type: string - deleted: - type: boolean - required: - - object - - id - - deleted - InviteListResponse: + GraderStringCheck: type: object + title: StringCheckGrader + description: > + A StringCheckGrader object that performs a string comparison between + input and reference using a specified operation. properties: - object: + type: type: string enum: - - list - description: The object type, which is always `list` + - string_check + description: The object type, which is always `string_check`. x-stainless-const: true - data: - type: array - items: - $ref: "#/components/schemas/Invite" - first_id: + name: type: string - description: The first `invite_id` in the retrieved `list` - last_id: + description: The name of the grader. + input: type: string - description: The last `invite_id` in the retrieved `list` - has_more: - type: boolean - description: The `has_more` property is used for pagination to indicate there - are additional results. - required: - - object - - data - InviteRequest: - type: object - properties: - email: + description: The input text. This may include template strings. + reference: type: string - description: Send an email to this address - role: + description: The reference text. This may include template strings. + operation: type: string enum: - - reader - - owner - description: "`owner` or `reader`" - projects: - type: array - description: An array of projects to which membership is granted at the same - time the org invite is accepted. 
If omitted, the user will be - invited to the default project for compatibility with legacy - behavior. - items: - type: object - properties: - id: - type: string - description: Project's public ID - role: - type: string - enum: - - member - - owner - description: Project membership role - required: - - id - - role + - eq + - ne + - like + - ilike + description: The string check operation to perform. One of `eq`, `ne`, `like`, + or `ilike`. required: - - email - - role - Item: - type: object - description: | - Content item used to generate a response. - oneOf: - - $ref: "#/components/schemas/InputMessage" - - $ref: "#/components/schemas/OutputMessage" - - $ref: "#/components/schemas/FileSearchToolCall" - - $ref: "#/components/schemas/ComputerToolCall" - - $ref: "#/components/schemas/ComputerCallOutputItemParam" - - $ref: "#/components/schemas/WebSearchToolCall" - - $ref: "#/components/schemas/FunctionToolCall" - - $ref: "#/components/schemas/FunctionCallOutputItemParam" - - $ref: "#/components/schemas/ReasoningItem" - discriminator: - propertyName: type - ItemResource: - description: | - Content item used to generate a response. 
- oneOf: - - $ref: "#/components/schemas/InputMessageResource" - - $ref: "#/components/schemas/OutputMessage" - - $ref: "#/components/schemas/FileSearchToolCall" - - $ref: "#/components/schemas/ComputerToolCall" - - $ref: "#/components/schemas/ComputerToolCallOutputResource" - - $ref: "#/components/schemas/WebSearchToolCall" - - $ref: "#/components/schemas/FunctionToolCallResource" - - $ref: "#/components/schemas/FunctionToolCallOutputResource" - discriminator: - propertyName: type - KeyPress: + - type + - name + - input + - reference + - operation + x-oaiMeta: + name: String Check Grader + group: graders + example: | + { + "type": "string_check", + "name": "Example string check grader", + "input": "{{sample.output_text}}", + "reference": "{{item.label}}", + "operation": "eq" + } + GraderTextSimilarity: type: object - title: KeyPress - description: | - A collection of keypresses the model would like to perform. + title: TextSimilarityGrader + description: > + A TextSimilarityGrader object which grades text based on similarity + metrics. properties: type: type: string enum: - - keypress - default: keypress - description: | - Specifies the event type. For a keypress action, this property is - always set to `keypress`. + - text_similarity + default: text_similarity + description: The type of grader. x-stainless-const: true - keys: - type: array - items: - type: string - description: | - One of the keys the model is requesting to be pressed. - description: > - The combination of keys the model is requesting to be pressed. This - is an - - array of strings, each representing a key. - required: - - type - - keys - ListAssistantsResponse: - type: object - properties: - object: + name: type: string - example: list - data: - type: array - items: - $ref: "#/components/schemas/AssistantObject" - first_id: + description: The name of the grader. + input: type: string - example: asst_abc123 - last_id: + description: The text being graded. 
+ reference: type: string - example: asst_abc456 - has_more: - type: boolean - example: false + description: The text being graded against. + evaluation_metric: + type: string + enum: + - fuzzy_match + - bleu + - gleu + - meteor + - rouge_1 + - rouge_2 + - rouge_3 + - rouge_4 + - rouge_5 + - rouge_l + description: The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, + `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or + `rouge_l`. required: - - object - - data - - first_id - - last_id - - has_more + - type + - name + - input + - reference + - evaluation_metric x-oaiMeta: - name: List assistants response object - group: chat - example: > + name: Text Similarity Grader + group: graders + example: | { - "object": "list", - "data": [ - { - "id": "asst_abc123", - "object": "assistant", - "created_at": 1698982736, - "name": "Coding Tutor", - "description": null, - "model": "gpt-4o", - "instructions": "You are a helpful assistant designed to make me better at coding!", - "tools": [], - "tool_resources": {}, - "metadata": {}, - "top_p": 1.0, - "temperature": 1.0, - "response_format": "auto" - }, - { - "id": "asst_abc456", - "object": "assistant", - "created_at": 1698982718, - "name": "My Assistant", - "description": null, - "model": "gpt-4o", - "instructions": "You are a helpful assistant designed to make me better at coding!", - "tools": [], - "tool_resources": {}, - "metadata": {}, - "top_p": 1.0, - "temperature": 1.0, - "response_format": "auto" - }, - { - "id": "asst_abc789", - "object": "assistant", - "created_at": 1698982643, - "name": null, - "description": null, - "model": "gpt-4o", - "instructions": null, - "tools": [], - "tool_resources": {}, - "metadata": {}, - "top_p": 1.0, - "temperature": 1.0, - "response_format": "auto" - } - ], - "first_id": "asst_abc123", - "last_id": "asst_abc789", - "has_more": false + "type": "text_similarity", + "name": "Example text similarity grader", + "input": "{{sample.output_text}}", + "reference": 
"{{item.label}}", + "evaluation_metric": "fuzzy_match" } - ListAuditLogsResponse: + Image: type: object + description: Represents the content or the URL of an image generated by the + OpenAI API. properties: - object: + b64_json: type: string - enum: - - list - x-stainless-const: true - data: - type: array - items: - $ref: "#/components/schemas/AuditLog" - first_id: + description: The base64-encoded JSON of the generated image. Default value for + `gpt-image-1`, and only present if `response_format` is set to + `b64_json` for `dall-e-2` and `dall-e-3`. + url: type: string - example: audit_log-defb456h8dks - last_id: + description: When using `dall-e-2` or `dall-e-3`, the URL of the generated image + if `response_format` is set to `url` (default value). Unsupported + for `gpt-image-1`. + revised_prompt: type: string - example: audit_log-hnbkd8s93s - has_more: - type: boolean - required: - - object - - data - - first_id - - last_id - - has_more - ListBatchesResponse: + description: For `dall-e-3` only, the revised prompt that was used to generate + the image. + ImageGenTool: type: object + title: Image generation tool + description: | + A tool that generates images using a model like `gpt-image-1`. properties: - data: - type: array - items: - $ref: "#/components/schemas/Batch" - first_id: - type: string - example: batch_abc123 - last_id: - type: string - example: batch_abc456 - has_more: - type: boolean - object: + type: type: string enum: - - list + - image_generation + description: | + The type of the image generation tool. Always `image_generation`. x-stainless-const: true - required: - - object - - data - - has_more - ListCertificatesResponse: - type: object - properties: - data: - type: array - items: - $ref: "#/components/schemas/Certificate" - first_id: + model: type: string - example: cert_abc - last_id: + enum: + - gpt-image-1 + description: | + The image generation model to use. Default: `gpt-image-1`. 
+ default: gpt-image-1 + quality: type: string - example: cert_abc - has_more: - type: boolean - object: + enum: + - low + - medium + - high + - auto + description: | + The quality of the generated image. One of `low`, `medium`, `high`, + or `auto`. Default: `auto`. + default: auto + size: type: string enum: - - list - x-stainless-const: true - required: - - object - - data - - has_more - ListFilesResponse: - type: object - properties: - object: + - 1024x1024 + - 1024x1536 + - 1536x1024 + - auto + description: | + The size of the generated image. One of `1024x1024`, `1024x1536`, + `1536x1024`, or `auto`. Default: `auto`. + default: auto + output_format: type: string - example: list - data: - type: array - items: - $ref: "#/components/schemas/OpenAIFile" - first_id: + enum: + - png + - webp + - jpeg + description: | + The output format of the generated image. One of `png`, `webp`, or + `jpeg`. Default: `png`. + default: png + output_compression: + type: integer + minimum: 0 + maximum: 100 + description: | + Compression level for the output image. Default: 100. + default: 100 + moderation: type: string - example: file-abc123 - last_id: + enum: + - auto + - low + description: | + Moderation level for the generated image. Default: `auto`. + default: auto + background: type: string - example: file-abc456 - has_more: - type: boolean - example: false + enum: + - transparent + - opaque + - auto + description: | + Background type for the generated image. One of `transparent`, + `opaque`, or `auto`. Default: `auto`. + default: auto + input_image_mask: + type: object + description: | + Optional mask for inpainting. Contains `image_url` + (string, optional) and `file_id` (string, optional). + properties: + image_url: + type: string + description: | + Base64-encoded mask image. + file_id: + type: string + description: | + File ID for the mask image. 
+ required: [] + additionalProperties: false + partial_images: + type: integer + minimum: 0 + maximum: 3 + description: > + Number of partial images to generate in streaming mode, from 0 + (default value) to 3. + default: 0 required: - - object - - data - - first_id - - last_id - - has_more - ListFineTuningCheckpointPermissionResponse: + - type + ImageGenToolCall: type: object + title: Image generation call + description: | + An image generation request made by the model. properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningCheckpointPermission" - object: + type: type: string enum: - - list + - image_generation_call + description: > + The type of the image generation call. Always + `image_generation_call`. x-stainless-const: true - first_id: + id: type: string - nullable: true - last_id: + description: | + The unique ID of the image generation call. + status: + type: string + enum: + - in_progress + - completed + - generating + - failed + description: | + The status of the image generation call. + result: type: string + description: | + The generated image encoded in base64. nullable: true - has_more: - type: boolean required: - - object - - data - - has_more - ListFineTuningJobCheckpointsResponse: + - type + - id + - status + - result + ImagesResponse: type: object + title: Image generation response + description: The response from the image generation endpoint. properties: + created: + type: integer + description: The Unix timestamp (in seconds) of when the image was created. data: type: array + description: The list of generated images. items: - $ref: "#/components/schemas/FineTuningJobCheckpoint" - object: + $ref: "#/components/schemas/Image" + background: type: string + description: The background parameter used for the image generation. Either + `transparent` or `opaque`. enum: - - list + - transparent + - opaque + output_format: + type: string + description: The output format of the image generation. 
Either `png`, `webp`, or + `jpeg`. + enum: + - png + - webp + - jpeg + size: + type: string + description: The size of the image generated. Either `1024x1024`, `1024x1536`, + or `1536x1024`. + enum: + - 1024x1024 + - 1024x1536 + - 1536x1024 + quality: + type: string + description: The quality of the image generated. Either `low`, `medium`, or + `high`. + enum: + - low + - medium + - high + usage: + type: object + description: > + For `gpt-image-1` only, the token usage information for the image + generation. + required: + - total_tokens + - input_tokens + - output_tokens + - input_tokens_details + properties: + total_tokens: + type: integer + description: The total number of tokens (images and text) used for the image + generation. + input_tokens: + type: integer + description: The number of tokens (images and text) in the input prompt. + output_tokens: + type: integer + description: The number of image tokens in the output image. + input_tokens_details: + type: object + description: The input tokens detailed information for the image generation. + required: + - text_tokens + - image_tokens + properties: + text_tokens: + type: integer + description: The number of text tokens in the input prompt. + image_tokens: + type: integer + description: The number of image tokens in the input prompt. + required: + - created + x-oaiMeta: + name: The image generation response + group: images + example: | + { + "created": 1713833628, + "data": [ + { + "b64_json": "..." + } + ], + "background": "transparent", + "output_format": "png", + "size": "1024x1024", + "quality": "high", + "usage": { + "total_tokens": 100, + "input_tokens": 50, + "output_tokens": 50, + "input_tokens_details": { + "text_tokens": 10, + "image_tokens": 40 + } + } + } + Includable: + type: string + description: > + Specify additional output data to include in the model response. 
+ Currently + + supported values are: + + - `code_interpreter_call.outputs`: Includes the outputs of python code + execution + in code interpreter tool call items. + - `computer_call_output.output.image_url`: Include image urls from the + computer call output. + + - `file_search_call.results`: Include the search results of + the file search tool call. + - `message.input_image.image_url`: Include image urls from the input + message. + + - `message.output_text.logprobs`: Include logprobs with assistant + messages. + + - `reasoning.encrypted_content`: Includes an encrypted version of + reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like + when the `store` parameter is set to `false`, or when an organization is + enrolled in the zero data retention program). + enum: + - code_interpreter_call.outputs + - computer_call_output.output.image_url + - file_search_call.results + - message.input_image.image_url + - message.output_text.logprobs + - reasoning.encrypted_content + InputAudio: + type: object + title: Audio input + description: | + An audio input to the model. + properties: + type: + type: string + description: | + The type of the input item. Always `input_audio`. + enum: + - input_audio x-stainless-const: true - first_id: + data: type: string - nullable: true - last_id: + description: | + Base64-encoded audio data. + format: type: string - nullable: true - has_more: - type: boolean + description: > + The format of the audio data. Currently supported formats are `mp3` + and + + `wav`. 
+ enum: + - mp3 + - wav required: - - object + - type - data - - has_more - ListFineTuningJobEventsResponse: + - format + InputContent: + oneOf: + - $ref: "#/components/schemas/InputTextContent" + - $ref: "#/components/schemas/InputImageContent" + - $ref: "#/components/schemas/InputFileContent" + InputItem: + oneOf: + - $ref: "#/components/schemas/EasyInputMessage" + - type: object + title: Item + description: | + An item representing part of the context for the response to be + generated by the model. Can contain text, images, and audio inputs, + as well as previous assistant responses and tool call outputs. + $ref: "#/components/schemas/Item" + - $ref: "#/components/schemas/ItemReferenceParam" + discriminator: + propertyName: type + InputMessage: type: object + title: Input message + description: > + A message input to the model with a role indicating instruction + following + + hierarchy. Instructions given with the `developer` or `system` role take + + precedence over instructions given with the `user` role. properties: - data: + type: + type: string + description: | + The type of the message input. Always set to `message`. + enum: + - message + x-stainless-const: true + role: + type: string + description: > + The role of the message input. One of `user`, `system`, or + `developer`. + enum: + - user + - system + - developer + status: + type: string + description: | + The status of item. One of `in_progress`, `completed`, or + `incomplete`. Populated when items are returned via API. + enum: + - in_progress + - completed + - incomplete + content: + $ref: "#/components/schemas/InputMessageContentList" + required: + - role + - content + InputMessageContentList: + type: array + title: Input item content list + description: > + A list of one or many input items to the model, containing different + content + + types. 
+ items: + $ref: "#/components/schemas/InputContent" + InputMessageResource: + allOf: + - $ref: "#/components/schemas/InputMessage" + - type: object + properties: + id: + type: string + description: | + The unique ID of the message input. + required: + - id + Invite: + type: object + description: Represents an individual `invite` to the organization. + properties: + object: + type: string + enum: + - organization.invite + description: The object type, which is always `organization.invite` + x-stainless-const: true + id: + type: string + description: The identifier, which can be referenced in API endpoints + email: + type: string + description: The email address of the individual to whom the invite was sent + role: + type: string + enum: + - owner + - reader + description: "`owner` or `reader`" + status: + type: string + enum: + - accepted + - expired + - pending + description: "`accepted`,`expired`, or `pending`" + invited_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was accepted. + projects: type: array + description: The projects that were granted membership upon acceptance of the + invite. 
items: - $ref: "#/components/schemas/FineTuningJobEvent" + type: object + properties: + id: + type: string + description: Project's public ID + role: + type: string + enum: + - member + - owner + description: Project membership role + required: + - object + - id + - email + - role + - status + - invited_at + - expires_at + x-oaiMeta: + name: The invite object + example: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533, + "projects": [ + { + "id": "project-xyz", + "role": "member" + } + ] + } + InviteDeleteResponse: + type: object + properties: object: type: string enum: - - list + - organization.invite.deleted + description: The object type, which is always `organization.invite.deleted` x-stainless-const: true - has_more: + id: + type: string + deleted: type: boolean required: - object - - data - - has_more - ListMessagesResponse: + - id + - deleted + InviteListResponse: + type: object properties: object: type: string - example: list + enum: + - list + description: The object type, which is always `list` + x-stainless-const: true data: type: array items: - $ref: "#/components/schemas/MessageObject" + $ref: "#/components/schemas/Invite" first_id: type: string - example: msg_abc123 + description: The first `invite_id` in the retrieved `list` last_id: type: string - example: msg_abc123 + description: The last `invite_id` in the retrieved `list` has_more: type: boolean - example: false + description: The `has_more` property is used for pagination to indicate there + are additional results. 
required: - object - data - - first_id - - last_id - - has_more - ListModelsResponse: + InviteRequest: type: object properties: - object: + email: + type: string + description: Send an email to this address + role: type: string enum: - - list - x-stainless-const: true - data: + - reader + - owner + description: "`owner` or `reader`" + projects: type: array + description: An array of projects to which membership is granted at the same + time the org invite is accepted. If omitted, the user will be + invited to the default project for compatibility with legacy + behavior. items: - $ref: "#/components/schemas/Model" + type: object + properties: + id: + type: string + description: Project's public ID + role: + type: string + enum: + - member + - owner + description: Project membership role + required: + - id + - role required: - - object - - data - ListPaginatedFineTuningJobsResponse: + - email + - role + Item: + type: object + description: | + Content item used to generate a response. + oneOf: + - $ref: "#/components/schemas/InputMessage" + - $ref: "#/components/schemas/OutputMessage" + - $ref: "#/components/schemas/FileSearchToolCall" + - $ref: "#/components/schemas/ComputerToolCall" + - $ref: "#/components/schemas/ComputerCallOutputItemParam" + - $ref: "#/components/schemas/WebSearchToolCall" + - $ref: "#/components/schemas/FunctionToolCall" + - $ref: "#/components/schemas/FunctionCallOutputItemParam" + - $ref: "#/components/schemas/ReasoningItem" + - $ref: "#/components/schemas/ImageGenToolCall" + - $ref: "#/components/schemas/CodeInterpreterToolCall" + - $ref: "#/components/schemas/LocalShellToolCall" + - $ref: "#/components/schemas/LocalShellToolCallOutput" + - $ref: "#/components/schemas/MCPListTools" + - $ref: "#/components/schemas/MCPApprovalRequest" + - $ref: "#/components/schemas/MCPApprovalResponse" + - $ref: "#/components/schemas/MCPToolCall" + discriminator: + propertyName: type + ItemResource: + description: | + Content item used to generate a response. 
+ oneOf: + - $ref: "#/components/schemas/InputMessageResource" + - $ref: "#/components/schemas/OutputMessage" + - $ref: "#/components/schemas/FileSearchToolCall" + - $ref: "#/components/schemas/ComputerToolCall" + - $ref: "#/components/schemas/ComputerToolCallOutputResource" + - $ref: "#/components/schemas/WebSearchToolCall" + - $ref: "#/components/schemas/FunctionToolCallResource" + - $ref: "#/components/schemas/FunctionToolCallOutputResource" + - $ref: "#/components/schemas/ImageGenToolCall" + - $ref: "#/components/schemas/CodeInterpreterToolCall" + - $ref: "#/components/schemas/LocalShellToolCall" + - $ref: "#/components/schemas/LocalShellToolCallOutput" + - $ref: "#/components/schemas/MCPListTools" + - $ref: "#/components/schemas/MCPApprovalRequest" + - $ref: "#/components/schemas/MCPApprovalResponseResource" + - $ref: "#/components/schemas/MCPToolCall" + discriminator: + propertyName: type + KeyPress: type: object + title: KeyPress + description: | + A collection of keypresses the model would like to perform. properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJob" - has_more: - type: boolean - object: + type: type: string enum: - - list + - keypress + default: keypress + description: | + Specifies the event type. For a keypress action, this property is + always set to `keypress`. x-stainless-const: true + keys: + type: array + items: + type: string + description: | + One of the keys the model is requesting to be pressed. + description: > + The combination of keys the model is requesting to be pressed. This + is an + + array of strings, each representing a key. 
required: - - object - - data - - has_more - ListRunStepsResponse: + - type + - keys + ListAssistantsResponse: + type: object properties: object: type: string @@ -26579,13 +29070,13 @@ components: data: type: array items: - $ref: "#/components/schemas/RunStepObject" + $ref: "#/components/schemas/AssistantObject" first_id: type: string - example: step_abc123 + example: asst_abc123 last_id: type: string - example: step_abc456 + example: asst_abc456 has_more: type: boolean example: false @@ -26595,32 +29086,335 @@ components: - first_id - last_id - has_more - ListRunsResponse: + x-oaiMeta: + name: List assistants response object + group: chat + example: > + { + "object": "list", + "data": [ + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698982736, + "name": "Coding Tutor", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc456", + "object": "assistant", + "created_at": 1698982718, + "name": "My Assistant", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc789", + "object": "assistant", + "created_at": 1698982643, + "name": null, + "description": null, + "model": "gpt-4o", + "instructions": null, + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + ], + "first_id": "asst_abc123", + "last_id": "asst_abc789", + "has_more": false + } + ListAuditLogsResponse: type: object properties: object: type: string - example: list + enum: + - list + x-stainless-const: true data: type: array items: - $ref: "#/components/schemas/RunObject" 
+ $ref: "#/components/schemas/AuditLog" first_id: type: string - example: run_abc123 + example: audit_log-defb456h8dks last_id: type: string - example: run_abc456 + example: audit_log-hnbkd8s93s has_more: type: boolean - example: false required: - object - data - first_id - last_id - has_more - ListVectorStoreFilesResponse: + ListBatchesResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/Batch" + first_id: + type: string + example: batch_abc123 + last_id: + type: string + example: batch_abc456 + has_more: + type: boolean + object: + type: string + enum: + - list + x-stainless-const: true + required: + - object + - data + - has_more + ListCertificatesResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/Certificate" + first_id: + type: string + example: cert_abc + last_id: + type: string + example: cert_abc + has_more: + type: boolean + object: + type: string + enum: + - list + x-stainless-const: true + required: + - object + - data + - has_more + ListFilesResponse: + type: object + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" + first_id: + type: string + example: file-abc123 + last_id: + type: string + example: file-abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListFineTuningCheckpointPermissionResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningCheckpointPermission" + object: + type: string + enum: + - list + x-stainless-const: true + first_id: + type: string + nullable: true + last_id: + type: string + nullable: true + has_more: + type: boolean + required: + - object + - data + - has_more + ListFineTuningJobCheckpointsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobCheckpoint" + object: + type: 
string + enum: + - list + x-stainless-const: true + first_id: + type: string + nullable: true + last_id: + type: string + nullable: true + has_more: + type: boolean + required: + - object + - data + - has_more + ListFineTuningJobEventsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobEvent" + object: + type: string + enum: + - list + x-stainless-const: true + has_more: + type: boolean + required: + - object + - data + - has_more + ListMessagesResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/MessageObject" + first_id: + type: string + example: msg_abc123 + last_id: + type: string + example: msg_abc123 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListModelsResponse: + type: object + properties: + object: + type: string + enum: + - list + x-stainless-const: true + data: + type: array + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + ListPaginatedFineTuningJobsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: + type: string + enum: + - list + x-stainless-const: true + required: + - object + - data + - has_more + ListRunStepsResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/RunStepObject" + first_id: + type: string + example: step_abc123 + last_id: + type: string + example: step_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListRunsResponse: + type: object + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/RunObject" + first_id: + type: string + example: run_abc123 + last_id: + type: string + example: run_abc456 + 
has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListVectorStoreFilesResponse: properties: object: type: string @@ -26668,6 +29462,139 @@ components: - first_id - last_id - has_more + LocalShellExecAction: + type: object + title: Local shell exec action + description: | + Execute a shell command on the server. + properties: + type: + type: string + enum: + - exec + description: | + The type of the local shell action. Always `exec`. + x-stainless-const: true + command: + type: array + items: + type: string + description: | + The command to run. + timeout_ms: + type: integer + description: | + Optional timeout in milliseconds for the command. + nullable: true + working_directory: + type: string + description: | + Optional working directory to run the command in. + nullable: true + env: + type: object + additionalProperties: + type: string + description: | + Environment variables to set for the command. + user: + type: string + description: | + Optional user to run the command as. + nullable: true + required: + - type + - command + - env + LocalShellTool: + type: object + title: Local shell tool + description: > + A tool that allows the model to execute shell commands in a local + environment. + properties: + type: + type: string + enum: + - local_shell + description: The type of the local shell tool. Always `local_shell`. + x-stainless-const: true + required: + - type + LocalShellToolCall: + type: object + title: Local shell call + description: | + A tool call to run a command on the local shell. + properties: + type: + type: string + enum: + - local_shell_call + description: | + The type of the local shell call. Always `local_shell_call`. + x-stainless-const: true + id: + type: string + description: | + The unique ID of the local shell call. + call_id: + type: string + description: | + The unique ID of the local shell tool call generated by the model. 
+ action: + $ref: "#/components/schemas/LocalShellExecAction" + status: + type: string + enum: + - in_progress + - completed + - incomplete + description: | + The status of the local shell call. + required: + - type + - id + - call_id + - action + - status + LocalShellToolCallOutput: + type: object + title: Local shell call output + description: | + The output of a local shell tool call. + properties: + type: + type: string + enum: + - local_shell_call_output + description: > + The type of the local shell tool call output. Always + `local_shell_call_output`. + x-stainless-const: true + id: + type: string + description: | + The unique ID of the local shell tool call generated by the model. + output: + type: string + description: | + A JSON string of the output of the local shell tool call. + status: + type: string + enum: + - in_progress + - completed + - incomplete + description: > + The status of the item. One of `in_progress`, `completed`, or + `incomplete`. + nullable: true + required: + - id + - type + - call_id + - output LogProbProperties: type: object description: | @@ -26691,142 +29618,460 @@ components: - token - logprob - bytes - MessageContentImageFileObject: - title: Image file + MCPApprovalRequest: type: object - description: References an image [File](/docs/api-reference/files) in the - content of a message. + title: MCP approval request + description: | + A request for human approval of a tool invocation. properties: type: - description: Always `image_file`. type: string enum: - - image_file + - mcp_approval_request + description: | + The type of the item. Always `mcp_approval_request`. x-stainless-const: true - image_file: - type: object - properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the - message content. Set `purpose="vision"` when uploading the File - if you need to later display the file content. 
- type: string - detail: - type: string - description: Specifies the detail level of the image if specified by the user. - `low` uses fewer tokens, you can opt in to high resolution using - `high`. - enum: - - auto - - low - - high - default: auto - required: - - file_id + id: + type: string + description: | + The unique ID of the approval request. + server_label: + type: string + description: | + The label of the MCP server making the request. + name: + type: string + description: | + The name of the tool to run. + arguments: + type: string + description: | + A JSON string of arguments for the tool. required: - type - - image_file - MessageContentImageUrlObject: - title: Image URL + - id + - server_label + - name + - arguments + MCPApprovalResponse: type: object - description: References an image URL in the content of a message. + title: MCP approval response + description: | + A response to an MCP approval request. properties: type: type: string enum: - - image_url - description: The type of the content part. + - mcp_approval_response + description: | + The type of the item. Always `mcp_approval_response`. x-stainless-const: true - image_url: - type: object - properties: - url: - type: string - description: "The external URL of the image, must be a supported image types: - jpeg, jpg, png, gif, webp." - format: uri - detail: - type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, - you can opt in to high resolution using `high`. Default value is - `auto` - enum: - - auto - - low - - high - default: auto - required: - - url + id: + type: string + description: | + The unique ID of the approval response + nullable: true + approval_request_id: + type: string + description: | + The ID of the approval request being answered. + approve: + type: boolean + description: | + Whether the request was approved. + reason: + type: string + description: | + Optional reason for the decision. 
+ nullable: true required: - type - - image_url - MessageContentRefusalObject: - title: Refusal + - request_id + - approve + - approval_request_id + MCPApprovalResponseResource: type: object - description: The refusal content generated by the assistant. + title: MCP approval response + description: | + A response to an MCP approval request. properties: type: - description: Always `refusal`. type: string enum: - - refusal + - mcp_approval_response + description: | + The type of the item. Always `mcp_approval_response`. x-stainless-const: true - refusal: + id: type: string - nullable: false + description: | + The unique ID of the approval response + approval_request_id: + type: string + description: | + The ID of the approval request being answered. + approve: + type: boolean + description: | + Whether the request was approved. + reason: + type: string + description: | + Optional reason for the decision. + nullable: true required: - type - - refusal - MessageContentTextAnnotationsFileCitationObject: - title: File citation + - id + - request_id + - approve + - approval_request_id + MCPListTools: type: object - description: A citation within the message that points to a specific quote from - a specific File associated with the assistant or the message. Generated - when the assistant uses the "file_search" tool to search files. + title: MCP list tools + description: | + A list of tools available on an MCP server. properties: type: - description: Always `file_citation`. type: string enum: - - file_citation + - mcp_list_tools + description: | + The type of the item. Always `mcp_list_tools`. x-stainless-const: true - text: - description: The text in the message content that needs to be replaced. + id: type: string - file_citation: - type: object - properties: - file_id: - description: The ID of the specific File the citation is from. 
- type: string - required: - - file_id - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + description: | + The unique ID of the list. + server_label: + type: string + description: | + The label of the MCP server. + tools: + type: array + items: + $ref: "#/components/schemas/MCPListToolsTool" + description: | + The tools available on the server. + error: + type: string + description: | + Error message if the server could not list tools. + nullable: true required: - type - - text - - file_citation - - start_index - - end_index - MessageContentTextAnnotationsFilePathObject: - title: File path + - id + - server_label + - tools + MCPListToolsTool: type: object - description: A URL for the file that's generated when the assistant used the - `code_interpreter` tool to generate a file. + title: MCP list tools tool + description: | + A tool available on an MCP server. + properties: + name: + type: string + description: | + The name of the tool. + description: + type: string + description: | + The description of the tool. + nullable: true + input_schema: + type: object + description: | + The JSON schema describing the tool's input. + annotations: + type: object + description: | + Additional annotations about the tool. + nullable: true + required: + - name + - input_schema + MCPTool: + type: object + title: MCP tool + description: > + Give the model access to additional tools via remote Model Context + Protocol + + (MCP) servers. [Learn more about MCP](/docs/guides/tools-remote-mcp). properties: type: - description: Always `file_path`. type: string enum: - - file_path + - mcp + description: The type of the MCP tool. Always `mcp`. x-stainless-const: true - text: - description: The text in the message content that needs to be replaced. + server_label: + type: string + description: | + A label for this MCP server, used to identify it in tool calls. + server_url: + type: string + description: | + The URL for the MCP server. 
+ headers: + type: object + additionalProperties: + type: string + nullable: true + description: > + Optional HTTP headers to send to the MCP server. Use for + authentication + + or other purposes. + allowed_tools: + description: | + List of allowed tool names or a filter object. + oneOf: + - type: array + title: MCP allowed tools + description: A string array of allowed tool names + items: + type: string + - type: object + title: MCP allowed tools filter + description: | + A filter object to specify which tools are allowed. + properties: + tool_names: + type: array + title: MCP allowed tools + items: + type: string + description: List of allowed tool names. + required: [] + additionalProperties: false + nullable: true + require_approval: + description: Specify which of the MCP server's tools require approval. + oneOf: + - type: object + title: MCP tool approval filter + properties: + always: + type: object + description: | + A list of tools that always require approval. + properties: + tool_names: + type: array + items: + type: string + description: List of tools that require approval. + never: + type: object + description: | + A list of tools that never require approval. + properties: + tool_names: + type: array + items: + type: string + description: List of tools that do not require approval. + additionalProperties: false + - type: string + title: MCP tool approval setting + description: > + Specify a single approval policy for all tools. One of `always` + or + + `never`. When set to `always`, all tools will require approval. + When + + set to `never`, all tools will not require approval. + enum: + - always + - never + default: always + nullable: true + required: + - type + - server_label + - server_url + MCPToolCall: + type: object + title: MCP tool call + description: | + An invocation of a tool on an MCP server. + properties: + type: + type: string + enum: + - mcp_call + description: | + The type of the item. Always `mcp_call`. 
+ x-stainless-const: true + id: + type: string + description: | + The unique ID of the tool call. + server_label: + type: string + description: | + The label of the MCP server running the tool. + name: + type: string + description: | + The name of the tool that was run. + arguments: + type: string + description: | + A JSON string of the arguments passed to the tool. + output: + type: string + description: | + The output from the tool call. + nullable: true + error: + type: string + description: | + The error from the tool call, if any. + nullable: true + required: + - type + - id + - server_label + - name + - arguments + MessageContentImageFileObject: + title: Image file + type: object + description: References an image [File](/docs/api-reference/files) in the + content of a message. + properties: + type: + description: Always `image_file`. + type: string + enum: + - image_file + x-stainless-const: true + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the + message content. Set `purpose="vision"` when uploading the File + if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. + `low` uses fewer tokens, you can opt in to high resolution using + `high`. + enum: + - auto + - low + - high + default: auto + required: + - file_id + required: + - type + - image_file + MessageContentImageUrlObject: + title: Image URL + type: object + description: References an image URL in the content of a message. + properties: + type: + type: string + enum: + - image_url + description: The type of the content part. + x-stainless-const: true + image_url: + type: object + properties: + url: + type: string + description: "The external URL of the image, must be a supported image types: + jpeg, jpg, png, gif, webp." 
+ format: uri + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, + you can opt in to high resolution using `high`. Default value is + `auto` + enum: + - auto + - low + - high + default: auto + required: + - url + required: + - type + - image_url + MessageContentRefusalObject: + title: Refusal + type: object + description: The refusal content generated by the assistant. + properties: + type: + description: Always `refusal`. + type: string + enum: + - refusal + x-stainless-const: true + refusal: + type: string + nullable: false + required: + - type + - refusal + MessageContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from + a specific File associated with the assistant or the message. Generated + when the assistant uses the "file_search" tool to search files. + properties: + type: + description: Always `file_citation`. + type: string + enum: + - file_citation + x-stainless-const: true + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_citation + - start_index + - end_index + MessageContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the + `code_interpreter` tool to generate a file. + properties: + type: + description: Always `file_path`. + type: string + enum: + - file_path + x-stainless-const: true + text: + description: The text in the message content that needs to be replaced. 
type: string file_path: type: object @@ -27429,6 +30674,12 @@ components: enum: - o1-pro - o1-pro-2025-03-19 + - o3-pro + - o3-pro-2025-06-10 + - o3-deep-research + - o3-deep-research-2025-06-26 + - o4-mini-deep-research + - o4-mini-deep-research-2025-06-26 - computer-use-preview - computer-use-preview-2025-03-11 ModelIdsShared: @@ -27462,6 +30713,7 @@ components: - gpt-4o-audio-preview - gpt-4o-audio-preview-2024-10-01 - gpt-4o-audio-preview-2024-12-17 + - gpt-4o-audio-preview-2025-06-03 - gpt-4o-mini-audio-preview - gpt-4o-mini-audio-preview-2024-12-17 - gpt-4o-search-preview @@ -27469,6 +30721,7 @@ components: - gpt-4o-search-preview-2025-03-11 - gpt-4o-mini-search-preview-2025-03-11 - chatgpt-4o-latest + - codex-mini-latest - gpt-4o-mini - gpt-4o-mini-2024-07-18 - gpt-4-turbo @@ -27495,6 +30748,17 @@ components: properties: metadata: $ref: "#/components/schemas/Metadata" + top_logprobs: + description: > + An integer between 0 and 20 specifying the number of most likely + tokens to + + return at each token position, each with an associated log + probability. + type: integer + minimum: 0 + maximum: 20 + nullable: true temperature: type: number minimum: 0 @@ -27533,8 +30797,10 @@ components: type: string example: user-1234 description: > - A unique identifier representing your end-user, which can help - OpenAI to monitor and detect abuse. [Learn + A stable identifier for your end-users. + + Used to boost cache hit rates by better bucketing similar requests + and to help OpenAI detect and prevent abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
service_tier: $ref: "#/components/schemas/ServiceTier" @@ -27864,6 +31130,12 @@ components: - $ref: "#/components/schemas/WebSearchToolCall" - $ref: "#/components/schemas/ComputerToolCall" - $ref: "#/components/schemas/ReasoningItem" + - $ref: "#/components/schemas/ImageGenToolCall" + - $ref: "#/components/schemas/CodeInterpreterToolCall" + - $ref: "#/components/schemas/LocalShellToolCall" + - $ref: "#/components/schemas/MCPToolCall" + - $ref: "#/components/schemas/MCPListTools" + - $ref: "#/components/schemas/MCPApprovalRequest" discriminator: propertyName: type OutputMessage: @@ -28509,6 +31781,24 @@ components: description: "`owner` or `member`" required: - role + Prompt: + type: object + nullable: true + description: | + Reference to a prompt template and its variables. + [Learn more](/docs/guides/text?api-mode=responses#reusable-prompts). + required: + - id + properties: + id: + type: string + description: The unique identifier of the prompt template to use. + version: + type: string + description: Optional version of the prompt template. + nullable: true + variables: + $ref: "#/components/schemas/ResponsePromptVariables" RealtimeClientEvent: discriminator: propertyName: type @@ -28883,7 +32173,7 @@ components: stop generating audio and emit a `output_audio_buffer.cleared` event. This event should be preceded by a `response.cancel` client event to stop the generation of the current response. - [Learn more](/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + [Learn more](/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). 
properties: event_id: type: string @@ -29096,7 +32386,9 @@ components: ], "tool_choice": "auto", "temperature": 0.8, - "max_response_output_tokens": "inf" + "max_response_output_tokens": "inf", + "speed": 1.1, + "tracing": "auto" } } RealtimeClientEventTranscriptionSessionUpdate: @@ -29972,15 +33264,14 @@ components: Realtime API models accept audio natively, and thus input transcription - is a + is a separate process run on a separate ASR (Automatic Speech Recognition) - model, + model. - currently always `whisper-1`. Thus the transcript may diverge somewhat - from + The transcript may diverge somewhat from the model's interpretation, and - the model's interpretation, and should be treated as a rough guide. + should be treated as a rough guide. properties: event_id: type: string @@ -30497,7 +33788,7 @@ components: mode when the user has interrupted (`input_audio_buffer.speech_started`), or when the client has emitted the `output_audio_buffer.clear` event to manually cut off the current audio response. - [Learn more](/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + [Learn more](/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). properties: event_id: type: string @@ -30530,7 +33821,7 @@ components: **WebRTC Only:** Emitted when the server begins streaming audio to the client. This event is emitted after an audio content part has been added (`response.content_part.added`) to the response. - [Learn more](/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + [Learn more](/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). properties: event_id: type: string @@ -30563,7 +33854,7 @@ components: **WebRTC Only:** Emitted when the output audio buffer has been completely drained on the server, and no more audio is forthcoming. This event is emitted after the full response data has been sent to the client (`response.done`). 
- [Learn more](/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + [Learn more](/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). properties: event_id: type: string @@ -31475,7 +34766,9 @@ components: "tools": [], "tool_choice": "auto", "temperature": 0.8, - "max_response_output_tokens": "inf" + "max_response_output_tokens": "inf", + "speed": 1.1, + "tracing": "auto" } } RealtimeServerEventSessionUpdated: @@ -31524,7 +34817,9 @@ components: "tools": [], "tool_choice": "none", "temperature": 0.7, - "max_response_output_tokens": 200 + "max_response_output_tokens": 200, + "speed": 1.1, + "tracing": "auto" } } RealtimeServerEventTranscriptionSessionUpdated: @@ -31611,36 +34906,55 @@ components: - gpt-4o-realtime-preview - gpt-4o-realtime-preview-2024-10-01 - gpt-4o-realtime-preview-2024-12-17 + - gpt-4o-realtime-preview-2025-06-03 - gpt-4o-mini-realtime-preview - gpt-4o-mini-realtime-preview-2024-12-17 instructions: type: string description: > The default system instructions (i.e. system message) prepended to - model calls. This field allows the client to guide the model on - desired responses. The model can be instructed on response content - and format, (e.g. "be extremely succinct", "act friendly", "here - are examples of good responses") and on audio behavior (e.g. "talk - quickly", "inject emotion into your voice", "laugh frequently"). - The instructions are not guaranteed to be followed by the model, - but they provide guidance to the model on the desired behavior. + model + + calls. This field allows the client to guide the model on desired + + responses. The model can be instructed on response content and + format, + + (e.g. "be extremely succinct", "act friendly", "here are examples of + good + + responses") and on audio behavior (e.g. "talk quickly", "inject + emotion + + into your voice", "laugh frequently"). 
The instructions are not + + guaranteed to be followed by the model, but they provide guidance to + the + + model on the desired behavior. + Note that the server sets default instructions which will be used if - this field is not set and are visible in the `session.created` - event at the start of the session. + this + + field is not set and are visible in the `session.created` event at + the + + start of the session. voice: $ref: "#/components/schemas/VoiceIdsShared" description: > The voice the model uses to respond. Voice cannot be changed during - the + the session once the model has responded with audio at least once. - Current + Current - voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, + voice options are `alloy`, `ash`, `ballad`, `coral`, `echo`, + `fable`, - `shimmer` and `verse`. + `onyx`, `nova`, `sage`, `shimmer`, and `verse`. input_audio_format: type: string default: pcm16 @@ -31800,6 +35114,65 @@ components: Type of noise reduction. `near_field` is for close-talking microphones such as headphones, `far_field` is for far-field microphones such as laptop or conference room microphones. + speed: + type: number + default: 1 + maximum: 1.5 + minimum: 0.25 + description: > + The speed of the model's spoken response. 1.0 is the default speed. + 0.25 is + + the minimum speed. 1.5 is the maximum speed. This value can only be + changed + + in between model turns, not while a response is in progress. + tracing: + title: Tracing Configuration + description: > + Configuration options for tracing. Set to null to disable tracing. + Once + + tracing is enabled for a session, the configuration cannot be + modified. + + + `auto` will create a trace for the session with default values for + the + + workflow name, group id, and metadata. + oneOf: + - type: string + default: auto + description: | + Default tracing mode for the session. 
+ enum: + - auto + x-stainless-const: true + - type: object + title: Tracing Configuration + description: | + Granular configuration for tracing. + properties: + workflow_name: + type: string + description: > + The name of the workflow to attach to this trace. This is + used to + + name the trace in the traces dashboard. + group_id: + type: string + description: > + The group id to attach to this trace to enable filtering + and + + grouping in the traces dashboard. + metadata: + type: object + description: | + The arbitrary metadata to attach to this trace to enable + filtering in the traces dashboard. tools: type: array description: Tools (functions) available to the model. @@ -31879,32 +35252,33 @@ components: - gpt-4o-realtime-preview - gpt-4o-realtime-preview-2024-10-01 - gpt-4o-realtime-preview-2024-12-17 + - gpt-4o-realtime-preview-2025-06-03 - gpt-4o-mini-realtime-preview - gpt-4o-mini-realtime-preview-2024-12-17 instructions: type: string description: > The default system instructions (i.e. system message) prepended to - model calls. This field allows the client to guide the model on - desired responses. The model can be instructed on response content - and format, (e.g. "be extremely succinct", "act friendly", "here - are examples of good responses") and on audio behavior (e.g. "talk - quickly", "inject emotion into your voice", "laugh frequently"). - The instructions are not guaranteed to be followed by the model, - but they provide guidance to the model on the desired behavior. + model calls. This field allows the client to guide the model on + desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are + examples of good responses") and on audio behavior (e.g. "talk + quickly", "inject emotion into your voice", "laugh frequently"). The + instructions are not guaranteed to be followed by the model, but + they provide guidance to the model on the desired behavior. 
Note that the server sets default instructions which will be used if - this field is not set and are visible in the `session.created` - event at the start of the session. + this field is not set and are visible in the `session.created` event + at the start of the session. voice: $ref: "#/components/schemas/VoiceIdsShared" description: > The voice the model uses to respond. Voice cannot be changed during - the + the session once the model has responded with audio at least once. - Current + Current voice options are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, @@ -31921,7 +35295,7 @@ components: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. - For `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, + For `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian byte order. output_audio_format: @@ -31939,7 +35313,7 @@ components: input_audio_transcription: type: object description: | - Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs asynchronously through [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) and should be treated as guidance of input audio content rather than precisely what the model heard. The client can optionally set the language and prompt for transcription, these offer additional guidance to the transcription service. + Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. 
Transcription runs asynchronously through [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) and should be treated as guidance of input audio content rather than precisely what the model heard. The client can optionally set the language and prompt for transcription, these offer additional guidance to the transcription service. properties: model: type: string @@ -32008,27 +35382,27 @@ components: type: number description: > Used only for `server_vad` mode. Activation threshold for VAD - (0.0 to 1.0), this defaults to 0.5. A + (0.0 to 1.0), this defaults to 0.5. A higher threshold will require louder audio to activate the - model, and + model, and thus might perform better in noisy environments. prefix_padding_ms: type: integer description: > Used only for `server_vad` mode. Amount of audio to include - before the VAD detected speech (in + before the VAD detected speech (in milliseconds). Defaults to 300ms. silence_duration_ms: type: integer description: > Used only for `server_vad` mode. Duration of silence to detect - speech stop (in milliseconds). Defaults + speech stop (in milliseconds). Defaults to 500ms. With shorter values the model will respond more - quickly, + quickly, but may jump in on short pauses from the user. create_response: @@ -32069,6 +35443,65 @@ components: Type of noise reduction. `near_field` is for close-talking microphones such as headphones, `far_field` is for far-field microphones such as laptop or conference room microphones. + speed: + type: number + default: 1 + maximum: 1.5 + minimum: 0.25 + description: > + The speed of the model's spoken response. 1.0 is the default speed. + 0.25 is + + the minimum speed. 1.5 is the maximum speed. This value can only be + changed + + in between model turns, not while a response is in progress. + tracing: + title: Tracing Configuration + description: > + Configuration options for tracing. Set to null to disable tracing. 
+ Once + + tracing is enabled for a session, the configuration cannot be + modified. + + + `auto` will create a trace for the session with default values for + the + + workflow name, group id, and metadata. + oneOf: + - type: string + default: auto + description: | + Default tracing mode for the session. + enum: + - auto + x-stainless-const: true + - type: object + title: Tracing Configuration + description: | + Granular configuration for tracing. + properties: + workflow_name: + type: string + description: > + The name of the workflow to attach to this trace. This is + used to + + name the trace in the traces dashboard. + group_id: + type: string + description: > + The group id to attach to this trace to enable filtering + and + + grouping in the traces dashboard. + metadata: + type: object + description: | + The arbitrary metadata to attach to this trace to enable + filtering in the traces dashboard. tools: type: array description: Tools (functions) available to the model. @@ -32088,10 +35521,10 @@ components: type: string description: > The description of the function, including guidance on when - and how + and how to call it, and guidance about what to tell the user when - calling + calling (if anything). parameters: @@ -32102,7 +35535,7 @@ components: default: auto description: > How the model chooses tools. Options are `auto`, `none`, `required`, - or + or specify a function. temperature: @@ -32124,6 +35557,31 @@ components: inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or `inf` for the maximum available tokens for a given model. Defaults to `inf`. + client_secret: + type: object + description: | + Configuration options for the generated client secret. + properties: + expires_after: + type: object + description: | + Configuration for the ephemeral token expiration. + properties: + anchor: + type: string + enum: + - created_at + description: > + The anchor point for the ephemeral token expiration. 
Only + `created_at` is currently supported. + seconds: + default: 600 + type: integer + description: > + The number of seconds from the anchor point to the + expiration. Select a value between `10` and `7200`. + required: + - anchor RealtimeSessionCreateResponse: type: object description: > @@ -32230,20 +35688,75 @@ components: native to the model, since the model consumes audio directly. Transcription - runs + runs - asynchronously through Whisper and should be treated as rough - guidance + asynchronously and should be treated as rough guidance rather than the representation understood by the model. properties: model: type: string - description: > - The model to use for transcription, `whisper-1` is the only - currently + description: | + The model to use for transcription. + speed: + type: number + default: 1 + maximum: 1.5 + minimum: 0.25 + description: > + The speed of the model's spoken response. 1.0 is the default speed. + 0.25 is + + the minimum speed. 1.5 is the maximum speed. This value can only be + changed + + in between model turns, not while a response is in progress. + tracing: + title: Tracing Configuration + description: > + Configuration options for tracing. Set to null to disable tracing. + Once + + tracing is enabled for a session, the configuration cannot be + modified. + + + `auto` will create a trace for the session with default values for + the + + workflow name, group id, and metadata. + oneOf: + - type: string + default: auto + description: | + Default tracing mode for the session. + enum: + - auto + x-stainless-const: true + - type: object + title: Tracing Configuration + description: | + Granular configuration for tracing. + properties: + workflow_name: + type: string + description: > + The name of the workflow to attach to this trace. This is + used to - supported model. + name the trace in the traces dashboard. 
+ group_id: + type: string + description: > + The group id to attach to this trace to enable filtering + and + + grouping in the traces dashboard. + metadata: + type: object + description: | + The arbitrary metadata to attach to this trace to enable + filtering in the traces dashboard. turn_detection: type: object description: > @@ -32358,6 +35871,8 @@ components: "tools": [], "tool_choice": "none", "temperature": 0.7, + "speed": 1.1, + "tracing": "auto", "max_response_output_tokens": 200, "client_secret": { "value": "ek_abc123", @@ -32391,7 +35906,7 @@ components: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. - For `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, + For `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian byte order. input_audio_transcription: @@ -32472,27 +35987,27 @@ components: type: number description: > Used only for `server_vad` mode. Activation threshold for VAD - (0.0 to 1.0), this defaults to 0.5. A + (0.0 to 1.0), this defaults to 0.5. A higher threshold will require louder audio to activate the - model, and + model, and thus might perform better in noisy environments. prefix_padding_ms: type: integer description: > Used only for `server_vad` mode. Amount of audio to include - before the VAD detected speech (in + before the VAD detected speech (in milliseconds). Defaults to 300ms. silence_duration_ms: type: integer description: > Used only for `server_vad` mode. Duration of silence to detect - speech stop (in milliseconds). Defaults + speech stop (in milliseconds). Defaults to 500ms. With shorter values the model will respond more - quickly, + quickly, but may jump in on short pauses from the user. create_response: @@ -32542,6 +36057,30 @@ components: items are: - `item.input_audio_transcription.logprobs` + client_secret: + type: object + description: | + Configuration options for the generated client secret. 
+ properties: + expires_at: + type: object + description: | + Configuration for the ephemeral token expiration. + properties: + anchor: + default: created_at + type: string + enum: + - created_at + description: > + The anchor point for the ephemeral token expiration. Only + `created_at` is currently supported. + seconds: + default: 600 + type: integer + description: > + The number of seconds from the anchor point to the + expiration. Select a value between `10` and `7200`. RealtimeTranscriptionSessionCreateResponse: type: object description: > @@ -32550,7 +36089,7 @@ components: When a session is created on the server via REST API, the session object - also contains an ephemeral key. Default TTL for keys is one minute. + also contains an ephemeral key. Default TTL for keys is 10 minutes. This property is not present when a session is updated via the WebSocket API. @@ -32755,7 +36294,12 @@ components: A description of the chain of thought used by a reasoning model while generating - a response. + a response. Be sure to include these items in your `input` to the + Responses API + + for subsequent turns of a conversation if you are manually + + [managing context](/docs/guides/conversation-state). title: Reasoning properties: type: @@ -32769,6 +36313,15 @@ components: type: string description: | The unique identifier of the reasoning content. + encrypted_content: + type: string + description: > + The encrypted content of the reasoning item - populated when a + response is + + generated with `reasoning.encrypted_content` in the `include` + parameter. + nullable: true summary: type: array description: | @@ -32807,6 +36360,7 @@ components: - summary - type Response: + title: The response object allOf: - $ref: "#/components/schemas/ModelResponseProperties" - $ref: "#/components/schemas/ResponseProperties" @@ -32829,11 +36383,13 @@ components: The status of the response generation. One of `completed`, `failed`, - `in_progress`, or `incomplete`. 
+ `in_progress`, `cancelled`, `queued`, or `incomplete`. enum: - completed - failed - in_progress + - cancelled + - queued - incomplete created_at: type: number @@ -32869,6 +36425,34 @@ components: supported in SDKs. items: $ref: "#/components/schemas/OutputItem" + instructions: + nullable: true + description: > + A system (or developer) message inserted into the model's + context. + + + When using along with `previous_response_id`, the instructions + from a previous + + response will not be carried over to the next response. This + makes it simple + + to swap out system (or developer) messages in new responses. + oneOf: + - type: string + description: > + A text input to the model, equivalent to a text input with + the + + `developer` role. + - type: array + title: Input item list + description: | + A list of one or many input items to the model, containing + different content types. + items: + $ref: "#/components/schemas/InputItem" output_text: type: string nullable: true @@ -32905,66 +36489,53 @@ components: - tool_choice - temperature - top_p - x-oaiMeta: - name: The response object - group: responses - example: > - { - "id": "resp_67ccd3a9da748190baa7f1570fe91ac604becb25c45c1d41", - "object": "response", - "created_at": 1741476777, - "status": "completed", - "error": null, - "incomplete_details": null, - "instructions": null, - "max_output_tokens": null, - "model": "gpt-4o-2024-08-06", - "output": [ - { - "type": "message", - "id": "msg_67ccd3acc8d48190a77525dc6de64b4104becb25c45c1d41", - "status": "completed", - "role": "assistant", - "content": [ - { - "type": "output_text", - "text": "The image depicts a scenic landscape with a wooden boardwalk or pathway leading through lush, green grass under a blue sky with some clouds. The setting suggests a peaceful natural area, possibly a park or nature reserve. 
There are trees and shrubs in the background.", - "annotations": [] - } - ] - } - ], - "parallel_tool_calls": true, - "previous_response_id": null, - "reasoning": { - "effort": null, - "summary": null - }, - "store": true, - "temperature": 1.0, - "text": { - "format": { - "type": "text" - } - }, - "tool_choice": "auto", - "tools": [], - "top_p": 1.0, - "truncation": "disabled", - "usage": { - "input_tokens": 328, - "input_tokens_details": { - "cached_tokens": 0 - }, - "output_tokens": 52, - "output_tokens_details": { - "reasoning_tokens": 0 - }, - "total_tokens": 380 - }, - "user": null, - "metadata": {} - } + example: + id: resp_67ccd3a9da748190baa7f1570fe91ac604becb25c45c1d41 + object: response + created_at: 1741476777 + status: completed + error: null + incomplete_details: null + instructions: null + max_output_tokens: null + model: gpt-4o-2024-08-06 + output: + - type: message + id: msg_67ccd3acc8d48190a77525dc6de64b4104becb25c45c1d41 + status: completed + role: assistant + content: + - type: output_text + text: The image depicts a scenic landscape with a wooden boardwalk or pathway + leading through lush, green grass under a blue sky with some + clouds. The setting suggests a peaceful natural area, possibly + a park or nature reserve. There are trees and shrubs in the + background. + annotations: [] + parallel_tool_calls: true + previous_response_id: null + reasoning: + effort: null + summary: null + store: true + temperature: 1 + text: + format: + type: text + tool_choice: auto + tools: [] + top_p: 1 + truncation: disabled + usage: + input_tokens: 328 + input_tokens_details: + cached_tokens: 0 + output_tokens: 52 + output_tokens_details: + reasoning_tokens: 0 + total_tokens: 380 + user: null + metadata: {} ResponseAudioDeltaEvent: type: object description: Emitted when there is a partial audio response. 
@@ -32976,6 +36547,10 @@ components: enum: - response.audio.delta x-stainless-const: true + sequence_number: + type: integer + description: | + A sequence number for this chunk of the stream response. delta: type: string description: | @@ -32983,6 +36558,7 @@ components: required: - type - delta + - sequence_number x-oaiMeta: name: response.audio.delta group: responses @@ -32990,7 +36566,8 @@ components: { "type": "response.audio.delta", "response_id": "resp_123", - "delta": "base64encoded..." + "delta": "base64encoded...", + "sequence_number": 1 } ResponseAudioDoneEvent: type: object @@ -33003,8 +36580,13 @@ components: enum: - response.audio.done x-stainless-const: true + sequence_number: + type: integer + description: | + The sequence number of the delta. required: - type + - sequence_number - response_id x-oaiMeta: name: response.audio.done @@ -33012,7 +36594,8 @@ components: example: | { "type": "response.audio.done", - "response_id": "resp-123" + "response_id": "resp-123", + "sequence_number": 1 } ResponseAudioTranscriptDeltaEvent: type: object @@ -33029,10 +36612,14 @@ components: type: string description: | The partial transcript of the audio response. + sequence_number: + type: integer + description: The sequence number of this event. required: - type - response_id - delta + - sequence_number x-oaiMeta: name: response.audio.transcript.delta group: responses @@ -33040,7 +36627,8 @@ components: { "type": "response.audio.transcript.delta", "response_id": "resp_123", - "delta": " ... partial transcript ... " + "delta": " ... partial transcript ... ", + "sequence_number": 1 } ResponseAudioTranscriptDoneEvent: type: object @@ -33053,88 +36641,103 @@ components: enum: - response.audio.transcript.done x-stainless-const: true + sequence_number: + type: integer + description: The sequence number of this event. 
required: - type - response_id + - sequence_number x-oaiMeta: name: response.audio.transcript.done group: responses example: | { "type": "response.audio.transcript.done", - "response_id": "resp_123" + "response_id": "resp_123", + "sequence_number": 1 } ResponseCodeInterpreterCallCodeDeltaEvent: type: object - description: Emitted when a partial code snippet is added by the code interpreter. + description: Emitted when a partial code snippet is streamed by the code interpreter. properties: type: type: string - description: > - The type of the event. Always - `response.code_interpreter_call.code.delta`. + description: The type of the event. Always + `response.code_interpreter_call_code.delta`. enum: - - response.code_interpreter_call.code.delta + - response.code_interpreter_call_code.delta x-stainless-const: true output_index: type: integer - description: > - The index of the output item that the code interpreter call is in - progress. + description: The index of the output item in the response for which the code is + being streamed. + item_id: + type: string + description: The unique identifier of the code interpreter tool call item. delta: type: string - description: | - The partial code snippet added by the code interpreter. + description: The partial code snippet being streamed by the code interpreter. + sequence_number: + type: integer + description: The sequence number of this event, used to order streaming events. 
required: - type - - response_id - output_index + - item_id - delta + - sequence_number x-oaiMeta: - name: response.code_interpreter_call.code.delta + name: response.code_interpreter_call_code.delta group: responses example: | { - "type": "response.code_interpreter_call.code.delta", - "response_id": "resp-123", + "type": "response.code_interpreter_call_code.delta", "output_index": 0, - "delta": "partial code" + "item_id": "ci_12345", + "delta": "print('Hello, world')", + "sequence_number": 1 } ResponseCodeInterpreterCallCodeDoneEvent: type: object - description: Emitted when code snippet output is finalized by the code interpreter. + description: Emitted when the code snippet is finalized by the code interpreter. properties: type: type: string - description: > - The type of the event. Always - `response.code_interpreter_call.code.done`. + description: The type of the event. Always + `response.code_interpreter_call_code.done`. enum: - - response.code_interpreter_call.code.done + - response.code_interpreter_call_code.done x-stainless-const: true output_index: type: integer - description: > - The index of the output item that the code interpreter call is in - progress. + description: The index of the output item in the response for which the code is + finalized. + item_id: + type: string + description: The unique identifier of the code interpreter tool call item. code: type: string - description: | - The final code snippet output by the code interpreter. + description: The final code snippet output by the code interpreter. + sequence_number: + type: integer + description: The sequence number of this event, used to order streaming events. 
required: - type - - response_id - output_index + - item_id - code + - sequence_number x-oaiMeta: - name: response.code_interpreter_call.code.done + name: response.code_interpreter_call_code.done group: responses example: | { - "type": "response.code_interpreter_call.code.done", - "response_id": "resp-123", + "type": "response.code_interpreter_call_code.done", "output_index": 3, - "code": "console.log('done');" + "item_id": "ci_12345", + "code": "print('done')", + "sequence_number": 1 } ResponseCodeInterpreterCallCompletedEvent: type: object @@ -33142,33 +36745,35 @@ components: properties: type: type: string - description: > - The type of the event. Always + description: The type of the event. Always `response.code_interpreter_call.completed`. enum: - response.code_interpreter_call.completed x-stainless-const: true output_index: type: integer - description: > - The index of the output item that the code interpreter call is in - progress. - code_interpreter_call: - $ref: "#/components/schemas/CodeInterpreterToolCall" + description: The index of the output item in the response for which the code + interpreter call is completed. + item_id: + type: string + description: The unique identifier of the code interpreter tool call item. + sequence_number: + type: integer + description: The sequence number of this event, used to order streaming events. required: - type - - response_id - output_index - - code_interpreter_call + - item_id + - sequence_number x-oaiMeta: name: response.code_interpreter_call.completed group: responses example: | { "type": "response.code_interpreter_call.completed", - "response_id": "resp-123", "output_index": 5, - "code_interpreter_call": {} + "item_id": "ci_12345", + "sequence_number": 1 } ResponseCodeInterpreterCallInProgressEvent: type: object @@ -33176,33 +36781,35 @@ components: properties: type: type: string - description: > - The type of the event. Always + description: The type of the event. 
Always `response.code_interpreter_call.in_progress`. enum: - response.code_interpreter_call.in_progress x-stainless-const: true output_index: type: integer - description: > - The index of the output item that the code interpreter call is in - progress. - code_interpreter_call: - $ref: "#/components/schemas/CodeInterpreterToolCall" + description: The index of the output item in the response for which the code + interpreter call is in progress. + item_id: + type: string + description: The unique identifier of the code interpreter tool call item. + sequence_number: + type: integer + description: The sequence number of this event, used to order streaming events. required: - type - - response_id - output_index - - code_interpreter_call + - item_id + - sequence_number x-oaiMeta: name: response.code_interpreter_call.in_progress group: responses example: | { - "type": "response.code_interpreter_call.in.progress", - "response_id": "resp-123", + "type": "response.code_interpreter_call.in_progress", "output_index": 0, - "code_interpreter_call": {} + "item_id": "ci_12345", + "sequence_number": 1 } ResponseCodeInterpreterCallInterpretingEvent: type: object @@ -33211,33 +36818,35 @@ components: properties: type: type: string - description: > - The type of the event. Always + description: The type of the event. Always `response.code_interpreter_call.interpreting`. enum: - response.code_interpreter_call.interpreting x-stainless-const: true output_index: type: integer - description: > - The index of the output item that the code interpreter call is in - progress. - code_interpreter_call: - $ref: "#/components/schemas/CodeInterpreterToolCall" + description: The index of the output item in the response for which the code + interpreter is interpreting code. + item_id: + type: string + description: The unique identifier of the code interpreter tool call item. + sequence_number: + type: integer + description: The sequence number of this event, used to order streaming events. 
required: - type - - response_id - output_index - - code_interpreter_call + - item_id + - sequence_number x-oaiMeta: name: response.code_interpreter_call.interpreting group: responses example: | { "type": "response.code_interpreter_call.interpreting", - "response_id": "resp-123", "output_index": 4, - "code_interpreter_call": {} + "item_id": "ci_12345", + "sequence_number": 1 } ResponseCompletedEvent: type: object @@ -33254,9 +36863,13 @@ components: $ref: "#/components/schemas/Response" description: | Properties of the completed response. + sequence_number: + type: integer + description: The sequence number for this event. required: - type - response + - sequence_number x-oaiMeta: name: response.completed group: responses @@ -33311,7 +36924,8 @@ components: }, "user": null, "metadata": {} - } + }, + "sequence_number": 1 } ResponseContentPartAddedEvent: type: object @@ -33340,12 +36954,16 @@ components: $ref: "#/components/schemas/OutputContent" description: | The content part that was added. + sequence_number: + type: integer + description: The sequence number of this event. required: - type - item_id - output_index - content_index - part + - sequence_number x-oaiMeta: name: response.content_part.added group: responses @@ -33359,7 +36977,8 @@ components: "type": "output_text", "text": "", "annotations": [] - } + }, + "sequence_number": 1 } ResponseContentPartDoneEvent: type: object @@ -33384,6 +37003,9 @@ components: type: integer description: | The index of the content part that is done. + sequence_number: + type: integer + description: The sequence number of this event. 
part: $ref: "#/components/schemas/OutputContent" description: | @@ -33394,6 +37016,7 @@ components: - output_index - content_index - part + - sequence_number x-oaiMeta: name: response.content_part.done group: responses @@ -33403,6 +37026,7 @@ components: "item_id": "msg_123", "output_index": 0, "content_index": 0, + "sequence_number": 1, "part": { "type": "output_text", "text": "In a shimmering forest under a sky full of stars, a lonely unicorn named Lila discovered a hidden pond that glowed with moonlight. Every night, she would leave sparkling, magical flowers by the water's edge, hoping to share her beauty with others. One enchanting evening, she woke to find a group of friendly animals gathered around, eager to be friends and share in her magic.", @@ -33425,9 +37049,13 @@ components: $ref: "#/components/schemas/Response" description: | The response that was created. + sequence_number: + type: integer + description: The sequence number for this event. required: - type - response + - sequence_number x-oaiMeta: name: response.created group: responses @@ -33465,7 +37093,8 @@ components: "usage": null, "user": null, "metadata": {} - } + }, + "sequence_number": 1 } ResponseError: type: object @@ -33530,11 +37159,15 @@ components: description: | The error parameter. nullable: true + sequence_number: + type: integer + description: The sequence number of this event. required: - type - code - message - param + - sequence_number x-oaiMeta: name: error group: responses @@ -33543,7 +37176,8 @@ components: "type": "error", "code": "ERR_SOMETHING", "message": "Something went wrong", - "param": null + "param": null, + "sequence_number": 1 } ResponseFailedEvent: type: object @@ -33557,6 +37191,9 @@ components: enum: - response.failed x-stainless-const: true + sequence_number: + type: integer + description: The sequence number of this event. 
response: $ref: "#/components/schemas/Response" description: | @@ -33564,6 +37201,7 @@ components: required: - type - response + - sequence_number x-oaiMeta: name: response.failed group: responses @@ -33621,10 +37259,14 @@ components: type: string description: | The ID of the output item that the file search call is initiated. + sequence_number: + type: integer + description: The sequence number of this event. required: - type - output_index - item_id + - sequence_number x-oaiMeta: name: response.file_search_call.completed group: responses @@ -33633,6 +37275,7 @@ components: "type": "response.file_search_call.completed", "output_index": 0, "item_id": "fs_123", + "sequence_number": 1 } ResponseFileSearchCallInProgressEvent: type: object @@ -33654,10 +37297,14 @@ components: type: string description: | The ID of the output item that the file search call is initiated. + sequence_number: + type: integer + description: The sequence number of this event. required: - type - output_index - item_id + - sequence_number x-oaiMeta: name: response.file_search_call.in_progress group: responses @@ -33666,6 +37313,7 @@ components: "type": "response.file_search_call.in_progress", "output_index": 0, "item_id": "fs_123", + "sequence_number": 1 } ResponseFileSearchCallSearchingEvent: type: object @@ -33686,10 +37334,14 @@ components: type: string description: | The ID of the output item that the file search call is initiated. + sequence_number: + type: integer + description: The sequence number of this event. required: - type - output_index - item_id + - sequence_number x-oaiMeta: name: response.file_search_call.searching group: responses @@ -33698,6 +37350,7 @@ components: "type": "response.file_search_call.searching", "output_index": 0, "item_id": "fs_123", + "sequence_number": 1 } ResponseFormatJsonObject: type: object @@ -33822,6 +37475,9 @@ components: description: > The index of the output item that the function-call arguments delta is added to. 
+ sequence_number: + type: integer + description: The sequence number of this event. delta: type: string description: | @@ -33831,6 +37487,7 @@ components: - item_id - output_index - delta + - sequence_number x-oaiMeta: name: response.function_call_arguments.delta group: responses @@ -33840,6 +37497,7 @@ components: "item_id": "item-abc", "output_index": 0, "delta": "{ \"arg\":" + "sequence_number": 1 } ResponseFunctionCallArgumentsDoneEvent: type: object @@ -33856,6 +37514,9 @@ components: output_index: type: integer description: The index of the output item. + sequence_number: + type: integer + description: The sequence number of this event. arguments: type: string description: The function-call arguments. @@ -33864,6 +37525,7 @@ components: - item_id - output_index - arguments + - sequence_number x-oaiMeta: name: response.function_call_arguments.done group: responses @@ -33872,7 +37534,171 @@ components: "type": "response.function_call_arguments.done", "item_id": "item-abc", "output_index": 1, - "arguments": "{ \"arg\": 123 }" + "arguments": "{ \"arg\": 123 }", + "sequence_number": 1 + } + ResponseImageGenCallCompletedEvent: + type: object + title: ResponseImageGenCallCompletedEvent + description: > + Emitted when an image generation tool call has completed and the final + image is available. + properties: + type: + type: string + enum: + - response.image_generation_call.completed + description: The type of the event. Always + 'response.image_generation_call.completed'. + x-stainless-const: true + output_index: + type: integer + description: The index of the output item in the response's output array. + sequence_number: + type: integer + description: The sequence number of this event. + item_id: + type: string + description: The unique identifier of the image generation item being processed. 
+ required: + - type + - output_index + - item_id + - sequence_number + x-oaiMeta: + name: response.image_generation_call.completed + group: responses + example: | + { + "type": "response.image_generation_call.completed", + "output_index": 0, + "item_id": "item-123", + "sequence_number": 1 + } + ResponseImageGenCallGeneratingEvent: + type: object + title: ResponseImageGenCallGeneratingEvent + description: > + Emitted when an image generation tool call is actively generating an + image (intermediate state). + properties: + type: + type: string + enum: + - response.image_generation_call.generating + description: The type of the event. Always + 'response.image_generation_call.generating'. + x-stainless-const: true + output_index: + type: integer + description: The index of the output item in the response's output array. + item_id: + type: string + description: The unique identifier of the image generation item being processed. + sequence_number: + type: integer + description: The sequence number of the image generation item being processed. + required: + - type + - output_index + - item_id + - sequence_number + x-oaiMeta: + name: response.image_generation_call.generating + group: responses + example: | + { + "type": "response.image_generation_call.generating", + "output_index": 0, + "item_id": "item-123", + "sequence_number": 0 + } + ResponseImageGenCallInProgressEvent: + type: object + title: ResponseImageGenCallInProgressEvent + description: | + Emitted when an image generation tool call is in progress. + properties: + type: + type: string + enum: + - response.image_generation_call.in_progress + description: The type of the event. Always + 'response.image_generation_call.in_progress'. + x-stainless-const: true + output_index: + type: integer + description: The index of the output item in the response's output array. + item_id: + type: string + description: The unique identifier of the image generation item being processed. 
+ sequence_number: + type: integer + description: The sequence number of the image generation item being processed. + required: + - type + - output_index + - item_id + - sequence_number + x-oaiMeta: + name: response.image_generation_call.in_progress + group: responses + example: | + { + "type": "response.image_generation_call.in_progress", + "output_index": 0, + "item_id": "item-123", + "sequence_number": 0 + } + ResponseImageGenCallPartialImageEvent: + type: object + title: ResponseImageGenCallPartialImageEvent + description: > + Emitted when a partial image is available during image generation + streaming. + properties: + type: + type: string + enum: + - response.image_generation_call.partial_image + description: The type of the event. Always + 'response.image_generation_call.partial_image'. + x-stainless-const: true + output_index: + type: integer + description: The index of the output item in the response's output array. + item_id: + type: string + description: The unique identifier of the image generation item being processed. + sequence_number: + type: integer + description: The sequence number of the image generation item being processed. + partial_image_index: + type: integer + description: 0-based index for the partial image (backend is 1-based, but this + is 0-based for the user). + partial_image_b64: + type: string + description: Base64-encoded partial image data, suitable for rendering as an + image. + required: + - type + - output_index + - item_id + - sequence_number + - partial_image_index + - partial_image_b64 + x-oaiMeta: + name: response.image_generation_call.partial_image + group: responses + example: | + { + "type": "response.image_generation_call.partial_image", + "output_index": 0, + "item_id": "item-123", + "sequence_number": 0, + "partial_image_index": 0, + "partial_image_b64": "..." 
} ResponseInProgressEvent: type: object @@ -33889,9 +37715,13 @@ components: $ref: "#/components/schemas/Response" description: | The response that is in progress. + sequence_number: + type: integer + description: The sequence number of this event. required: - type - response + - sequence_number x-oaiMeta: name: response.in_progress group: responses @@ -33929,7 +37759,8 @@ components: "usage": null, "user": null, "metadata": {} - } + }, + "sequence_number": 1 } ResponseIncompleteEvent: type: object @@ -33947,9 +37778,13 @@ components: $ref: "#/components/schemas/Response" description: | The response that was incomplete. + sequence_number: + type: integer + description: The sequence number of this event. required: - type - response + - sequence_number x-oaiMeta: name: response.incomplete group: responses @@ -33985,7 +37820,8 @@ components: "usage": null, "user": null, "metadata": {} - } + }, + "sequence_number": 1 } ResponseItemList: type: object @@ -34040,6 +37876,263 @@ components: "last_id": "msg_abc123", "has_more": false } + ResponseMCPCallArgumentsDeltaEvent: + type: object + title: ResponseMCPCallArgumentsDeltaEvent + description: > + Emitted when there is a delta (partial update) to the arguments of an + MCP tool call. + properties: + type: + type: string + enum: + - response.mcp_call.arguments_delta + description: The type of the event. Always 'response.mcp_call.arguments_delta'. + x-stainless-const: true + output_index: + type: integer + description: The index of the output item in the response's output array. + item_id: + type: string + description: The unique identifier of the MCP tool call item being processed. + delta: + type: object + description: The partial update to the arguments for the MCP tool call. + sequence_number: + type: integer + description: The sequence number of this event. 
+ required: + - type + - output_index + - item_id + - delta + - sequence_number + x-oaiMeta: + name: response.mcp_call.arguments.delta + group: responses + example: | + { + "type": "response.mcp_call.arguments.delta", + "output_index": 0, + "item_id": "item-abc", + "delta": { + "arg1": "new_value1", + "arg2": "new_value2" + }, + "sequence_number": 1 + } + ResponseMCPCallArgumentsDoneEvent: + type: object + title: ResponseMCPCallArgumentsDoneEvent + description: | + Emitted when the arguments for an MCP tool call are finalized. + properties: + type: + type: string + enum: + - response.mcp_call.arguments_done + description: The type of the event. Always 'response.mcp_call.arguments_done'. + x-stainless-const: true + output_index: + type: integer + description: The index of the output item in the response's output array. + item_id: + type: string + description: The unique identifier of the MCP tool call item being processed. + arguments: + type: object + description: The finalized arguments for the MCP tool call. + sequence_number: + type: integer + description: The sequence number of this event. + required: + - type + - output_index + - item_id + - arguments + - sequence_number + x-oaiMeta: + name: response.mcp_call.arguments.done + group: responses + example: | + { + "type": "response.mcp_call.arguments.done", + "output_index": 0, + "item_id": "item-abc", + "arguments": { + "arg1": "value1", + "arg2": "value2" + }, + "sequence_number": 1 + } + ResponseMCPCallCompletedEvent: + type: object + title: ResponseMCPCallCompletedEvent + description: | + Emitted when an MCP tool call has completed successfully. + properties: + type: + type: string + enum: + - response.mcp_call.completed + description: The type of the event. Always 'response.mcp_call.completed'. + x-stainless-const: true + sequence_number: + type: integer + description: The sequence number of this event. 
+ required: + - type + - sequence_number + x-oaiMeta: + name: response.mcp_call.completed + group: responses + example: | + { + "type": "response.mcp_call.completed", + "sequence_number": 1 + } + ResponseMCPCallFailedEvent: + type: object + title: ResponseMCPCallFailedEvent + description: | + Emitted when an MCP tool call has failed. + properties: + type: + type: string + enum: + - response.mcp_call.failed + description: The type of the event. Always 'response.mcp_call.failed'. + x-stainless-const: true + sequence_number: + type: integer + description: The sequence number of this event. + required: + - type + - sequence_number + x-oaiMeta: + name: response.mcp_call.failed + group: responses + example: | + { + "type": "response.mcp_call.failed", + "sequence_number": 1 + } + ResponseMCPCallInProgressEvent: + type: object + title: ResponseMCPCallInProgressEvent + description: | + Emitted when an MCP tool call is in progress. + properties: + type: + type: string + enum: + - response.mcp_call.in_progress + description: The type of the event. Always 'response.mcp_call.in_progress'. + x-stainless-const: true + sequence_number: + type: integer + description: The sequence number of this event. + output_index: + type: integer + description: The index of the output item in the response's output array. + item_id: + type: string + description: The unique identifier of the MCP tool call item being processed. + required: + - type + - output_index + - item_id + - sequence_number + x-oaiMeta: + name: response.mcp_call.in_progress + group: responses + example: | + { + "type": "response.mcp_call.in_progress", + "output_index": 0, + "item_id": "item-abc", + "sequence_number": 1 + } + ResponseMCPListToolsCompletedEvent: + type: object + title: ResponseMCPListToolsCompletedEvent + description: > + Emitted when the list of available MCP tools has been successfully + retrieved. 
+ properties: + type: + type: string + enum: + - response.mcp_list_tools.completed + description: The type of the event. Always 'response.mcp_list_tools.completed'. + x-stainless-const: true + sequence_number: + type: integer + description: The sequence number of this event. + required: + - type + - sequence_number + x-oaiMeta: + name: response.mcp_list_tools.completed + group: responses + example: | + { + "type": "response.mcp_list_tools.completed", + "sequence_number": 1 + } + ResponseMCPListToolsFailedEvent: + type: object + title: ResponseMCPListToolsFailedEvent + description: | + Emitted when the attempt to list available MCP tools has failed. + properties: + type: + type: string + enum: + - response.mcp_list_tools.failed + description: The type of the event. Always 'response.mcp_list_tools.failed'. + x-stainless-const: true + sequence_number: + type: integer + description: The sequence number of this event. + required: + - type + - sequence_number + x-oaiMeta: + name: response.mcp_list_tools.failed + group: responses + example: | + { + "type": "response.mcp_list_tools.failed", + "sequence_number": 1 + } + ResponseMCPListToolsInProgressEvent: + type: object + title: ResponseMCPListToolsInProgressEvent + description: > + Emitted when the system is in the process of retrieving the list of + available MCP tools. + properties: + type: + type: string + enum: + - response.mcp_list_tools.in_progress + description: The type of the event. Always 'response.mcp_list_tools.in_progress'. + x-stainless-const: true + sequence_number: + type: integer + description: The sequence number of this event. + required: + - type + - sequence_number + x-oaiMeta: + name: response.mcp_list_tools.in_progress + group: responses + example: | + { + "type": "response.mcp_list_tools.in_progress", + "sequence_number": 1 + } ResponseModalities: type: array nullable: true @@ -34081,6 +38174,10 @@ components: type: integer description: | The index of the output item that was added. 
+ sequence_number: + type: integer + description: | + The sequence number of this event. item: $ref: "#/components/schemas/OutputItem" description: | @@ -34089,6 +38186,7 @@ components: - type - output_index - item + - sequence_number x-oaiMeta: name: response.output_item.added group: responses @@ -34102,7 +38200,8 @@ components: "type": "message", "role": "assistant", "content": [] - } + }, + "sequence_number": 1 } ResponseOutputItemDoneEvent: type: object @@ -34119,6 +38218,10 @@ components: type: integer description: | The index of the output item that was marked done. + sequence_number: + type: integer + description: | + The sequence number of this event. item: $ref: "#/components/schemas/OutputItem" description: | @@ -34127,6 +38230,7 @@ components: - type - output_index - item + - sequence_number x-oaiMeta: name: response.output_item.done group: responses @@ -34146,8 +38250,86 @@ components: "annotations": [] } ] - } + }, + "sequence_number": 1 } + ResponseOutputTextAnnotationAddedEvent: + type: object + title: ResponseOutputTextAnnotationAddedEvent + description: | + Emitted when an annotation is added to output text content. + properties: + type: + type: string + enum: + - response.output_text_annotation.added + description: The type of the event. Always + 'response.output_text_annotation.added'. + x-stainless-const: true + item_id: + type: string + description: The unique identifier of the item to which the annotation is being + added. + output_index: + type: integer + description: The index of the output item in the response's output array. + content_index: + type: integer + description: The index of the content part within the output item. + annotation_index: + type: integer + description: The index of the annotation within the content part. + sequence_number: + type: integer + description: The sequence number of this event. + annotation: + type: object + description: The annotation object being added. (See annotation schema for + details.) 
+ required: + - type + - item_id + - output_index + - content_index + - annotation_index + - annotation + - sequence_number + x-oaiMeta: + name: response.output_text_annotation.added + group: responses + example: | + { + "type": "response.output_text_annotation.added", + "item_id": "item-abc", + "output_index": 0, + "content_index": 0, + "annotation_index": 0, + "annotation": { + "type": "text_annotation", + "text": "This is a test annotation", + "start": 0, + "end": 10 + }, + "sequence_number": 1 + } + ResponsePromptVariables: + type: object + title: Prompt Variables + description: | + Optional map of values to substitute in for variables in your + prompt. The substitution values can either be strings, or other + Response input types like images or files. + x-oaiExpandable: true + x-oaiTypeLabel: map + nullable: true + additionalProperties: + x-oaiExpandable: true + x-oaiTypeLabel: map + oneOf: + - type: string + - $ref: "#/components/schemas/InputTextContent" + - $ref: "#/components/schemas/InputImageContent" + - $ref: "#/components/schemas/InputFileContent" ResponseProperties: type: object properties: @@ -34174,6 +38356,13 @@ components: reasoning: $ref: "#/components/schemas/Reasoning" nullable: true + background: + type: boolean + description: | + Whether to run the model response in the background. + [Learn more](/docs/guides/background). + default: false + nullable: true max_output_tokens: description: > An upper bound for the number of tokens that can be generated for a @@ -34181,20 +38370,13 @@ components: tokens](/docs/guides/reasoning). type: integer nullable: true - instructions: - type: string + max_tool_calls: description: > - Inserts a system (or developer) message as the first item in the - model's context. - - - When using along with `previous_response_id`, the instructions from - a previous - - response will not be carried over to the next response. This makes - it simple - - to swap out system (or developer) messages in new responses. 
+ The maximum number of total calls to built-in tools that can be + processed in a response. This maximum number applies across all + built-in tool calls, not per individual tool. Any further attempts + to call a tool by the model will be ignored. + type: integer nullable: true text: type: object @@ -34247,6 +38429,9 @@ components: - $ref: "#/components/schemas/ToolChoiceOptions" - $ref: "#/components/schemas/ToolChoiceTypes" - $ref: "#/components/schemas/ToolChoiceFunction" + - $ref: "#/components/schemas/ToolChoiceMCP" + prompt: + $ref: "#/components/schemas/Prompt" truncation: type: string description: > @@ -34264,6 +38449,234 @@ components: - disabled nullable: true default: disabled + ResponseQueuedEvent: + type: object + title: ResponseQueuedEvent + description: | + Emitted when a response is queued and waiting to be processed. + properties: + type: + type: string + enum: + - response.queued + description: The type of the event. Always 'response.queued'. + x-stainless-const: true + response: + $ref: "#/components/schemas/Response" + description: The full response object that is queued. + sequence_number: + type: integer + description: The sequence number for this event. + required: + - type + - response + - sequence_number + x-oaiMeta: + name: response.queued + group: responses + example: | + { + "type": "response.queued", + "response": { + "id": "res_123", + "status": "queued", + "created_at": "2021-01-01T00:00:00Z", + "updated_at": "2021-01-01T00:00:00Z" + }, + "sequence_number": 1 + } + ResponseReasoningDeltaEvent: + type: object + title: ResponseReasoningDeltaEvent + description: | + Emitted when there is a delta (partial update) to the reasoning content. + properties: + type: + type: string + enum: + - response.reasoning.delta + description: The type of the event. Always 'response.reasoning.delta'. + x-stainless-const: true + item_id: + type: string + description: The unique identifier of the item for which reasoning is being + updated. 
+ output_index: + type: integer + description: The index of the output item in the response's output array. + content_index: + type: integer + description: The index of the reasoning content part within the output item. + delta: + type: object + description: The partial update to the reasoning content. + sequence_number: + type: integer + description: The sequence number of this event. + required: + - type + - item_id + - output_index + - content_index + - delta + - sequence_number + x-oaiMeta: + name: response.reasoning.delta + group: responses + example: | + { + "type": "response.reasoning.delta", + "item_id": "item-abc", + "output_index": 0, + "content_index": 0, + "delta": { + "text": "This is a test delta" + }, + "sequence_number": 1 + } + ResponseReasoningDoneEvent: + type: object + title: ResponseReasoningDoneEvent + description: | + Emitted when the reasoning content is finalized for an item. + properties: + type: + type: string + enum: + - response.reasoning.done + description: The type of the event. Always 'response.reasoning.done'. + x-stainless-const: true + item_id: + type: string + description: The unique identifier of the item for which reasoning is finalized. + output_index: + type: integer + description: The index of the output item in the response's output array. + content_index: + type: integer + description: The index of the reasoning content part within the output item. + text: + type: string + description: The finalized reasoning text. + sequence_number: + type: integer + description: The sequence number of this event. 
+ required: + - type + - item_id + - output_index + - content_index + - text + - sequence_number + x-oaiMeta: + name: response.reasoning.done + group: responses + example: | + { + "type": "response.reasoning.done", + "item_id": "item-abc", + "output_index": 0, + "content_index": 0, + "text": "This is a test reasoning", + "sequence_number": 1 + } + ResponseReasoningSummaryDeltaEvent: + type: object + title: ResponseReasoningSummaryDeltaEvent + description: > + Emitted when there is a delta (partial update) to the reasoning summary + content. + properties: + type: + type: string + enum: + - response.reasoning_summary.delta + description: The type of the event. Always 'response.reasoning_summary.delta'. + x-stainless-const: true + item_id: + type: string + description: The unique identifier of the item for which the reasoning summary + is being updated. + output_index: + type: integer + description: The index of the output item in the response's output array. + summary_index: + type: integer + description: The index of the summary part within the output item. + sequence_number: + type: integer + description: The sequence number of this event. + delta: + type: object + description: The partial update to the reasoning summary content. + required: + - type + - item_id + - output_index + - summary_index + - delta + - sequence_number + x-oaiMeta: + name: response.reasoning_summary.delta + group: responses + example: | + { + "type": "response.reasoning_summary.delta", + "item_id": "item-abc", + "output_index": 0, + "summary_index": 0, + "delta": { + "text": "delta text" + }, + "sequence_number": 1 + } + ResponseReasoningSummaryDoneEvent: + type: object + title: ResponseReasoningSummaryDoneEvent + description: | + Emitted when the reasoning summary content is finalized for an item. + properties: + type: + type: string + enum: + - response.reasoning_summary.done + description: The type of the event. Always 'response.reasoning_summary.done'. 
+ x-stainless-const: true + item_id: + type: string + description: The unique identifier of the item for which the reasoning summary + is finalized. + output_index: + type: integer + description: The index of the output item in the response's output array. + summary_index: + type: integer + description: The index of the summary part within the output item. + text: + type: string + description: The finalized reasoning summary text. + sequence_number: + type: integer + description: The sequence number of this event. + required: + - type + - item_id + - output_index + - summary_index + - text + - sequence_number + x-oaiMeta: + name: response.reasoning_summary.done + group: responses + example: | + { + "type": "response.reasoning_summary.done", + "item_id": "item-abc", + "output_index": 0, + "summary_index": 0, + "text": "This is a test reasoning summary", + "sequence_number": 1 + } ResponseReasoningSummaryPartAddedEvent: type: object description: Emitted when a new reasoning summary part is added. @@ -34288,6 +38701,10 @@ components: type: integer description: | The index of the summary part within the reasoning summary. + sequence_number: + type: integer + description: | + The sequence number of this event. part: type: object description: | @@ -34311,6 +38728,7 @@ components: - output_index - summary_index - part + - sequence_number x-oaiMeta: name: response.reasoning_summary_part.added group: responses @@ -34323,7 +38741,8 @@ components: "part": { "type": "summary_text", "text": "" - } + }, + "sequence_number": 1 } ResponseReasoningSummaryPartDoneEvent: type: object @@ -34348,6 +38767,10 @@ components: type: integer description: | The index of the summary part within the reasoning summary. + sequence_number: + type: integer + description: | + The sequence number of this event. 
part: type: object description: | @@ -34371,6 +38794,7 @@ components: - output_index - summary_index - part + - sequence_number x-oaiMeta: name: response.reasoning_summary_part.done group: responses @@ -34383,7 +38807,8 @@ components: "part": { "type": "summary_text", "text": "**Responding to a greeting**\n\nThe user just said, \"Hello!\" So, it seems I need to engage. I'll greet them back and offer help since they're looking to chat. I could say something like, \"Hello! How can I assist you today?\" That feels friendly and open. They didn't ask a specific question, so this approach will work well for starting a conversation. Let's see where it goes from there!" - } + }, + "sequence_number": 1 } ResponseReasoningSummaryTextDeltaEvent: type: object @@ -34414,22 +38839,28 @@ components: type: string description: | The text delta that was added to the summary. + sequence_number: + type: integer + description: | + The sequence number of this event. required: - type - item_id - output_index - summary_index - delta + - sequence_number x-oaiMeta: name: response.reasoning_summary_text.delta group: responses - example: | + example: > { "type": "response.reasoning_summary_text.delta", "item_id": "rs_6806bfca0b2481918a5748308061a2600d3ce51bdffd5476", "output_index": 0, "summary_index": 0, - "delta": "**Respond" + "delta": "**Responding to a greeting**\n\nThe user just said, \"Hello!\" So, it seems I need to engage. I'll greet them back and offer help since they're looking to chat. I could say something like, \"Hello! How can I assist you today?\" That feels friendly and open. They didn't ask a specific question, so this approach will work well for starting a conversation. Let's see where it goes from there!", + "sequence_number": 1 } ResponseReasoningSummaryTextDoneEvent: type: object @@ -34458,12 +38889,17 @@ components: type: string description: | The full text of the completed reasoning summary. 
+ sequence_number: + type: integer + description: | + The sequence number of this event. required: - type - item_id - output_index - summary_index - text + - sequence_number x-oaiMeta: name: response.reasoning_summary_text.done group: responses @@ -34473,7 +38909,8 @@ components: "item_id": "rs_6806bfca0b2481918a5748308061a2600d3ce51bdffd5476", "output_index": 0, "summary_index": 0, - "text": "**Responding to a greeting**\n\nThe user just said, \"Hello!\" So, it seems I need to engage. I'll greet them back and offer help since they're looking to chat. I could say something like, \"Hello! How can I assist you today?\" That feels friendly and open. They didn't ask a specific question, so this approach will work well for starting a conversation. Let's see where it goes from there!" + "text": "**Responding to a greeting**\n\nThe user just said, \"Hello!\" So, it seems I need to engage. I'll greet them back and offer help since they're looking to chat. I could say something like, \"Hello! How can I assist you today?\" That feels friendly and open. They didn't ask a specific question, so this approach will work well for starting a conversation. Let's see where it goes from there!", + "sequence_number": 1 } ResponseRefusalDeltaEvent: type: object @@ -34502,12 +38939,17 @@ components: type: string description: | The refusal text that is added. + sequence_number: + type: integer + description: | + The sequence number of this event. required: - type - item_id - output_index - content_index - delta + - sequence_number x-oaiMeta: name: response.refusal.delta group: responses @@ -34517,7 +38959,8 @@ components: "item_id": "msg_123", "output_index": 0, "content_index": 0, - "delta": "refusal text so far" + "delta": "refusal text so far", + "sequence_number": 1 } ResponseRefusalDoneEvent: type: object @@ -34546,12 +38989,17 @@ components: type: string description: | The refusal text that is finalized. 
+ sequence_number: + type: integer + description: | + The sequence number of this event. required: - type - item_id - output_index - content_index - refusal + - sequence_number x-oaiMeta: name: response.refusal.done group: responses @@ -34561,7 +39009,8 @@ components: "item_id": "item-abc", "output_index": 1, "content_index": 2, - "refusal": "final refusal text" + "refusal": "final refusal text", + "sequence_number": 1 } ResponseStreamEvent: anyOf: @@ -34595,68 +39044,31 @@ components: - $ref: "#/components/schemas/ResponseReasoningSummaryTextDoneEvent" - $ref: "#/components/schemas/ResponseRefusalDeltaEvent" - $ref: "#/components/schemas/ResponseRefusalDoneEvent" - - $ref: "#/components/schemas/ResponseTextAnnotationDeltaEvent" - $ref: "#/components/schemas/ResponseTextDeltaEvent" - $ref: "#/components/schemas/ResponseTextDoneEvent" - $ref: "#/components/schemas/ResponseWebSearchCallCompletedEvent" - $ref: "#/components/schemas/ResponseWebSearchCallInProgressEvent" - $ref: "#/components/schemas/ResponseWebSearchCallSearchingEvent" + - $ref: "#/components/schemas/ResponseImageGenCallCompletedEvent" + - $ref: "#/components/schemas/ResponseImageGenCallGeneratingEvent" + - $ref: "#/components/schemas/ResponseImageGenCallInProgressEvent" + - $ref: "#/components/schemas/ResponseImageGenCallPartialImageEvent" + - $ref: "#/components/schemas/ResponseMCPCallArgumentsDeltaEvent" + - $ref: "#/components/schemas/ResponseMCPCallArgumentsDoneEvent" + - $ref: "#/components/schemas/ResponseMCPCallCompletedEvent" + - $ref: "#/components/schemas/ResponseMCPCallFailedEvent" + - $ref: "#/components/schemas/ResponseMCPCallInProgressEvent" + - $ref: "#/components/schemas/ResponseMCPListToolsCompletedEvent" + - $ref: "#/components/schemas/ResponseMCPListToolsFailedEvent" + - $ref: "#/components/schemas/ResponseMCPListToolsInProgressEvent" + - $ref: "#/components/schemas/ResponseOutputTextAnnotationAddedEvent" + - $ref: "#/components/schemas/ResponseQueuedEvent" + - $ref: 
"#/components/schemas/ResponseReasoningDeltaEvent" + - $ref: "#/components/schemas/ResponseReasoningDoneEvent" + - $ref: "#/components/schemas/ResponseReasoningSummaryDeltaEvent" + - $ref: "#/components/schemas/ResponseReasoningSummaryDoneEvent" discriminator: propertyName: type - ResponseTextAnnotationDeltaEvent: - type: object - description: Emitted when a text annotation is added. - properties: - type: - type: string - description: > - The type of the event. Always - `response.output_text.annotation.added`. - enum: - - response.output_text.annotation.added - x-stainless-const: true - item_id: - type: string - description: | - The ID of the output item that the text annotation was added to. - output_index: - type: integer - description: | - The index of the output item that the text annotation was added to. - content_index: - type: integer - description: | - The index of the content part that the text annotation was added to. - annotation_index: - type: integer - description: | - The index of the annotation that was added. - annotation: - $ref: "#/components/schemas/Annotation" - required: - - type - - item_id - - output_index - - content_index - - annotation_index - - annotation - x-oaiMeta: - name: response.output_text.annotation.added - group: responses - example: | - { - "type": "response.output_text.annotation.added", - "item_id": "msg_abc123", - "output_index": 1, - "content_index": 0, - "annotation_index": 0, - "annotation": { - "type": "file_citation", - "index": 390, - "file_id": "file-4wDz5b167pAf72nx1h9eiN", - "filename": "dragons.pdf" - } - } ResponseTextDeltaEvent: type: object description: Emitted when there is an additional text delta. @@ -34684,12 +39096,16 @@ components: type: string description: | The text delta that was added. + sequence_number: + type: integer + description: The sequence number for this event. 
required: - type - item_id - output_index - content_index - delta + - sequence_number x-oaiMeta: name: response.output_text.delta group: responses @@ -34699,7 +39115,8 @@ components: "item_id": "msg_123", "output_index": 0, "content_index": 0, - "delta": "In" + "delta": "In", + "sequence_number": 1 } ResponseTextDoneEvent: type: object @@ -34728,12 +39145,16 @@ components: type: string description: | The text content that is finalized. + sequence_number: + type: integer + description: The sequence number for this event. required: - type - item_id - output_index - content_index - text + - sequence_number x-oaiMeta: name: response.output_text.done group: responses @@ -34743,7 +39164,8 @@ components: "item_id": "msg_123", "output_index": 0, "content_index": 0, - "text": "In a shimmering forest under a sky full of stars, a lonely unicorn named Lila discovered a hidden pond that glowed with moonlight. Every night, she would leave sparkling, magical flowers by the water's edge, hoping to share her beauty with others. One enchanting evening, she woke to find a group of friendly animals gathered around, eager to be friends and share in her magic." + "text": "In a shimmering forest under a sky full of stars, a lonely unicorn named Lila discovered a hidden pond that glowed with moonlight. Every night, she would leave sparkling, magical flowers by the water's edge, hoping to share her beauty with others. One enchanting evening, she woke to find a group of friendly animals gathered around, eager to be friends and share in her magic.", + "sequence_number": 1 } ResponseUsage: type: object @@ -34806,10 +39228,14 @@ components: type: string description: | Unique ID for the output item associated with the web search call. + sequence_number: + type: integer + description: The sequence number of the web search call being processed. 
required: - type - output_index - item_id + - sequence_number x-oaiMeta: name: response.web_search_call.completed group: responses @@ -34818,6 +39244,7 @@ components: "type": "response.web_search_call.completed", "output_index": 0, "item_id": "ws_123", + "sequence_number": 0 } ResponseWebSearchCallInProgressEvent: type: object @@ -34839,10 +39266,14 @@ components: type: string description: | Unique ID for the output item associated with the web search call. + sequence_number: + type: integer + description: The sequence number of the web search call being processed. required: - type - output_index - item_id + - sequence_number x-oaiMeta: name: response.web_search_call.in_progress group: responses @@ -34851,6 +39282,7 @@ components: "type": "response.web_search_call.in_progress", "output_index": 0, "item_id": "ws_123", + "sequence_number": 0 } ResponseWebSearchCallSearchingEvent: type: object @@ -34872,10 +39304,14 @@ components: type: string description: | Unique ID for the output item associated with the web search call. + sequence_number: + type: integer + description: The sequence number of the web search call being processed. required: - type - output_index - item_id + - sequence_number x-oaiMeta: name: response.web_search_call.searching group: responses @@ -34884,6 +39320,7 @@ components: "type": "response.web_search_call.searching", "output_index": 0, "item_id": "ws_123", + "sequence_number": 0 } RunCompletionUsage: type: object @@ -34904,6 +39341,135 @@ components: - completion_tokens - total_tokens nullable: true + RunGraderRequest: + type: object + title: RunGraderRequest + properties: + grader: + type: object + description: The grader used for the fine-tuning job. 
+ oneOf: + - $ref: "#/components/schemas/GraderStringCheck" + - $ref: "#/components/schemas/GraderTextSimilarity" + - $ref: "#/components/schemas/GraderPython" + - $ref: "#/components/schemas/GraderScoreModel" + - $ref: "#/components/schemas/GraderMulti" + item: + type: object + description: > + The dataset item provided to the grader. This will be used to + populate + + the `item` namespace. See [the guide](/docs/guides/graders) for more + details. + model_sample: + type: string + description: >+ + The model sample to be evaluated. This value will be used to + populate + + the `sample` namespace. See [the guide](/docs/guides/graders) for + more details. + + The `output_json` variable will be populated if the model sample is + a + + valid JSON string. + + required: + - grader + - model_sample + RunGraderResponse: + type: object + properties: + reward: + type: number + metadata: + type: object + properties: + name: + type: string + type: + type: string + errors: + type: object + properties: + formula_parse_error: + type: boolean + sample_parse_error: + type: boolean + truncated_observation_error: + type: boolean + unresponsive_reward_error: + type: boolean + invalid_variable_error: + type: boolean + other_error: + type: boolean + python_grader_server_error: + type: boolean + python_grader_server_error_type: + type: string + nullable: true + python_grader_runtime_error: + type: boolean + python_grader_runtime_error_details: + type: string + nullable: true + model_grader_server_error: + type: boolean + model_grader_refusal_error: + type: boolean + model_grader_parse_error: + type: boolean + model_grader_server_error_details: + type: string + nullable: true + required: + - formula_parse_error + - sample_parse_error + - truncated_observation_error + - unresponsive_reward_error + - invalid_variable_error + - other_error + - python_grader_server_error + - python_grader_server_error_type + - python_grader_runtime_error + - python_grader_runtime_error_details + - 
model_grader_server_error
+                - model_grader_refusal_error
+                - model_grader_parse_error
+                - model_grader_server_error_details
+            execution_time:
+              type: number
+            scores:
+              type: object
+              additionalProperties: {}
+            token_usage:
+              type: integer
+              nullable: true
+            sampled_model_name:
+              type: string
+              nullable: true
+          required:
+            - name
+            - type
+            - errors
+            - execution_time
+            - scores
+            - token_usage
+            - sampled_model_name
+        sub_rewards:
+          type: object
+          additionalProperties: {}
+        model_grader_token_usage_per_model:
+          type: object
+          additionalProperties: {}
+      required:
+        - reward
+        - metadata
+        - sub_rewards
+        - model_grader_token_usage_per_model
     RunObject:
       type: object
       title: A run on a thread
@@ -36155,23 +40721,92 @@ components:
     ServiceTier:
       type: string
       description: >
-        Specifies the latency tier to use for processing the request. This
-        parameter is relevant for customers subscribed to the scale tier
-        service:
-          - If set to 'auto', and the Project is Scale tier enabled, the system
-        will utilize scale tier credits until they are exhausted.
-          - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
-          - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
-          - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing).
+        Specifies the processing type used for serving the request.
+          - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
+          - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
+ - If set to '[flex](/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service tier. [Contact sales](https://openai.com/contact-sales) to learn more about Priority processing. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` utilized. + When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter. enum: - auto - default - flex + - scale + - priority nullable: true default: auto + SpeechAudioDeltaEvent: + type: object + description: Emitted for each chunk of audio data generated during speech synthesis. + properties: + type: + type: string + description: | + The type of the event. Always `speech.audio.delta`. + enum: + - speech.audio.delta + x-stainless-const: true + audio: + type: string + description: | + A chunk of Base64-encoded audio data. + required: + - type + - audio + x-oaiMeta: + name: Stream Event (speech.audio.delta) + group: speech + example: | + { + "type": "speech.audio.delta", + "audio": "base64-encoded-audio-data" + } + SpeechAudioDoneEvent: + type: object + description: Emitted when the speech synthesis is complete and all audio has + been streamed. + properties: + type: + type: string + description: | + The type of the event. Always `speech.audio.done`. + enum: + - speech.audio.done + x-stainless-const: true + usage: + type: object + description: | + Token usage statistics for the request. + properties: + input_tokens: + type: integer + description: Number of input tokens in the prompt. + output_tokens: + type: integer + description: Number of output tokens generated. + total_tokens: + type: integer + description: Total number of tokens used (input + output). 
+ required: + - input_tokens + - output_tokens + - total_tokens + required: + - type + - usage + x-oaiMeta: + name: Stream Event (speech.audio.done) + group: speech + example: | + { + "type": "speech.audio.done", + "usage": { + "input_tokens": 14, + "output_tokens": 101, + "total_tokens": 115 + } + } StaticChunkingStrategy: type: object additionalProperties: false @@ -36318,1731 +40953,2801 @@ components: x-stainless-const: true description: type: string - description: > - A description of what the response format is for, used by the model - to - - determine how to respond in the format. - name: + description: > + A description of what the response format is for, used by the model + to + + determine how to respond in the format. + name: + type: string + description: | + The name of the response format. Must be a-z, A-Z, 0-9, or contain + underscores and dashes, with a maximum length of 64. + schema: + $ref: "#/components/schemas/ResponseFormatJsonSchemaSchema" + strict: + type: boolean + nullable: true + default: false + description: > + Whether to enable strict schema adherence when generating the + output. + + If set to true, the model will always follow the exact schema + defined + + in the `schema` field. Only a subset of JSON Schema is supported + when + + `strict` is `true`. To learn more, read the [Structured Outputs + + guide](/docs/guides/structured-outputs). + required: + - type + - schema + - name + ThreadObject: + type: object + title: Thread + description: Represents a thread that contains + [messages](/docs/api-reference/messages). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread`. + type: string + enum: + - thread + x-stainless-const: true + created_at: + description: The Unix timestamp (in seconds) for when the thread was created. 
+ type: integer + tool_resources: + type: object + description: > + A set of resources that are made available to the assistant's tools + in this thread. The resources are specific to the type of tool. For + example, the `code_interpreter` tool requires a list of file IDs, + while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a + maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The [vector store](/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector + store attached to the thread. + maxItems: 1 + items: + type: string + nullable: true + metadata: + $ref: "#/components/schemas/Metadata" + required: + - id + - object + - created_at + - tool_resources + - metadata + x-oaiMeta: + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } + ThreadStreamEvent: + oneOf: + - type: object + properties: + enabled: + type: boolean + description: Whether to enable input audio transcription. + event: + type: string + enum: + - thread.created + x-stainless-const: true + data: + $ref: "#/components/schemas/ThreadObject" + required: + - event + - data + description: Occurs when a new [thread](/docs/api-reference/threads/object) is + created. 
+ x-oaiMeta: + dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + ToggleCertificatesRequest: + type: object + properties: + certificate_ids: + type: array + items: + type: string + example: cert_abc + minItems: 1 + maxItems: 10 + required: + - certificate_ids + Tool: + description: | + A tool that can be used to generate a response. + discriminator: + propertyName: type + oneOf: + - $ref: "#/components/schemas/FunctionTool" + - $ref: "#/components/schemas/FileSearchTool" + - $ref: "#/components/schemas/WebSearchPreviewTool" + - $ref: "#/components/schemas/ComputerUsePreviewTool" + - $ref: "#/components/schemas/MCPTool" + - $ref: "#/components/schemas/CodeInterpreterTool" + - $ref: "#/components/schemas/ImageGenTool" + - $ref: "#/components/schemas/LocalShellTool" + ToolChoiceFunction: + type: object + title: Function tool + description: | + Use this option to force the model to call a specific function. + properties: + type: + type: string + enum: + - function + description: For function calling, the type is always `function`. + x-stainless-const: true + name: + type: string + description: The name of the function to call. + required: + - type + - name + ToolChoiceMCP: + type: object + title: MCP tool + description: > + Use this option to force the model to call a specific tool on a remote + MCP server. + properties: + type: + type: string + enum: + - mcp + description: For MCP tools, the type is always `mcp`. + x-stainless-const: true + server_label: + type: string + description: | + The label of the MCP server to use. + name: + type: string + description: | + The name of the tool to call on the server. + nullable: true + required: + - type + - server_label + ToolChoiceOptions: + type: string + title: Tool choice mode + description: > + Controls which (if any) tool is called by the model. + + + `none` means the model will not call any tool and instead generates a + message. 
+
+
+        `auto` means the model can pick between generating a message or calling
+        one or
+
+        more tools.
+
+
+        `required` means the model must call one or more tools.
+      enum:
+        - none
+        - auto
+        - required
+    ToolChoiceTypes:
+      type: object
+      title: Hosted tool
+      description: >
+        Indicates that the model should use a built-in tool to generate a
+        response.
+
+        [Learn more about built-in tools](/docs/guides/tools).
+      properties:
+        type:
+          type: string
+          description: |
+            The type of hosted tool the model should use. Learn more about
+            [built-in tools](/docs/guides/tools).
+
+            Allowed values are:
+            - `file_search`
+            - `web_search_preview`
+            - `computer_use_preview`
+            - `code_interpreter`
+            - `image_generation`
+          enum:
+            - file_search
+            - web_search_preview
+            - computer_use_preview
+            - web_search_preview_2025_03_11
+            - image_generation
+            - code_interpreter
+      required:
+        - type
+    TranscriptTextDeltaEvent:
+      type: object
+      description: Emitted when there is an additional text delta. This is also the
+        first event emitted when the transcription starts. Only emitted when you
+        [create a transcription](/docs/api-reference/audio/create-transcription)
+        with the `Stream` parameter set to `true`.
+      properties:
+        type:
+          type: string
+          description: |
+            The type of the event. Always `transcript.text.delta`.
+          enum:
+            - transcript.text.delta
+          x-stainless-const: true
+        delta:
+          type: string
+          description: |
+            The text delta that was additionally transcribed.
+        logprobs:
+          type: array
+          description: >
+            The log probabilities of the delta. Only included if you [create a
+            transcription](/docs/api-reference/audio/create-transcription) with
+            the `include[]` parameter set to `logprobs`.
+          items:
+            type: object
+            properties:
+              token:
+                type: string
+                description: |
+                  The token that was used to generate the log probability.
+              logprob:
+                type: number
+                description: |
+                  The log probability of the token.
+ bytes: + type: array + items: + type: integer + description: | + The bytes that were used to generate the log probability. + required: + - type + - delta + x-oaiMeta: + name: Stream Event (transcript.text.delta) + group: transcript + example: | + { + "type": "transcript.text.delta", + "delta": " wonderful" + } + TranscriptTextDoneEvent: + type: object + description: Emitted when the transcription is complete. Contains the complete + transcription text. Only emitted when you [create a + transcription](/docs/api-reference/audio/create-transcription) with the + `Stream` parameter set to `true`. + properties: + type: + type: string + description: | + The type of the event. Always `transcript.text.done`. + enum: + - transcript.text.done + x-stainless-const: true + text: + type: string + description: | + The text that was transcribed. + logprobs: + type: array + description: > + The log probabilities of the individual tokens in the transcription. + Only included if you [create a + transcription](/docs/api-reference/audio/create-transcription) with + the `include[]` parameter set to `logprobs`. + items: + type: object + properties: + token: + type: string + description: | + The token that was used to generate the log probability. + logprob: + type: number + description: | + The log probability of the token. + bytes: + type: array + items: + type: integer + description: | + The bytes that were used to generate the log probability. 
+ usage: + $ref: "#/components/schemas/TranscriptTextUsageTokens" + required: + - type + - text + x-oaiMeta: + name: Stream Event (transcript.text.done) + group: transcript + example: > + { + "type": "transcript.text.done", + "text": "I see skies of blue and clouds of white, the bright blessed days, the dark sacred nights, and I think to myself, what a wonderful world.", + "usage": { + "type": "tokens", + "input_tokens": 14, + "input_token_details": { + "text_tokens": 10, + "audio_tokens": 4 + }, + "output_tokens": 31, + "total_tokens": 45 + } + } + TranscriptTextUsageDuration: + type: object + title: Duration Usage + description: Usage statistics for models billed by audio input duration. + properties: + type: + type: string + enum: + - duration + description: The type of the usage object. Always `duration` for this variant. + x-stainless-const: true + duration: + type: number + description: Duration of the input audio in seconds. + required: + - type + - duration + TranscriptTextUsageTokens: + type: object + title: Token Usage + description: Usage statistics for models billed by token usage. + properties: + type: + type: string + enum: + - tokens + description: The type of the usage object. Always `tokens` for this variant. + x-stainless-const: true + input_tokens: + type: integer + description: Number of input tokens billed for this request. + input_token_details: + type: object + description: Details about the input tokens billed for this request. + properties: + text_tokens: + type: integer + description: Number of text tokens billed for this request. + audio_tokens: + type: integer + description: Number of audio tokens billed for this request. + output_tokens: + type: integer + description: Number of output tokens generated. + total_tokens: + type: integer + description: Total number of tokens used (input + output). 
+ required: + - type + - input_tokens + - output_tokens + - total_tokens + TranscriptionChunkingStrategy: + type: object + description: >- + Controls how the audio is cut into chunks. When set to `"auto"`, the + + server first normalizes loudness and then uses voice activity detection + (VAD) to + + choose boundaries. `server_vad` object can be provided to tweak VAD + detection + + parameters manually. If unset, the audio is transcribed as a single + block. + oneOf: + - type: string + enum: + - auto + default: + - auto + description: > + Automatically set chunking parameters based on the audio. Must be + set to `"auto"`. + x-stainless-const: true + - $ref: "#/components/schemas/VadConfig" + TranscriptionInclude: + type: string + enum: + - logprobs + default: [] + TranscriptionSegment: + type: object + properties: + id: + type: integer + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. + text: + type: string + description: Text content of the segment. + tokens: + type: array + items: + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: + type: number + format: float + description: Average logprob of the segment. If the value is lower than -1, + consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, + consider the compression failed. + no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher + than 1.0 and the `avg_logprob` is below -1, consider this segment + silent. 
+      required:
+        - id
+        - seek
+        - start
+        - end
+        - text
+        - tokens
+        - temperature
+        - avg_logprob
+        - compression_ratio
+        - no_speech_prob
+    TranscriptionWord:
+      type: object
+      properties:
+        word:
+          type: string
+          description: The text content of the word.
+        start:
+          type: number
+          format: float
+          description: Start time of the word in seconds.
+        end:
+          type: number
+          format: float
+          description: End time of the word in seconds.
+      required:
+        - word
+        - start
+        - end
+    TruncationObject:
+      type: object
+      title: Thread Truncation Controls
+      description: Controls for how a thread will be truncated prior to the run. Use
+        this to control the initial context window of the run.
+      properties:
+        type:
+          type: string
+          description: The truncation strategy to use for the thread. The default is
+            `auto`. If set to `last_messages`, the thread will be truncated to
+            the n most recent messages in the thread. When set to `auto`,
+            messages in the middle of the thread will be dropped to fit the
+            context length of the model, `max_prompt_tokens`.
+          enum:
+            - auto
+            - last_messages
+        last_messages:
+          type: integer
+          description: The number of most recent messages from the thread when
+            constructing the context for the run.
+          minimum: 1
+          nullable: true
+      required:
+        - type
+    Type:
+      type: object
+      title: Type
+      description: |
+        An action to type in text.
+      properties:
+        type:
+          type: string
+          enum:
+            - type
+          default: type
+          description: |
+            Specifies the event type. For a type action, this property is
+            always set to `type`.
+          x-stainless-const: true
+        text:
+          type: string
+          description: |
+            The text to type.
+ required: + - type + - text + UpdateVectorStoreFileAttributesRequest: + type: object + additionalProperties: false + properties: + attributes: + $ref: "#/components/schemas/VectorStoreFileAttributes" + required: + - attributes + x-oaiMeta: + name: Update vector store file attributes request + UpdateVectorStoreRequest: + type: object + additionalProperties: false + properties: + name: + description: The name of the vector store. + type: string + nullable: true + expires_after: + allOf: + - $ref: "#/components/schemas/VectorStoreExpirationAfter" + - nullable: true + metadata: + $ref: "#/components/schemas/Metadata" + Upload: + type: object + title: Upload + description: | + The Upload object can accept byte chunks in the form of Parts. + properties: + id: + type: string + description: The Upload unique identifier, which can be referenced in API + endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + filename: + type: string + description: The name of the file to be uploaded. + bytes: + type: integer + description: The intended number of bytes to be uploaded. + purpose: + type: string + description: The intended purpose of the file. [Please refer + here](/docs/api-reference/files/object#files/object-purpose) for + acceptable values. + status: + type: string + description: The status of the Upload. + enum: + - pending + - completed + - cancelled + - expired + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload will expire. + object: + type: string + description: The object type, which is always "upload". + enum: + - upload + x-stainless-const: true + file: + allOf: + - $ref: "#/components/schemas/OpenAIFile" + - nullable: true + description: The ready File object after the Upload is completed. 
+ required: + - bytes + - created_at + - expires_at + - filename + - id + - purpose + - status + x-oaiMeta: + name: The upload object + example: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } + } + UploadCertificateRequest: + type: object + properties: + name: + type: string + description: An optional name for the certificate + content: + type: string + description: The certificate content in PEM format + required: + - content + UploadPart: + type: object + title: UploadPart + description: > + The upload Part represents a chunk of bytes we can add to an Upload + object. + properties: + id: + type: string + description: The upload Part unique identifier, which can be referenced in API + endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Part was created. + upload_id: + type: string + description: The ID of the Upload object that this Part was added to. + object: + type: string + description: The object type, which is always `upload.part`. + enum: + - upload.part + x-stainless-const: true + required: + - created_at + - id + - object + - upload_id + x-oaiMeta: + name: The upload part object + example: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719186911, + "upload_id": "upload_abc123" + } + UsageAudioSpeechesResult: + type: object + description: The aggregated audio speeches usage details of the specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.audio_speeches.result + x-stainless-const: true + characters: + type: integer + description: The number of characters processed. 
+ num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string + nullable: true + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + nullable: true + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + nullable: true + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + nullable: true + description: When `group_by=model`, this field provides the model name of the + grouped usage result. + required: + - object + - characters + - num_model_requests + x-oaiMeta: + name: Audio speeches usage object + example: | + { + "object": "organization.usage.audio_speeches.result", + "characters": 45, + "num_model_requests": 1, + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "tts-1" + } + UsageAudioTranscriptionsResult: + type: object + description: The aggregated audio transcriptions usage details of the specific + time bucket. + properties: + object: + type: string + enum: + - organization.usage.audio_transcriptions.result + x-stainless-const: true + seconds: + type: integer + description: The number of seconds processed. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string + nullable: true + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + nullable: true + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + nullable: true + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. 
+        model:
+          type: string
+          nullable: true
+          description: When `group_by=model`, this field provides the model name of the
+            grouped usage result.
+      required:
+        - object
+        - seconds
+        - num_model_requests
+      x-oaiMeta:
+        name: Audio transcriptions usage object
+        example: |
+          {
+              "object": "organization.usage.audio_transcriptions.result",
+              "seconds": 10,
+              "num_model_requests": 1,
+              "project_id": "proj_abc",
+              "user_id": "user-abc",
+              "api_key_id": "key_abc",
+              "model": "tts-1"
+          }
+    UsageCodeInterpreterSessionsResult:
+      type: object
+      description: The aggregated code interpreter sessions usage details of the
+        specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.code_interpreter_sessions.result
+          x-stainless-const: true
+        num_sessions:
+          type: integer
+          description: The number of code interpreter sessions.
+        project_id:
+          type: string
+          nullable: true
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped usage result.
+      required:
+        - object
+        - num_sessions
+      x-oaiMeta:
+        name: Code interpreter sessions usage object
+        example: |
+          {
+              "object": "organization.usage.code_interpreter_sessions.result",
+              "num_sessions": 1,
+              "project_id": "proj_abc"
+          }
+    UsageCompletionsResult:
+      type: object
+      description: The aggregated completions usage details of the specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.completions.result
+          x-stainless-const: true
+        input_tokens:
+          type: integer
+          description: The aggregated number of text input tokens used, including cached
+            tokens. For customers subscribed to scale tier, this includes scale
+            tier tokens.
+        input_cached_tokens:
+          type: integer
+          description: The aggregated number of text input tokens that has been cached
+            from previous requests. For customers subscribed to scale tier, this
+            includes scale tier tokens.
+        output_tokens:
+          type: integer
+          description: The aggregated number of text output tokens used. 
For customers + subscribe to scale tier, this includes scale tier tokens. + input_audio_tokens: + type: integer + description: The aggregated number of audio input tokens used, including cached + tokens. + output_audio_tokens: + type: integer + description: The aggregated number of audio output tokens used. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string + nullable: true + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + nullable: true + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + nullable: true + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + nullable: true + description: When `group_by=model`, this field provides the model name of the + grouped usage result. + batch: + type: boolean + nullable: true + description: When `group_by=batch`, this field tells whether the grouped usage + result is batch or not. + required: + - object + - input_tokens + - output_tokens + - num_model_requests + x-oaiMeta: + name: Completions usage object + example: | + { + "object": "organization.usage.completions.result", + "input_tokens": 5000, + "output_tokens": 1000, + "input_cached_tokens": 4000, + "input_audio_tokens": 300, + "output_audio_tokens": 200, + "num_model_requests": 5, + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "gpt-4o-mini-2024-07-18", + "batch": false + } + UsageEmbeddingsResult: + type: object + description: The aggregated embeddings usage details of the specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.embeddings.result + x-stainless-const: true + input_tokens: + type: integer + description: The aggregated number of input tokens used. 
+ num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string + nullable: true + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + nullable: true + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + nullable: true + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + nullable: true + description: When `group_by=model`, this field provides the model name of the + grouped usage result. + required: + - object + - input_tokens + - num_model_requests + x-oaiMeta: + name: Embeddings usage object + example: | + { + "object": "organization.usage.embeddings.result", + "input_tokens": 20, + "num_model_requests": 2, + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "text-embedding-ada-002-v2" + } + UsageImagesResult: + type: object + description: The aggregated images usage details of the specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.images.result + x-stainless-const: true + images: + type: integer + description: The number of images processed. + num_model_requests: + type: integer + description: The count of requests made to the model. + source: + type: string + nullable: true + description: When `group_by=source`, this field provides the source of the + grouped usage result, possible values are `image.generation`, + `image.edit`, `image.variation`. + size: + type: string + nullable: true + description: When `group_by=size`, this field provides the image size of the + grouped usage result. + project_id: + type: string + nullable: true + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. 
+ user_id: + type: string + nullable: true + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: type: string - description: | - The name of the response format. Must be a-z, A-Z, 0-9, or contain - underscores and dashes, with a maximum length of 64. - schema: - $ref: "#/components/schemas/ResponseFormatJsonSchemaSchema" - strict: - type: boolean nullable: true - default: false - description: > - Whether to enable strict schema adherence when generating the - output. - - If set to true, the model will always follow the exact schema - defined - - in the `schema` field. Only a subset of JSON Schema is supported - when - - `strict` is `true`. To learn more, read the [Structured Outputs - - guide](/docs/guides/structured-outputs). + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + nullable: true + description: When `group_by=model`, this field provides the model name of the + grouped usage result. required: - - type - - schema - - name - ThreadObject: + - object + - images + - num_model_requests + x-oaiMeta: + name: Images usage object + example: | + { + "object": "organization.usage.images.result", + "images": 2, + "num_model_requests": 2, + "size": "1024x1024", + "source": "image.generation", + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "dall-e-3" + } + UsageModerationsResult: type: object - title: Thread - description: Represents a thread that contains - [messages](/docs/api-reference/messages). + description: The aggregated moderations usage details of the specific time bucket. properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string object: - description: The object type, which is always `thread`. 
type: string enum: - - thread + - organization.usage.moderations.result x-stainless-const: true - created_at: - description: The Unix timestamp (in seconds) for when the thread was created. + input_tokens: type: integer - tool_resources: - type: object - description: > - A set of resources that are made available to the assistant's tools - in this thread. The resources are specific to the type of tool. For - example, the `code_interpreter` tool requires a list of file IDs, - while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: > - A list of [file](/docs/api-reference/files) IDs made - available to the `code_interpreter` tool. There can be a - maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: > - The [vector store](/docs/api-reference/vector-stores/object) - attached to this thread. There can be a maximum of 1 vector - store attached to the thread. - maxItems: 1 - items: - type: string + description: The aggregated number of input tokens used. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string nullable: true - metadata: - $ref: "#/components/schemas/Metadata" + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + nullable: true + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + nullable: true + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + nullable: true + description: When `group_by=model`, this field provides the model name of the + grouped usage result. 
required: - - id - object - - created_at - - tool_resources - - metadata + - input_tokens + - num_model_requests x-oaiMeta: - name: The thread object - beta: true + name: Moderations usage object example: | { - "id": "thread_abc123", - "object": "thread", - "created_at": 1698107661, - "metadata": {} + "object": "organization.usage.moderations.result", + "input_tokens": 20, + "num_model_requests": 2, + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "text-moderation" } - ThreadStreamEvent: - oneOf: - - type: object - properties: - enabled: - type: boolean - description: Whether to enable input audio transcription. - event: - type: string - enum: - - thread.created - x-stainless-const: true - data: - $ref: "#/components/schemas/ThreadObject" - required: - - event - - data - description: Occurs when a new [thread](/docs/api-reference/threads/object) is - created. - x-oaiMeta: - dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" - ToggleCertificatesRequest: - type: object - properties: - certificate_ids: - type: array - items: - type: string - example: cert_abc - minItems: 1 - maxItems: 10 - required: - - certificate_ids - ToolChoiceFunction: + UsageResponse: type: object - title: Function tool - description: | - Use this option to force the model to call a specific function. properties: - type: + object: type: string enum: - - function - description: For function calling, the type is always `function`. + - page x-stainless-const: true - name: + data: + type: array + items: + $ref: "#/components/schemas/UsageTimeBucket" + has_more: + type: boolean + next_page: type: string - description: The name of the function to call. required: - - type - - name - ToolChoiceOptions: - type: string - title: Tool choice mode - description: > - Controls which (if any) tool is called by the model. - - - `none` means the model will not call any tool and instead generates a - message. 
- - - `auto` means the model can pick between generating a message or calling - one or - - more tools. - - - `required` means the model must call one or more tools. - enum: - - none - - auto - - required - ToolChoiceTypes: + - object + - data + - has_more + - next_page + UsageTimeBucket: type: object - title: Hosted tool - description: > - Indicates that the model should use a built-in tool to generate a - response. - - [Learn more about built-in tools](/docs/guides/tools). properties: - type: + object: type: string - description: | - The type of hosted tool the model should to use. Learn more about - [built-in tools](/docs/guides/tools). - - Allowed values are: - - `file_search` - - `web_search_preview` - - `computer_use_preview` enum: - - file_search - - web_search_preview - - computer_use_preview - - web_search_preview_2025_03_11 + - bucket + x-stainless-const: true + start_time: + type: integer + end_time: + type: integer + result: + type: array + items: + oneOf: + - $ref: "#/components/schemas/UsageCompletionsResult" + - $ref: "#/components/schemas/UsageEmbeddingsResult" + - $ref: "#/components/schemas/UsageModerationsResult" + - $ref: "#/components/schemas/UsageImagesResult" + - $ref: "#/components/schemas/UsageAudioSpeechesResult" + - $ref: "#/components/schemas/UsageAudioTranscriptionsResult" + - $ref: "#/components/schemas/UsageVectorStoresResult" + - $ref: "#/components/schemas/UsageCodeInterpreterSessionsResult" + - $ref: "#/components/schemas/CostsResult" required: - - type - TranscriptTextDeltaEvent: + - object + - start_time + - end_time + - result + UsageVectorStoresResult: type: object - description: Emitted when there is an additional text delta. This is also the - first event emitted when the transcription starts. Only emitted when you - [create a transcription](/docs/api-reference/audio/create-transcription) - with the `Stream` parameter set to `true`. + description: The aggregated vector stores usage details of the specific time bucket. 
properties: - type: + object: type: string - description: | - The type of the event. Always `transcript.text.delta`. enum: - - transcript.text.delta + - organization.usage.vector_stores.result x-stainless-const: true - delta: + usage_bytes: + type: integer + description: The vector stores usage in bytes. + project_id: type: string - description: | - The text delta that was additionally transcribed. - logprobs: - type: array - description: > - The log probabilities of the delta. Only included if you [create a - transcription](/docs/api-reference/audio/create-transcription) with - the `include[]` parameter set to `logprobs`. - items: - type: object - properties: - token: - type: string - description: | - The token that was used to generate the log probability. - logprob: - type: number - description: | - The log probability of the token. - bytes: - type: array - description: | - The bytes that were used to generate the log probability. + nullable: true + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. required: - - type - - delta + - object + - usage_bytes x-oaiMeta: - name: Stream Event (transcript.text.delta) - group: transcript + name: Vector stores usage object example: | { - "type": "transcript.text.delta", - "delta": " wonderful" + "object": "organization.usage.vector_stores.result", + "usage_bytes": 1024, + "project_id": "proj_abc" } - TranscriptTextDoneEvent: + User: type: object - description: Emitted when the transcription is complete. Contains the complete - transcription text. Only emitted when you [create a - transcription](/docs/api-reference/audio/create-transcription) with the - `Stream` parameter set to `true`. + description: Represents an individual `user` within an organization. properties: - type: + object: type: string - description: | - The type of the event. Always `transcript.text.done`. 
enum: - - transcript.text.done + - organization.user + description: The object type, which is always `organization.user` x-stainless-const: true - text: + id: type: string - description: | - The text that was transcribed. - logprobs: - type: array - description: > - The log probabilities of the individual tokens in the transcription. - Only included if you [create a - transcription](/docs/api-reference/audio/create-transcription) with - the `include[]` parameter set to `logprobs`. - items: - type: object - properties: - token: - type: string - description: | - The token that was used to generate the log probability. - logprob: - type: number - description: | - The log probability of the token. - bytes: - type: array - description: | - The bytes that were used to generate the log probability. + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: + - owner + - reader + description: "`owner` or `reader`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the user was added. required: - - type - - text + - object + - id + - name + - email + - role + - added_at x-oaiMeta: - name: Stream Event (transcript.text.done) - group: transcript - example: > + name: The user object + example: | { - "type": "transcript.text.done", - "text": "I see skies of blue and clouds of white, the bright blessed days, the dark sacred nights, and I think to myself, what a wonderful world." 
+ "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 } - TranscriptionInclude: - type: string - enum: - - logprobs - default: [] - TranscriptionSegment: + UserDeleteResponse: type: object properties: + object: + type: string + enum: + - organization.user.deleted + x-stainless-const: true id: - type: integer - description: Unique identifier of the segment. - seek: - type: integer - description: Seek offset of the segment. - start: - type: number - format: float - description: Start time of the segment in seconds. - end: - type: number - format: float - description: End time of the segment in seconds. - text: type: string - description: Text content of the segment. - tokens: - type: array - items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - format: float - description: Temperature parameter used for generating the segment. - avg_logprob: - type: number - format: float - description: Average logprob of the segment. If the value is lower than -1, - consider the logprobs failed. - compression_ratio: - type: number - format: float - description: Compression ratio of the segment. If the value is greater than 2.4, - consider the compression failed. - no_speech_prob: - type: number - format: float - description: Probability of no speech in the segment. If the value is higher - than 1.0 and the `avg_logprob` is below -1, consider this segment - silent. + deleted: + type: boolean required: + - object - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob - TranscriptionWord: + - deleted + UserListResponse: type: object properties: - word: + object: type: string - description: The text content of the word. - start: - type: number - format: float - description: Start time of the word in seconds. 
- end: - type: number - format: float - description: End time of the word in seconds. + enum: + - list + x-stainless-const: true + data: + type: array + items: + $ref: "#/components/schemas/User" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean required: - - word - - start - - end - TruncationObject: + - object + - data + - first_id + - last_id + - has_more + UserRoleUpdateRequest: type: object - title: Thread Truncation Controls - description: Controls for how a thread will be truncated prior to the run. Use - this to control the intial context window of the run. properties: - type: + role: type: string - description: The truncation strategy to use for the thread. The default is - `auto`. If set to `last_messages`, the thread will be truncated to - the n most recent messages in the thread. When set to `auto`, - messages in the middle of the thread will be dropped to fit the - context length of the model, `max_prompt_tokens`. enum: - - auto - - last_messages - last_messages: - type: integer - description: The number of most recent messages from the thread when - constructing the context for the run. - minimum: 1 - nullable: true + - owner + - reader + description: "`owner` or `reader`" required: - - type - Type: + - role + VadConfig: type: object - title: Type - description: | - An action to type in text. + additionalProperties: false + required: + - type properties: type: type: string enum: - - type - default: type + - server_vad + description: Must be set to `server_vad` to enable manual chunking using server + side VAD. + prefix_padding_ms: + type: integer + default: 300 description: | - Specifies the event type. For a type action, this property is - always set to `type`. - x-stainless-const: true - text: - type: string + Amount of audio to include before the VAD detected speech (in + milliseconds). + silence_duration_ms: + type: integer + default: 200 description: | - The text to type. 
- required: - - type - - text - UpdateVectorStoreFileAttributesRequest: + Duration of silence to detect speech stop (in milliseconds). + With shorter values the model will respond more quickly, + but may jump in on short pauses from the user. + threshold: + type: number + default: 0.5 + description: > + Sensitivity threshold (0.0 to 1.0) for voice activity detection. A + + higher threshold will require louder audio to activate the model, + and + + thus might perform better in noisy environments. + ValidateGraderRequest: type: object - additionalProperties: false + title: ValidateGraderRequest properties: - attributes: - $ref: "#/components/schemas/VectorStoreFileAttributes" + grader: + type: object + description: The grader used for the fine-tuning job. + oneOf: + - $ref: "#/components/schemas/GraderStringCheck" + - $ref: "#/components/schemas/GraderTextSimilarity" + - $ref: "#/components/schemas/GraderPython" + - $ref: "#/components/schemas/GraderScoreModel" + - $ref: "#/components/schemas/GraderMulti" required: - - attributes - x-oaiMeta: - name: Update vector store file attributes request - UpdateVectorStoreRequest: + - grader + ValidateGraderResponse: type: object - additionalProperties: false + title: ValidateGraderResponse properties: - name: - description: The name of the vector store. + grader: + type: object + description: The grader used for the fine-tuning job. + oneOf: + - $ref: "#/components/schemas/GraderStringCheck" + - $ref: "#/components/schemas/GraderTextSimilarity" + - $ref: "#/components/schemas/GraderPython" + - $ref: "#/components/schemas/GraderScoreModel" + - $ref: "#/components/schemas/GraderMulti" + VectorStoreExpirationAfter: + type: object + title: Vector store expiration policy + description: The expiration policy for a vector store. + properties: + anchor: + description: "Anchor timestamp after which the expiration policy applies. + Supported anchors: `last_active_at`." 
type: string - nullable: true - expires_after: - allOf: - - $ref: "#/components/schemas/VectorStoreExpirationAfter" - - nullable: true - metadata: - $ref: "#/components/schemas/Metadata" - Upload: + enum: + - last_active_at + x-stainless-const: true + days: + description: The number of days after the anchor time that the vector store will + expire. + type: integer + minimum: 1 + maximum: 365 + required: + - anchor + - days + VectorStoreFileAttributes: type: object - title: Upload - description: | - The Upload object can accept byte chunks in the form of Parts. + description: > + Set of 16 key-value pairs that can be attached to an object. This can + be + + useful for storing additional information about the object in a + structured + + format, and querying for objects via API or the dashboard. Keys are + strings + + with a maximum length of 64 characters. Values are strings with a + maximum + + length of 512 characters, booleans, or numbers. + maxProperties: 16 + propertyNames: + type: string + maxLength: 64 + additionalProperties: + oneOf: + - type: string + maxLength: 512 + - type: number + - type: boolean + x-oaiTypeLabel: map + nullable: true + VectorStoreFileBatchObject: + type: object + title: Vector store file batch + description: A batch of files attached to a vector store. properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The Upload unique identifier, which can be referenced in API - endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - filename: + object: + description: The object type, which is always `vector_store.file_batch`. type: string - description: The name of the file to be uploaded. - bytes: + enum: + - vector_store.files_batch + x-stainless-const: true + created_at: + description: The Unix timestamp (in seconds) for when the vector store files + batch was created. 
type: integer - description: The intended number of bytes to be uploaded. - purpose: + vector_store_id: + description: The ID of the [vector + store](/docs/api-reference/vector-stores/object) that the + [File](/docs/api-reference/files) is attached to. type: string - description: The intended purpose of the file. [Please refer - here](/docs/api-reference/files/object#files/object-purpose) for - acceptable values. status: + description: The status of the vector store files batch, which can be either + `in_progress`, `completed`, `cancelled` or `failed`. type: string - description: The status of the Upload. enum: - - pending + - in_progress - completed - cancelled - - expired - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload will expire. - object: - type: string - description: The object type, which is always "upload". - enum: - - upload - x-stainless-const: true - file: - allOf: - - $ref: "#/components/schemas/OpenAIFile" - - nullable: true - description: The ready File object after the Upload is completed. + - failed + file_counts: + type: object + properties: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that where cancelled. + type: integer + total: + description: The total number of files. 
+ type: integer + required: + - in_progress + - completed + - cancelled + - failed + - total required: - - bytes - - created_at - - expires_at - - filename - id - - purpose + - object + - created_at + - vector_store_id - status + - file_counts x-oaiMeta: - name: The upload object + name: The vector store files batch object + beta: true example: | { - "id": "upload_abc123", - "object": "upload", - "bytes": 2147483648, - "created_at": 1719184911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", + "id": "vsfb_123", + "object": "vector_store.files_batch", + "created_at": 1698107661, + "vector_store_id": "vs_abc123", "status": "completed", - "expires_at": 1719127296, - "file": { - "id": "file-xyz321", - "object": "file", - "bytes": 2147483648, - "created_at": 1719186911, - "filename": "training_examples.jsonl", - "purpose": "fine-tune", + "file_counts": { + "in_progress": 0, + "completed": 100, + "failed": 0, + "cancelled": 0, + "total": 100 } } - UploadCertificateRequest: + VectorStoreFileContentResponse: type: object + description: Represents the parsed content of a vector store file. properties: - name: + object: type: string - description: An optional name for the certificate - content: + enum: + - vector_store.file_content.page + description: The object type, which is always `vector_store.file_content.page` + x-stainless-const: true + data: + type: array + description: Parsed content of the file. + items: + type: object + properties: + type: + type: string + description: The content type (currently only `"text"`) + text: + type: string + description: The text content + has_more: + type: boolean + description: Indicates if there are more content pages to fetch. + next_page: type: string - description: The certificate content in PEM format + description: The token for the next page, if any. 
+ nullable: true required: - - content - UploadPart: + - object + - data + - has_more + - next_page + VectorStoreFileObject: type: object - title: UploadPart - description: > - The upload Part represents a chunk of bytes we can add to an Upload - object. + title: Vector store files + description: A list of files attached to a vector store. properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The upload Part unique identifier, which can be referenced in API - endpoints. + object: + description: The object type, which is always `vector_store.file`. + type: string + enum: + - vector_store.file + x-stainless-const: true + usage_bytes: + description: The total vector store usage in bytes. Note that this may be + different from the original file size. + type: integer created_at: + description: The Unix timestamp (in seconds) for when the vector store file was + created. type: integer - description: The Unix timestamp (in seconds) for when the Part was created. - upload_id: + vector_store_id: + description: The ID of the [vector + store](/docs/api-reference/vector-stores/object) that the + [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store file, which can be either + `in_progress`, `completed`, `cancelled`, or `failed`. The status + `completed` indicates that the vector store file is ready for use. + type: string + enum: + - in_progress + - completed + - cancelled + - failed + last_error: + type: object + description: The last error associated with this vector store file. Will be + `null` if there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: + - server_error + - unsupported_file + - invalid_file + message: + type: string + description: A human-readable description of the error. 
+ required: + - code + - message + chunking_strategy: + type: object + description: The strategy used to chunk the file. + oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + attributes: + $ref: "#/components/schemas/VectorStoreFileAttributes" + required: + - id + - object + - usage_bytes + - created_at + - vector_store_id + - status + - last_error + x-oaiMeta: + name: The vector store file object + beta: true + example: | + { + "id": "file-abc123", + "object": "vector_store.file", + "usage_bytes": 1234, + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "last_error": null, + "chunking_strategy": { + "type": "static", + "static": { + "max_chunk_size_tokens": 800, + "chunk_overlap_tokens": 400 + } + } + } + VectorStoreObject: + type: object + title: Vector store + description: A vector store is a collection of processed files can be used by + the `file_search` tool. + properties: + id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The ID of the Upload object that this Part was added to. object: + description: The object type, which is always `vector_store`. type: string - description: The object type, which is always `upload.part`. enum: - - upload.part + - vector_store x-stainless-const: true + created_at: + description: The Unix timestamp (in seconds) for when the vector store was + created. + type: integer + name: + description: The name of the vector store. + type: string + usage_bytes: + description: The total number of bytes used by the files in the vector store. + type: integer + file_counts: + type: object + properties: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been successfully processed. 
+ type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that were cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - failed + - cancelled + - total + status: + description: The status of the vector store, which can be either `expired`, + `in_progress`, or `completed`. A status of `completed` indicates + that the vector store is ready for use. + type: string + enum: + - expired + - in_progress + - completed + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + expires_at: + description: The Unix timestamp (in seconds) for when the vector store will + expire. + type: integer + nullable: true + last_active_at: + description: The Unix timestamp (in seconds) for when the vector store was last + active. + type: integer + nullable: true + metadata: + $ref: "#/components/schemas/Metadata" required: - - created_at - id - object - - upload_id + - usage_bytes + - created_at + - status + - last_active_at + - name + - file_counts + - metadata x-oaiMeta: - name: The upload part object + name: The vector store object example: | { - "id": "part_def456", - "object": "upload.part", - "created_at": 1719186911, - "upload_id": "upload_abc123" + "id": "vs_123", + "object": "vector_store", + "created_at": 1698107661, + "usage_bytes": 123456, + "last_active_at": 1698107661, + "name": "my_vector_store", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "cancelled": 0, + "failed": 0, + "total": 100 + }, + "last_used_at": 1698107661 } - UsageAudioSpeechesResult: + VectorStoreSearchRequest: + type: object + additionalProperties: false + properties: + query: + description: A query string for a search + oneOf: + - type: string + - type: array + items: + type: string + description: A list of queries to search for. 
+ minItems: 1 + rewrite_query: + description: Whether to rewrite the natural language query for vector search. + type: boolean + default: false + max_num_results: + description: The maximum number of results to return. This number should be + between 1 and 50 inclusive. + type: integer + default: 10 + minimum: 1 + maximum: 50 + filters: + description: A filter to apply based on file attributes. + oneOf: + - $ref: "#/components/schemas/ComparisonFilter" + - $ref: "#/components/schemas/CompoundFilter" + ranking_options: + description: Ranking options for search. + type: object + additionalProperties: false + properties: + ranker: + type: string + enum: + - auto + - default-2024-11-15 + default: auto + score_threshold: + type: number + minimum: 0 + maximum: 1 + default: 0 + required: + - query + x-oaiMeta: + name: Vector store search request + VectorStoreSearchResultContentObject: type: object - description: The aggregated audio speeches usage details of the specific time bucket. + additionalProperties: false properties: - object: + type: + description: The type of content. type: string enum: - - organization.usage.audio_speeches.result - x-stainless-const: true - characters: - type: integer - description: The number of characters processed. - num_model_requests: - type: integer - description: The count of requests made to the model. - project_id: - type: string - nullable: true - description: When `group_by=project_id`, this field provides the project ID of - the grouped usage result. - user_id: - type: string - nullable: true - description: When `group_by=user_id`, this field provides the user ID of the - grouped usage result. - api_key_id: - type: string - nullable: true - description: When `group_by=api_key_id`, this field provides the API key ID of - the grouped usage result. - model: + - text + text: + description: The text content returned from search. 
type: string - nullable: true - description: When `group_by=model`, this field provides the model name of the - grouped usage result. required: - - object - - characters - - num_model_requests + - type + - text x-oaiMeta: - name: Audio speeches usage object - example: | - { - "object": "organization.usage.audio_speeches.result", - "characters": 45, - "num_model_requests": 1, - "project_id": "proj_abc", - "user_id": "user-abc", - "api_key_id": "key_abc", - "model": "tts-1" - } - UsageAudioTranscriptionsResult: + name: Vector store search result content object + VectorStoreSearchResultItem: type: object - description: The aggregated audio transcriptions usage details of the specific - time bucket. + additionalProperties: false properties: - object: - type: string - enum: - - organization.usage.audio_transcriptions.result - x-stainless-const: true - seconds: - type: integer - description: The number of seconds processed. - num_model_requests: - type: integer - description: The count of requests made to the model. - project_id: - type: string - nullable: true - description: When `group_by=project_id`, this field provides the project ID of - the grouped usage result. - user_id: - type: string - nullable: true - description: When `group_by=user_id`, this field provides the user ID of the - grouped usage result. - api_key_id: + file_id: type: string - nullable: true - description: When `group_by=api_key_id`, this field provides the API key ID of - the grouped usage result. - model: + description: The ID of the vector store file. + filename: type: string - nullable: true - description: When `group_by=model`, this field provides the model name of the - grouped usage result. + description: The name of the vector store file. + score: + type: number + description: The similarity score for the result. + minimum: 0 + maximum: 1 + attributes: + $ref: "#/components/schemas/VectorStoreFileAttributes" + content: + type: array + description: Content chunks from the file. 
+ items: + $ref: "#/components/schemas/VectorStoreSearchResultContentObject" required: - - object - - seconds - - num_model_requests + - file_id + - filename + - score + - attributes + - content x-oaiMeta: - name: Audio transcriptions usage object - example: | - { - "object": "organization.usage.audio_transcriptions.result", - "seconds": 10, - "num_model_requests": 1, - "project_id": "proj_abc", - "user_id": "user-abc", - "api_key_id": "key_abc", - "model": "tts-1" - } - UsageCodeInterpreterSessionsResult: + name: Vector store search result item + VectorStoreSearchResultsPage: type: object - description: The aggregated code interpreter sessions usage details of the - specific time bucket. + additionalProperties: false properties: object: type: string enum: - - organization.usage.code_interpreter_sessions.result + - vector_store.search_results.page + description: The object type, which is always `vector_store.search_results.page` x-stainless-const: true - num_sessions: - type: integer - description: The number of code interpreter sessions. - project_id: + search_query: + type: array + items: + type: string + description: The query used for this search. + minItems: 1 + data: + type: array + description: The list of search result items. + items: + $ref: "#/components/schemas/VectorStoreSearchResultItem" + has_more: + type: boolean + description: Indicates if there are more results to fetch. + next_page: type: string + description: The token for the next page, if any. nullable: true - description: When `group_by=project_id`, this field provides the project ID of - the grouped usage result. 
required: - object - - sessions + - search_query + - data + - has_more + - next_page x-oaiMeta: - name: Code interpreter sessions usage object - example: | - { - "object": "organization.usage.code_interpreter_sessions.result", - "num_sessions": 1, - "project_id": "proj_abc" - } - UsageCompletionsResult: + name: Vector store search results page + VoiceIdsShared: + example: ash + anyOf: + - type: string + - type: string + enum: + - alloy + - ash + - ballad + - coral + - echo + - fable + - onyx + - nova + - sage + - shimmer + - verse + Wait: type: object - description: The aggregated completions usage details of the specific time bucket. + title: Wait + description: | + A wait action. properties: - object: + type: type: string enum: - - organization.usage.completions.result + - wait + default: wait + description: | + Specifies the event type. For a wait action, this property is + always set to `wait`. x-stainless-const: true - input_tokens: - type: integer - description: The aggregated number of text input tokens used, including cached - tokens. For customers subscribe to scale tier, this includes scale - tier tokens. - input_cached_tokens: - type: integer - description: The aggregated number of text input tokens that has been cached - from previous requests. For customers subscribe to scale tier, this - includes scale tier tokens. - output_tokens: - type: integer - description: The aggregated number of text output tokens used. For customers - subscribe to scale tier, this includes scale tier tokens. - input_audio_tokens: - type: integer - description: The aggregated number of audio input tokens used, including cached - tokens. - output_audio_tokens: - type: integer - description: The aggregated number of audio output tokens used. - num_model_requests: - type: integer - description: The count of requests made to the model. 
- project_id: - type: string - nullable: true - description: When `group_by=project_id`, this field provides the project ID of - the grouped usage result. - user_id: - type: string - nullable: true - description: When `group_by=user_id`, this field provides the user ID of the - grouped usage result. - api_key_id: - type: string - nullable: true - description: When `group_by=api_key_id`, this field provides the API key ID of - the grouped usage result. - model: - type: string - nullable: true - description: When `group_by=model`, this field provides the model name of the - grouped usage result. - batch: - type: boolean - nullable: true - description: When `group_by=batch`, this field tells whether the grouped usage - result is batch or not. required: - - object - - input_tokens - - output_tokens - - num_model_requests - x-oaiMeta: - name: Completions usage object - example: | - { - "object": "organization.usage.completions.result", - "input_tokens": 5000, - "output_tokens": 1000, - "input_cached_tokens": 4000, - "input_audio_tokens": 300, - "output_audio_tokens": 200, - "num_model_requests": 5, - "project_id": "proj_abc", - "user_id": "user-abc", - "api_key_id": "key_abc", - "model": "gpt-4o-mini-2024-07-18", - "batch": false - } - UsageEmbeddingsResult: + - type + WebSearchActionFind: type: object - description: The aggregated embeddings usage details of the specific time bucket. + title: Find action + description: | + Action type "find": Searches for a pattern within a loaded page. properties: - object: + type: type: string enum: - - organization.usage.embeddings.result + - find + description: | + The action type. x-stainless-const: true - input_tokens: - type: integer - description: The aggregated number of input tokens used. - num_model_requests: - type: integer - description: The count of requests made to the model. 
- project_id: - type: string - nullable: true - description: When `group_by=project_id`, this field provides the project ID of - the grouped usage result. - user_id: - type: string - nullable: true - description: When `group_by=user_id`, this field provides the user ID of the - grouped usage result. - api_key_id: + url: type: string - nullable: true - description: When `group_by=api_key_id`, this field provides the API key ID of - the grouped usage result. - model: + format: uri + description: | + The URL of the page searched for the pattern. + pattern: type: string - nullable: true - description: When `group_by=model`, this field provides the model name of the - grouped usage result. + description: | + The pattern or text to search for within the page. required: - - object - - input_tokens - - num_model_requests - x-oaiMeta: - name: Embeddings usage object - example: | - { - "object": "organization.usage.embeddings.result", - "input_tokens": 20, - "num_model_requests": 2, - "project_id": "proj_abc", - "user_id": "user-abc", - "api_key_id": "key_abc", - "model": "text-embedding-ada-002-v2" - } - UsageImagesResult: + - type + - url + - pattern + WebSearchActionOpenPage: type: object - description: The aggregated images usage details of the specific time bucket. + title: Open page action + description: | + Action type "open_page" - Opens a specific URL from search results. properties: - object: + type: type: string enum: - - organization.usage.images.result + - open_page + description: | + The action type. x-stainless-const: true - images: - type: integer - description: The number of images processed. - num_model_requests: - type: integer - description: The count of requests made to the model. - source: - type: string - nullable: true - description: When `group_by=source`, this field provides the source of the - grouped usage result, possible values are `image.generation`, - `image.edit`, `image.variation`. 
- size: - type: string - nullable: true - description: When `group_by=size`, this field provides the image size of the - grouped usage result. - project_id: - type: string - nullable: true - description: When `group_by=project_id`, this field provides the project ID of - the grouped usage result. - user_id: - type: string - nullable: true - description: When `group_by=user_id`, this field provides the user ID of the - grouped usage result. - api_key_id: - type: string - nullable: true - description: When `group_by=api_key_id`, this field provides the API key ID of - the grouped usage result. - model: + url: type: string - nullable: true - description: When `group_by=model`, this field provides the model name of the - grouped usage result. + format: uri + description: | + The URL opened by the model. required: - - object - - images - - num_model_requests - x-oaiMeta: - name: Images usage object - example: | - { - "object": "organization.usage.images.result", - "images": 2, - "num_model_requests": 2, - "size": "1024x1024", - "source": "image.generation", - "project_id": "proj_abc", - "user_id": "user-abc", - "api_key_id": "key_abc", - "model": "dall-e-3" - } - UsageModerationsResult: + - type + - url + WebSearchActionSearch: type: object - description: The aggregated moderations usage details of the specific time bucket. + title: Search action + description: | + Action type "search" - Performs a web search query. properties: - object: + type: type: string enum: - - organization.usage.moderations.result + - search + description: | + The action type. x-stainless-const: true - input_tokens: - type: integer - description: The aggregated number of input tokens used. - num_model_requests: - type: integer - description: The count of requests made to the model. - project_id: + query: type: string - nullable: true - description: When `group_by=project_id`, this field provides the project ID of - the grouped usage result. - user_id: + description: | + The search query. 
+ required: + - type + - query + WebSearchContextSize: + type: string + description: > + High level guidance for the amount of context window space to use for + the + + search. One of `low`, `medium`, or `high`. `medium` is the default. + enum: + - low + - medium + - high + default: medium + WebSearchLocation: + type: object + title: Web search location + description: Approximate location parameters for the search. + properties: + country: type: string - nullable: true - description: When `group_by=user_id`, this field provides the user ID of the - grouped usage result. - api_key_id: + description: > + The two-letter + + [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the + user, + + e.g. `US`. + region: type: string - nullable: true - description: When `group_by=api_key_id`, this field provides the API key ID of - the grouped usage result. - model: + description: | + Free text input for the region of the user, e.g. `California`. + city: type: string - nullable: true - description: When `group_by=model`, this field provides the model name of the - grouped usage result. - required: - - object - - input_tokens - - num_model_requests - x-oaiMeta: - name: Moderations usage object - example: | - { - "object": "organization.usage.moderations.result", - "input_tokens": 20, - "num_model_requests": 2, - "project_id": "proj_abc", - "user_id": "user-abc", - "api_key_id": "key_abc", - "model": "text-moderation" - } - UsageResponse: + description: | + Free text input for the city of the user, e.g. `San Francisco`. + timezone: + type: string + description: > + The [IANA + timezone](https://timeapi.io/documentation/iana-timezones) + + of the user, e.g. `America/Los_Angeles`. + WebSearchToolCall: type: object + title: Web search tool call + description: | + The results of a web search tool call. See the + [web search guide](/docs/guides/tools-web-search) for more information. 
properties: - object: + id: + type: string + description: | + The unique ID of the web search tool call. + type: type: string enum: - - page + - web_search_call + description: | + The type of the web search tool call. Always `web_search_call`. x-stainless-const: true - data: - type: array - items: - $ref: "#/components/schemas/UsageTimeBucket" - has_more: - type: boolean - next_page: + status: type: string + description: | + The status of the web search tool call. + enum: + - in_progress + - searching + - completed + - failed + action: + type: object + description: > + An object describing the specific action taken in this web search + call. + + Includes details on how the model used the web (search, open_page, + find). + oneOf: + - $ref: "#/components/schemas/WebSearchActionSearch" + - $ref: "#/components/schemas/WebSearchActionOpenPage" + - $ref: "#/components/schemas/WebSearchActionFind" + required: + - id + - type + - status + - action + WebhookBatchCancelled: + type: object + title: batch.cancelled + description: | + Sent when a batch API request has been cancelled. required: - - object + - created_at + - id - data - - has_more - - next_page - UsageTimeBucket: - type: object + - type properties: + created_at: + type: integer + description: > + The Unix timestamp (in seconds) of when the batch API request was + cancelled. + id: + type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the batch API request. object: type: string + description: | + The object of the event. Always `event`. 
enum: - - bucket + - event x-stainless-const: true - start_time: - type: integer - end_time: - type: integer - result: - type: array - items: - oneOf: - - $ref: "#/components/schemas/UsageCompletionsResult" - - $ref: "#/components/schemas/UsageEmbeddingsResult" - - $ref: "#/components/schemas/UsageModerationsResult" - - $ref: "#/components/schemas/UsageImagesResult" - - $ref: "#/components/schemas/UsageAudioSpeechesResult" - - $ref: "#/components/schemas/UsageAudioTranscriptionsResult" - - $ref: "#/components/schemas/UsageVectorStoresResult" - - $ref: "#/components/schemas/UsageCodeInterpreterSessionsResult" - - $ref: "#/components/schemas/CostsResult" - required: - - object - - start_time - - end_time - - result - UsageVectorStoresResult: - type: object - description: The aggregated vector stores usage details of the specific time bucket. - properties: - object: + type: type: string + description: | + The type of the event. Always `batch.cancelled`. enum: - - organization.usage.vector_stores.result + - batch.cancelled x-stainless-const: true - usage_bytes: - type: integer - description: The vector stores usage in bytes. - project_id: - type: string - nullable: true - description: When `group_by=project_id`, this field provides the project ID of - the grouped usage result. - required: - - object - - usage_bytes x-oaiMeta: - name: Vector stores usage object + name: batch.cancelled + group: webhook-events example: | { - "object": "organization.usage.vector_stores.result", - "usage_bytes": 1024, - "project_id": "proj_abc" + "id": "evt_abc123", + "type": "batch.cancelled", + "created_at": 1719168000, + "data": { + "id": "batch_abc123" + } } - User: + WebhookBatchCompleted: type: object - description: Represents an individual `user` within an organization. + title: batch.completed + description: | + Sent when a batch API request has been completed. 
+ required: + - created_at + - id + - data + - type properties: + created_at: + type: integer + description: > + The Unix timestamp (in seconds) of when the batch API request was + completed. + id: + type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the batch API request. object: type: string + description: | + The object of the event. Always `event`. enum: - - organization.user - description: The object type, which is always `organization.user` + - event x-stainless-const: true - id: - type: string - description: The identifier, which can be referenced in API endpoints - name: - type: string - description: The name of the user - email: - type: string - description: The email address of the user - role: + type: type: string + description: | + The type of the event. Always `batch.completed`. enum: - - owner - - reader - description: "`owner` or `reader`" - added_at: - type: integer - description: The Unix timestamp (in seconds) of when the user was added. - required: - - object - - id - - name - - email - - role - - added_at + - batch.completed + x-stainless-const: true x-oaiMeta: - name: The user object + name: batch.completed + group: webhook-events example: | { - "object": "organization.user", - "id": "user_abc", - "name": "First Last", - "email": "user@example.com", - "role": "owner", - "added_at": 1711471533 + "id": "evt_abc123", + "type": "batch.completed", + "created_at": 1719168000, + "data": { + "id": "batch_abc123" + } } - UserDeleteResponse: + WebhookBatchExpired: type: object - properties: - object: - type: string - enum: - - organization.user.deleted - x-stainless-const: true - id: - type: string - deleted: - type: boolean + title: batch.expired + description: | + Sent when a batch API request has expired. 
required: - - object + - created_at - id - - deleted - UserListResponse: - type: object + - data + - type properties: + created_at: + type: integer + description: > + The Unix timestamp (in seconds) of when the batch API request + expired. + id: + type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the batch API request. object: type: string + description: | + The object of the event. Always `event`. enum: - - list + - event x-stainless-const: true - data: - type: array - items: - $ref: "#/components/schemas/User" - first_id: - type: string - last_id: + type: type: string - has_more: - type: boolean + description: | + The type of the event. Always `batch.expired`. + enum: + - batch.expired + x-stainless-const: true + x-oaiMeta: + name: batch.expired + group: webhook-events + example: | + { + "id": "evt_abc123", + "type": "batch.expired", + "created_at": 1719168000, + "data": { + "id": "batch_abc123" + } + } + WebhookBatchFailed: + type: object + title: batch.failed + description: | + Sent when a batch API request has failed. required: - - object + - created_at + - id - data - - first_id - - last_id - - has_more - UserRoleUpdateRequest: - type: object + - type properties: - role: + created_at: + type: integer + description: > + The Unix timestamp (in seconds) of when the batch API request failed. + id: + type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the batch API request. + object: type: string + description: | + The object of the event. Always `event`. 
enum: - - owner - - reader - description: "`owner` or `reader`" - required: - - role - VectorStoreExpirationAfter: - type: object - title: Vector store expiration policy - description: The expiration policy for a vector store. - properties: - anchor: - description: "Anchor timestamp after which the expiration policy applies. - Supported anchors: `last_active_at`." + - event + x-stainless-const: true + type: type: string + description: | + The type of the event. Always `batch.failed`. enum: - - last_active_at + - batch.failed x-stainless-const: true - days: - description: The number of days after the anchor time that the vector store will - expire. - type: integer - minimum: 1 - maximum: 365 - required: - - anchor - - days - VectorStoreFileAttributes: - type: object - description: > - Set of 16 key-value pairs that can be attached to an object. This can - be - - useful for storing additional information about the object in a - structured - - format, and querying for objects via API or the dashboard. Keys are - strings - - with a maximum length of 64 characters. Values are strings with a - maximum - - length of 512 characters, booleans, or numbers. - maxProperties: 16 - propertyNames: - type: string - maxLength: 64 - additionalProperties: - oneOf: - - type: string - maxLength: 512 - - type: number - - type: boolean - x-oaiTypeLabel: map - nullable: true - VectorStoreFileBatchObject: + x-oaiMeta: + name: batch.failed + group: webhook-events + example: | + { + "id": "evt_abc123", + "type": "batch.failed", + "created_at": 1719168000, + "data": { + "id": "batch_abc123" + } + } + WebhookEvalRunCanceled: type: object - title: Vector store file batch - description: A batch of files attached to a vector store. + title: eval.run.canceled + description: | + Sent when an eval run has been canceled. + required: + - created_at + - id + - data + - type properties: + created_at: + type: integer + description: | + The Unix timestamp (in seconds) of when the eval run was canceled. 
id: - description: The identifier, which can be referenced in API endpoints. type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the eval run. object: - description: The object type, which is always `vector_store.file_batch`. type: string + description: | + The object of the event. Always `event`. enum: - - vector_store.files_batch + - event x-stainless-const: true - created_at: - description: The Unix timestamp (in seconds) for when the vector store files - batch was created. - type: integer - vector_store_id: - description: The ID of the [vector - store](/docs/api-reference/vector-stores/object) that the - [File](/docs/api-reference/files) is attached to. - type: string - status: - description: The status of the vector store files batch, which can be either - `in_progress`, `completed`, `cancelled` or `failed`. + type: type: string + description: | + The type of the event. Always `eval.run.canceled`. enum: - - in_progress - - completed - - cancelled - - failed - file_counts: - type: object - properties: - in_progress: - description: The number of files that are currently being processed. - type: integer - completed: - description: The number of files that have been processed. - type: integer - failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that where cancelled. - type: integer - total: - description: The total number of files. 
- type: integer - required: - - in_progress - - completed - - cancelled - - failed - - total - required: - - id - - object - - created_at - - vector_store_id - - status - - file_counts + - eval.run.canceled + x-stainless-const: true x-oaiMeta: - name: The vector store files batch object - beta: true + name: eval.run.canceled + group: webhook-events example: | { - "id": "vsfb_123", - "object": "vector_store.files_batch", - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "failed": 0, - "cancelled": 0, - "total": 100 + "id": "evt_abc123", + "type": "eval.run.canceled", + "created_at": 1719168000, + "data": { + "id": "evalrun_abc123" } - } - VectorStoreFileContentResponse: + } + WebhookEvalRunFailed: type: object - description: Represents the parsed content of a vector store file. + title: eval.run.failed + description: | + Sent when an eval run has failed. + required: + - created_at + - id + - data + - type properties: + created_at: + type: integer + description: | + The Unix timestamp (in seconds) of when the eval run failed. + id: + type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the eval run. object: type: string + description: | + The object of the event. Always `event`. enum: - - vector_store.file_content.page - description: The object type, which is always `vector_store.file_content.page` + - event x-stainless-const: true - data: - type: array - description: Parsed content of the file. - items: - type: object - properties: - type: - type: string - description: The content type (currently only `"text"`) - text: - type: string - description: The text content - has_more: - type: boolean - description: Indicates if there are more content pages to fetch. 
- next_page: + type: type: string - description: The token for the next page, if any. - nullable: true + description: | + The type of the event. Always `eval.run.failed`. + enum: + - eval.run.failed + x-stainless-const: true + x-oaiMeta: + name: eval.run.failed + group: webhook-events + example: | + { + "id": "evt_abc123", + "type": "eval.run.failed", + "created_at": 1719168000, + "data": { + "id": "evalrun_abc123" + } + } + WebhookEvalRunSucceeded: + type: object + title: eval.run.succeeded + description: | + Sent when an eval run has succeeded. required: - - object + - created_at + - id - data - - has_more - - next_page - VectorStoreFileObject: - type: object - title: Vector store files - description: A list of files attached to a vector store. + - type properties: + created_at: + type: integer + description: | + The Unix timestamp (in seconds) of when the eval run succeeded. id: - description: The identifier, which can be referenced in API endpoints. type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the eval run. object: - description: The object type, which is always `vector_store.file`. type: string + description: | + The object of the event. Always `event`. enum: - - vector_store.file + - event x-stainless-const: true - usage_bytes: - description: The total vector store usage in bytes. Note that this may be - different from the original file size. - type: integer - created_at: - description: The Unix timestamp (in seconds) for when the vector store file was - created. - type: integer - vector_store_id: - description: The ID of the [vector - store](/docs/api-reference/vector-stores/object) that the - [File](/docs/api-reference/files) is attached to. - type: string - status: - description: The status of the vector store file, which can be either - `in_progress`, `completed`, `cancelled`, or `failed`. 
The status - `completed` indicates that the vector store file is ready for use. + type: type: string + description: | + The type of the event. Always `eval.run.succeeded`. enum: - - in_progress - - completed - - cancelled - - failed - last_error: - type: object - description: The last error associated with this vector store file. Will be - `null` if there are no errors. - nullable: true - properties: - code: - type: string - description: One of `server_error` or `rate_limit_exceeded`. - enum: - - server_error - - unsupported_file - - invalid_file - message: - type: string - description: A human-readable description of the error. - required: - - code - - message - chunking_strategy: - type: object - description: The strategy used to chunk the file. - oneOf: - - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" - - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" - attributes: - $ref: "#/components/schemas/VectorStoreFileAttributes" - required: - - id - - object - - usage_bytes - - created_at - - vector_store_id - - status - - last_error + - eval.run.succeeded + x-stainless-const: true x-oaiMeta: - name: The vector store file object - beta: true + name: eval.run.succeeded + group: webhook-events example: | { - "id": "file-abc123", - "object": "vector_store.file", - "usage_bytes": 1234, - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "last_error": null, - "chunking_strategy": { - "type": "static", - "static": { - "max_chunk_size_tokens": 800, - "chunk_overlap_tokens": 400 - } + "id": "evt_abc123", + "type": "eval.run.succeeded", + "created_at": 1719168000, + "data": { + "id": "evalrun_abc123" } - } - VectorStoreObject: + } + WebhookFineTuningJobCancelled: type: object - title: Vector store - description: A vector store is a collection of processed files can be used by - the `file_search` tool. + title: fine_tuning.job.cancelled + description: | + Sent when a fine-tuning job has been cancelled. 
+ required: + - created_at + - id + - data + - type properties: + created_at: + type: integer + description: > + The Unix timestamp (in seconds) of when the fine-tuning job was + cancelled. id: - description: The identifier, which can be referenced in API endpoints. type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the fine-tuning job. object: - description: The object type, which is always `vector_store`. type: string + description: | + The object of the event. Always `event`. enum: - - vector_store + - event + x-stainless-const: true + type: + type: string + description: | + The type of the event. Always `fine_tuning.job.cancelled`. + enum: + - fine_tuning.job.cancelled x-stainless-const: true + x-oaiMeta: + name: fine_tuning.job.cancelled + group: webhook-events + example: | + { + "id": "evt_abc123", + "type": "fine_tuning.job.cancelled", + "created_at": 1719168000, + "data": { + "id": "ftjob_abc123" + } + } + WebhookFineTuningJobFailed: + type: object + title: fine_tuning.job.failed + description: | + Sent when a fine-tuning job has failed. + required: + - created_at + - id + - data + - type + properties: created_at: - description: The Unix timestamp (in seconds) for when the vector store was - created. type: integer - name: - description: The name of the vector store. + description: | + The Unix timestamp (in seconds) of when the fine-tuning job failed. + id: type: string - usage_bytes: - description: The total number of bytes used by the files in the vector store. - type: integer - file_counts: + description: | + The unique ID of the event. + data: type: object - properties: - in_progress: - description: The number of files that are currently being processed. - type: integer - completed: - description: The number of files that have been successfully processed. 
- type: integer - failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that were cancelled. - type: integer - total: - description: The total number of files. - type: integer + description: | + Event data payload. required: - - in_progress - - completed - - failed - - cancelled - - total - status: - description: The status of the vector store, which can be either `expired`, - `in_progress`, or `completed`. A status of `completed` indicates - that the vector store is ready for use. + - id + properties: + id: + type: string + description: | + The unique ID of the fine-tuning job. + object: type: string + description: | + The object of the event. Always `event`. enum: - - expired - - in_progress - - completed - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - expires_at: - description: The Unix timestamp (in seconds) for when the vector store will - expire. - type: integer - nullable: true - last_active_at: - description: The Unix timestamp (in seconds) for when the vector store was last - active. - type: integer - nullable: true - metadata: - $ref: "#/components/schemas/Metadata" - required: - - id - - object - - usage_bytes - - created_at - - status - - last_active_at - - name - - file_counts - - metadata + - event + x-stainless-const: true + type: + type: string + description: | + The type of the event. Always `fine_tuning.job.failed`. 
+ enum: + - fine_tuning.job.failed + x-stainless-const: true x-oaiMeta: - name: The vector store object + name: fine_tuning.job.failed + group: webhook-events example: | { - "id": "vs_123", - "object": "vector_store", - "created_at": 1698107661, - "usage_bytes": 123456, - "last_active_at": 1698107661, - "name": "my_vector_store", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "cancelled": 0, - "failed": 0, - "total": 100 - }, - "last_used_at": 1698107661 - } - VectorStoreSearchRequest: + "id": "evt_abc123", + "type": "fine_tuning.job.failed", + "created_at": 1719168000, + "data": { + "id": "ftjob_abc123" + } + } + WebhookFineTuningJobSucceeded: type: object - additionalProperties: false + title: fine_tuning.job.succeeded + description: | + Sent when a fine-tuning job has succeeded. + required: + - created_at + - id + - data + - type properties: - query: - description: A query string for a search - oneOf: - - type: string - - type: array - items: - type: string - description: A list of queries to search for. - minItems: 1 - rewrite_query: - description: Whether to rewrite the natural language query for vector search. - type: boolean - default: false - max_num_results: - description: The maximum number of results to return. This number should be - between 1 and 50 inclusive. + created_at: type: integer - default: 10 - minimum: 1 - maximum: 50 - filters: - description: A filter to apply based on file attributes. - oneOf: - - $ref: "#/components/schemas/ComparisonFilter" - - $ref: "#/components/schemas/CompoundFilter" - ranking_options: - description: Ranking options for search. + description: > + The Unix timestamp (in seconds) of when the fine-tuning job + succeeded. + id: + type: string + description: | + The unique ID of the event. + data: type: object - additionalProperties: false + description: | + Event data payload. 
+ required: + - id properties: - ranker: + id: type: string - enum: - - auto - - default-2024-11-15 - default: auto - score_threshold: - type: number - minimum: 0 - maximum: 1 - default: 0 - required: - - query - x-oaiMeta: - name: Vector store search request - VectorStoreSearchResultContentObject: - type: object - additionalProperties: false - properties: - type: - description: The type of content. + description: | + The unique ID of the fine-tuning job. + object: type: string + description: | + The object of the event. Always `event`. enum: - - text - text: - description: The text content returned from search. + - event + x-stainless-const: true + type: type: string - required: - - type - - text + description: | + The type of the event. Always `fine_tuning.job.succeeded`. + enum: + - fine_tuning.job.succeeded + x-stainless-const: true x-oaiMeta: - name: Vector store search result content object - VectorStoreSearchResultItem: + name: fine_tuning.job.succeeded + group: webhook-events + example: | + { + "id": "evt_abc123", + "type": "fine_tuning.job.succeeded", + "created_at": 1719168000, + "data": { + "id": "ftjob_abc123" + } + } + WebhookResponseCancelled: type: object - additionalProperties: false - properties: - file_id: - type: string - description: The ID of the vector store file. - filename: - type: string - description: The name of the vector store file. - score: - type: number - description: The similarity score for the result. - minimum: 0 - maximum: 1 - attributes: - $ref: "#/components/schemas/VectorStoreFileAttributes" - content: - type: array - description: Content chunks from the file. - items: - $ref: "#/components/schemas/VectorStoreSearchResultContentObject" + title: response.cancelled + description: | + Sent when a background response has been cancelled. 
required: - - file_id - - filename - - score - - attributes - - content - x-oaiMeta: - name: Vector store search result item - VectorStoreSearchResultsPage: - type: object - additionalProperties: false + - created_at + - id + - data + - type properties: + created_at: + type: integer + description: > + The Unix timestamp (in seconds) of when the model response was + cancelled. + id: + type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the model response. object: type: string + description: | + The object of the event. Always `event`. enum: - - vector_store.search_results.page - description: The object type, which is always `vector_store.search_results.page` + - event x-stainless-const: true - search_query: - type: array - items: - type: string - description: The query used for this search. - minItems: 1 - data: - type: array - description: The list of search result items. - items: - $ref: "#/components/schemas/VectorStoreSearchResultItem" - has_more: - type: boolean - description: Indicates if there are more results to fetch. - next_page: + type: type: string - description: The token for the next page, if any. - nullable: true - required: - - object - - search_query - - data - - has_more - - next_page - x-oaiMeta: - name: Vector store search results page - VoiceIdsShared: - example: ash - anyOf: - - type: string - - type: string + description: | + The type of the event. Always `response.cancelled`. 
enum: - - alloy - - ash - - ballad - - coral - - echo - - fable - - onyx - - nova - - sage - - shimmer - - verse - Wait: + - response.cancelled + x-stainless-const: true + x-oaiMeta: + name: response.cancelled + group: webhook-events + example: | + { + "id": "evt_abc123", + "type": "response.cancelled", + "created_at": 1719168000, + "data": { + "id": "resp_abc123" + } + } + WebhookResponseCompleted: type: object - title: Wait + title: response.completed description: | - A wait action. + Sent when a background response has been completed. + required: + - created_at + - id + - data + - type properties: - type: + created_at: + type: integer + description: > + The Unix timestamp (in seconds) of when the model response was + completed. + id: + type: string + description: | + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the model response. + object: type: string + description: | + The object of the event. Always `event`. enum: - - wait - default: wait + - event + x-stainless-const: true + type: + type: string description: | - Specifies the event type. For a wait action, this property is - always set to `wait`. + The type of the event. Always `response.completed`. + enum: + - response.completed x-stainless-const: true + x-oaiMeta: + name: response.completed + group: webhook-events + example: | + { + "id": "evt_abc123", + "type": "response.completed", + "created_at": 1719168000, + "data": { + "id": "resp_abc123" + } + } + WebhookResponseFailed: + type: object + title: response.failed + description: | + Sent when a background response has failed. required: + - created_at + - id + - data - type - WebSearchContextSize: - type: string - description: > - High level guidance for the amount of context window space to use for - the - - search. One of `low`, `medium`, or `high`. `medium` is the default. 
- enum: - - low - - medium - - high - default: medium - WebSearchLocation: - type: object - title: Web search location - description: Approximate location parameters for the search. properties: - country: - type: string - description: > - The two-letter - - [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the - user, - - e.g. `US`. - region: + created_at: + type: integer + description: | + The Unix timestamp (in seconds) of when the model response failed. + id: type: string description: | - Free text input for the region of the user, e.g. `California`. - city: + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the model response. + object: type: string description: | - Free text input for the city of the user, e.g. `San Francisco`. - timezone: + The object of the event. Always `event`. + enum: + - event + x-stainless-const: true + type: type: string - description: > - The [IANA - timezone](https://timeapi.io/documentation/iana-timezones) - - of the user, e.g. `America/Los_Angeles`. - WebSearchToolCall: + description: | + The type of the event. Always `response.failed`. + enum: + - response.failed + x-stainless-const: true + x-oaiMeta: + name: response.failed + group: webhook-events + example: | + { + "id": "evt_abc123", + "type": "response.failed", + "created_at": 1719168000, + "data": { + "id": "resp_abc123" + } + } + WebhookResponseIncomplete: type: object - title: Web search tool call + title: response.incomplete description: | - The results of a web search tool call. See the - [web search guide](/docs/guides/tools-web-search) for more information. + Sent when a background response has been interrupted. + required: + - created_at + - id + - data + - type properties: + created_at: + type: integer + description: > + The Unix timestamp (in seconds) of when the model response was + interrupted. 
id: type: string description: | - The unique ID of the web search tool call. - type: + The unique ID of the event. + data: + type: object + description: | + Event data payload. + required: + - id + properties: + id: + type: string + description: | + The unique ID of the model response. + object: type: string - enum: - - web_search_call description: | - The type of the web search tool call. Always `web_search_call`. + The object of the event. Always `event`. + enum: + - event x-stainless-const: true - status: + type: type: string description: | - The status of the web search tool call. + The type of the event. Always `response.incomplete`. enum: - - in_progress - - searching - - completed - - failed - required: - - id - - type - - status + - response.incomplete + x-stainless-const: true + x-oaiMeta: + name: response.incomplete + group: webhook-events + example: | + { + "id": "evt_abc123", + "type": "response.incomplete", + "created_at": 1719168000, + "data": { + "id": "resp_abc123" + } + } InputTextContent: properties: type: @@ -38122,6 +43827,45 @@ components: - type title: Input file description: A file input to the model. + FunctionTool: + properties: + type: + type: string + enum: + - function + description: The type of the function tool. Always `function`. + default: function + x-stainless-const: true + name: + type: string + description: The name of the function to call. + description: + anyOf: + - type: string + description: A description of the function. Used by the model to determine + whether or not to call the function. + - type: "null" + parameters: + anyOf: + - additionalProperties: {} + type: object + description: A JSON schema object describing the parameters of the function. + - type: "null" + strict: + anyOf: + - type: boolean + description: Whether to enforce strict parameter validation. Default `true`. 
+ - type: "null" + type: object + required: + - type + - name + - strict + - parameters + title: Function + description: Defines a function in your own code the model can choose to call. + Learn more about [function + calling](https://platform.openai.com/docs/guides/function-calling). RankingOptions: properties: ranker: @@ -38175,45 +43919,6 @@ components: description: A tool that searches for relevant content from uploaded files. Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search). - FunctionTool: - properties: - type: - type: string - enum: - - function - description: The type of the function tool. Always `function`. - default: function - x-stainless-const: true - name: - type: string - description: The name of the function to call. - description: - anyOf: - - type: string - description: A description of the function. Used by the model to determine - whether or not to call the function. - - type: "null" - parameters: - anyOf: - - additionalProperties: {} - type: object - description: A JSON schema object describing the parameters of the function. - - type: "null" - strict: - anyOf: - - type: boolean - description: Whether to enforce strict parameter validation. Default `true`. - - type: "null" - type: object - required: - - type - - name - - strict - - parameters - title: Function - description: Defines a function in your own code the model can choose to call. - Learn more about [function - calling](https://platform.openai.com/docs/guides/function-calling). ApproximateLocation: properties: type: @@ -38314,14 +44019,6 @@ components: description: A tool that controls a virtual computer. Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). 
- Tool: - oneOf: - - $ref: "#/components/schemas/FileSearchTool" - - $ref: "#/components/schemas/FunctionTool" - - $ref: "#/components/schemas/WebSearchPreviewTool" - - $ref: "#/components/schemas/ComputerUsePreviewTool" - discriminator: - propertyName: type FileCitationBody: properties: type: @@ -38337,11 +44034,15 @@ components: index: type: integer description: The index of the file in the list of files. + filename: + type: string + description: The filename of the file cited. type: object required: - type - file_id - index + - filename title: File citation description: A citation to a file. UrlCitationBody: @@ -38374,13 +44075,90 @@ components: - title title: URL citation description: A citation for a web resource used to generate a model response. + ContainerFileCitationBody: + properties: + type: + type: string + enum: + - container_file_citation + description: The type of the container file citation. Always + `container_file_citation`. + default: container_file_citation + x-stainless-const: true + container_id: + type: string + description: The ID of the container file. + file_id: + type: string + description: The ID of the file. + start_index: + type: integer + description: The index of the first character of the container file citation in + the message. + end_index: + type: integer + description: The index of the last character of the container file citation in + the message. + filename: + type: string + description: The filename of the container file cited. + type: object + required: + - type + - container_id + - file_id + - start_index + - end_index + - filename + title: Container file citation + description: A citation for a container file used to generate a model response. 
Annotation: oneOf: - $ref: "#/components/schemas/FileCitationBody" - $ref: "#/components/schemas/UrlCitationBody" + - $ref: "#/components/schemas/ContainerFileCitationBody" - $ref: "#/components/schemas/FilePath" discriminator: propertyName: type + TopLogProb: + properties: + token: + type: string + logprob: + type: number + bytes: + items: + type: integer + type: array + type: object + required: + - token + - logprob + - bytes + title: Top log probability + description: The top log probability of a token. + LogProb: + properties: + token: + type: string + logprob: + type: number + bytes: + items: + type: integer + type: array + top_logprobs: + items: + $ref: "#/components/schemas/TopLogProb" + type: array + type: object + required: + - token + - logprob + - bytes + - top_logprobs + title: Log probability + description: The log probability of a token. OutputTextContent: properties: type: @@ -38398,6 +44176,10 @@ components: $ref: "#/components/schemas/Annotation" type: array description: The annotations of the text output. 
+ logprobs: + items: + $ref: "#/components/schemas/LogProb" + type: array type: object required: - type @@ -38561,6 +44343,8 @@ x-oaiMeta: title: Responses - id: chat title: Chat Completions + - id: webhooks + title: Webhooks - id: realtime title: Realtime beta: true @@ -38568,6 +44352,8 @@ x-oaiMeta: title: Platform APIs - id: vector_stores title: Vector stores + - id: containers + title: Containers - id: assistants title: Assistants beta: true @@ -38623,6 +44409,9 @@ x-oaiMeta: - type: endpoint key: deleteResponse path: delete + - type: endpoint + key: cancelResponse + path: cancel - type: endpoint key: listInputItems path: input-items @@ -38679,9 +44468,6 @@ x-oaiMeta: - type: object key: ResponseTextDeltaEvent path: - - type: object - key: ResponseTextAnnotationDeltaEvent - path: - type: object key: ResponseTextDoneEvent path: @@ -38727,6 +44513,75 @@ x-oaiMeta: - type: object key: ResponseReasoningSummaryTextDoneEvent path: + - type: object + key: ResponseImageGenCallCompletedEvent + path: + - type: object + key: ResponseImageGenCallGeneratingEvent + path: + - type: object + key: ResponseImageGenCallInProgressEvent + path: + - type: object + key: ResponseImageGenCallPartialImageEvent + path: + - type: object + key: ResponseMCPCallArgumentsDeltaEvent + path: + - type: object + key: ResponseMCPCallArgumentsDoneEvent + path: + - type: object + key: ResponseMCPCallCompletedEvent + path: + - type: object + key: ResponseMCPCallFailedEvent + path: + - type: object + key: ResponseMCPCallInProgressEvent + path: + - type: object + key: ResponseMCPListToolsCompletedEvent + path: + - type: object + key: ResponseMCPListToolsFailedEvent + path: + - type: object + key: ResponseMCPListToolsInProgressEvent + path: + - type: object + key: ResponseCodeInterpreterCallInProgressEvent + path: + - type: object + key: ResponseCodeInterpreterCallInterpretingEvent + path: + - type: object + key: ResponseCodeInterpreterCallCompletedEvent + path: + - type: object + key: 
ResponseCodeInterpreterCallCodeDeltaEvent + path: + - type: object + key: ResponseCodeInterpreterCallCodeDoneEvent + path: + - type: object + key: ResponseOutputTextAnnotationAddedEvent + path: + - type: object + key: ResponseQueuedEvent + path: + - type: object + key: ResponseReasoningDeltaEvent + path: + - type: object + key: ResponseReasoningDoneEvent + path: + - type: object + key: ResponseReasoningSummaryDeltaEvent + path: + - type: object + key: ResponseReasoningSummaryDoneEvent + path: - type: object key: ResponseErrorEvent path: @@ -38788,6 +44643,60 @@ x-oaiMeta: - type: object key: CreateChatCompletionStreamResponse path: streaming + - id: webhook-events + title: Webhook Events + description: > + Webhooks are HTTP requests sent by OpenAI to a URL you specify when + certain + + events happen during the course of API usage. + + + [Learn more about webhooks](/docs/guides/webhooks). + navigationGroup: webhooks + sections: + - type: object + key: WebhookResponseCompleted + path: + - type: object + key: WebhookResponseCancelled + path: + - type: object + key: WebhookResponseFailed + path: + - type: object + key: WebhookResponseIncomplete + path: + - type: object + key: WebhookBatchCompleted + path: + - type: object + key: WebhookBatchCancelled + path: + - type: object + key: WebhookBatchExpired + path: + - type: object + key: WebhookBatchFailed + path: + - type: object + key: WebhookFineTuningJobSucceeded + path: + - type: object + key: WebhookFineTuningJobFailed + path: + - type: object + key: WebhookFineTuningJobCancelled + path: + - type: object + key: WebhookEvalRunSucceeded + path: + - type: object + key: WebhookEvalRunFailed + path: + - type: object + key: WebhookEvalRunCanceled + path: - id: realtime title: Realtime beta: true @@ -38993,6 +44902,12 @@ x-oaiMeta: - type: object key: CreateTranscriptionResponseVerboseJson path: verbose-json-object + - type: object + key: SpeechAudioDeltaEvent + path: speech-audio-delta-event + - type: object + key: 
SpeechAudioDoneEvent + path: speech-audio-done-event - type: object key: TranscriptTextDeltaEvent path: transcript-text-delta-event @@ -39123,6 +45038,12 @@ x-oaiMeta: - type: endpoint key: cancelFineTuningJob path: cancel + - type: endpoint + key: resumeFineTuningJob + path: resume + - type: endpoint + key: pauseFineTuningJob + path: pause - type: object key: FineTuneChatRequestInput path: chat-input @@ -39130,8 +45051,8 @@ x-oaiMeta: key: FineTunePreferenceRequestInput path: preference-input - type: object - key: FineTuneCompletionRequestInput - path: completions-input + key: FineTuneReinforcementRequestInput + path: reinforcement-input - type: object key: FineTuningJob path: object @@ -39144,6 +45065,38 @@ x-oaiMeta: - type: object key: FineTuningCheckpointPermission path: permission-object + - id: graders + title: Graders + description: | + Manage and run graders in the OpenAI platform. + Related guide: [Graders](/docs/guides/graders) + navigationGroup: endpoints + sections: + - type: object + key: GraderStringCheck + path: string-check + - type: object + key: GraderTextSimilarity + path: text-similarity + - type: object + key: GraderScoreModel + path: score-model + - type: object + key: GraderLabelModel + path: label-model + - type: object + key: GraderPython + path: python + - type: object + key: GraderMulti + path: multi + - type: endpoint + key: runGrader + path: run + - type: endpoint + key: validateGrader + path: validate + beta: true - id: batch title: Batch description: > @@ -39344,6 +45297,51 @@ x-oaiMeta: - type: object key: VectorStoreFileBatchObject path: batch-object + - id: containers + title: Containers + description: | + Create and manage containers for use with the Code Interpreter tool. 
+ navigationGroup: containers + sections: + - type: endpoint + key: CreateContainer + path: createContainers + - type: endpoint + key: ListContainers + path: listContainers + - type: endpoint + key: RetrieveContainer + path: retrieveContainer + - type: endpoint + key: DeleteContainer + path: deleteContainer + - type: object + key: ContainerResource + path: object + - id: container-files + title: Container Files + description: > + Create and manage container files for use with the Code Interpreter tool. + navigationGroup: containers + sections: + - type: endpoint + key: CreateContainerFile + path: createContainerFile + - type: endpoint + key: ListContainerFiles + path: listContainerFiles + - type: endpoint + key: RetrieveContainerFile + path: retrieveContainerFile + - type: endpoint + key: RetrieveContainerFileContent + path: retrieveContainerFileContent + - type: endpoint + key: DeleteContainerFile + path: deleteContainerFile + - type: object + key: ContainerFileResource + path: object - id: assistants title: Assistants beta: true @@ -39703,8 +45701,8 @@ x-oaiMeta: description: > Logs of user actions and configuration changes within this organization. - To log events, you must activate logging in the [Organization - Settings](/settings/organization/general). + To log events, an Organization Owner must activate logging in the [Data + Controls Settings](/settings/organization/data-controls/data-retention). Once activated, for security reasons, logging cannot be deactivated. navigationGroup: administration @@ -39845,4 +45843,4 @@ x-oaiMeta: path: create - type: object key: CreateCompletionResponse - path: object + path: object \ No newline at end of file