
Commit 73c71cb

Sync updates from Spec (#171)
* updates to doc comments and types
* deprecated
* update ChatCompletionFunctions to FunctionObject
* More type updates
* add logprobs field
* update from spec
* updated spec
* fixes suggested by cargo clippy
1 parent b5c83c0 commit 73c71cb

18 files changed (+698 −457 lines)

async-openai/src/chat.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -9,7 +9,7 @@ use crate::{
 
 /// Given a list of messages comprising a conversation, the model will return a response.
 ///
-/// Related guide: [Chat completions](https://platform.openai.com/docs/guides/gpt)
+/// Related guide: [Chat completions](https://platform.openai.com/docs/guides/text-generation)
 pub struct Chat<'c, C: Config> {
     client: &'c Client<C>,
 }
```

async-openai/src/client.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -388,7 +388,7 @@ where
             }
 
             let response = match serde_json::from_str::<O>(&message.data) {
-                Err(e) => Err(map_deserialization_error(e, &message.data.as_bytes())),
+                Err(e) => Err(map_deserialization_error(e, message.data.as_bytes())),
                 Ok(output) => Ok(output),
             };
 
```
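Note: this is one of the "fixes suggested by cargo clippy" from the commit message. `String::as_bytes` already returns `&[u8]`, so the extra `&` produced a `&&[u8]` and tripped clippy's `needless_borrow` lint. A minimal standalone reproduction (not repo code):

```rust
// Minimal reproduction of clippy's `needless_borrow` lint.
fn takes_bytes(_b: &[u8]) {}

fn main() {
    let data = String::from("hello");
    takes_bytes(&data.as_bytes()); // compiles via auto-deref, but clippy warns: needless borrow
    takes_bytes(data.as_bytes()); // preferred: `as_bytes` already yields `&[u8]`
}
```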

async-openai/src/download.rs

Lines changed: 3 additions & 11 deletions
```diff
@@ -42,18 +42,12 @@ pub(crate) async fn download_url<P: AsRef<Path>>(
 
     tokio::fs::create_dir_all(dir.as_path())
         .await
-        .map_err(|e| {
-            OpenAIError::FileSaveError(format!("{}, dir: {}", e.to_string(), dir.display()))
-        })?;
+        .map_err(|e| OpenAIError::FileSaveError(format!("{}, dir: {}", e, dir.display())))?;
 
     tokio::fs::write(
         file_path.as_path(),
         response.bytes().await.map_err(|e| {
-            OpenAIError::FileSaveError(format!(
-                "{}, file path: {}",
-                e.to_string(),
-                file_path.display()
-            ))
+            OpenAIError::FileSaveError(format!("{}, file path: {}", e, file_path.display()))
         })?,
     )
     .await
@@ -80,9 +74,7 @@ pub(crate) async fn save_b64<P: AsRef<Path>>(b64: &str, dir: P) -> Result<PathBuf
             .map_err(|e| OpenAIError::FileSaveError(e.to_string()))?,
     )
     .await
-    .map_err(|e| {
-        OpenAIError::FileSaveError(format!("{}, path: {}", e.to_string(), path.display()))
-    })?;
+    .map_err(|e| OpenAIError::FileSaveError(format!("{}, path: {}", e, path.display())))?;
 
     Ok(path)
 }
```
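Both hunks are the same clippy cleanup (`to_string_in_format_args`): `format!` renders any `Display` value directly, so calling `e.to_string()` inside the arguments allocates a throwaway intermediate `String`. A standalone sketch of the pattern, not repo code:

```rust
use std::fmt::Display;

// `format!("{}", e)` calls Display::fmt on `e` directly;
// `e.to_string()` first would be a redundant allocation.
fn describe(e: impl Display, dir: &str) -> String {
    format!("{}, dir: {}", e, dir)
}

fn main() {
    let err = std::io::Error::new(std::io::ErrorKind::NotFound, "missing");
    assert_eq!(describe(&err, "downloads"), "missing, dir: downloads");
}
```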

async-openai/src/error.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -45,7 +45,7 @@ pub(crate) struct WrappedError {
 pub(crate) fn map_deserialization_error(e: serde_json::Error, bytes: &[u8]) -> OpenAIError {
     tracing::error!(
         "failed deserialization of: {}",
-        String::from_utf8_lossy(bytes.as_ref())
+        String::from_utf8_lossy(bytes)
     );
     OpenAIError::JSONDeserialize(e)
 }
```

async-openai/src/file.rs

Lines changed: 5 additions & 1 deletion
```diff
@@ -15,7 +15,11 @@ impl<'c, C: Config> Files<'c, C> {
         Self { client }
     }
 
-    /// Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
+    /// Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB.
+    ///
+    /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See the [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files.
+    ///
+    /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits.
     pub async fn create(&self, request: CreateFileRequest) -> Result<OpenAIFile, OpenAIError> {
         self.client.post_form("/files", request).await
     }
```
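For context, uploading through this endpoint looks roughly like the sketch below. It assumes the crate's `CreateFileRequestArgs` builder with `file` and `purpose` setters and an `OPENAI_API_KEY` in the environment; treat the file name as illustrative:

```rust
use async_openai::{types::CreateFileRequestArgs, Client};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Client::new() picks up OPENAI_API_KEY from the environment.
    let client = Client::new();

    // Per the updated doc comment, the Fine-tuning API only accepts `.jsonl`.
    let request = CreateFileRequestArgs::default()
        .file("./training_data.jsonl")
        .purpose("fine-tune")
        .build()?;

    let file = client.files().create(request).await?;
    println!("uploaded file id: {}", file.id);
    Ok(())
}
```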

async-openai/src/types/assistant.rs

Lines changed: 5 additions & 2 deletions
```diff
@@ -3,7 +3,9 @@ use std::collections::HashMap;
 use derive_builder::Builder;
 use serde::{Deserialize, Serialize};
 
-use crate::{error::OpenAIError, types::ChatCompletionFunctions};
+use crate::error::OpenAIError;
+
+use super::FunctionObject;
 
 /// Represents an `assistant` that can call the model and use tools.
 #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
@@ -46,7 +48,7 @@ pub struct AssistantToolsRetrieval {
 #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
 pub struct AssistantToolsFunction {
     pub r#type: String,
-    pub function: ChatCompletionFunctions,
+    pub function: FunctionObject,
 }
 
 #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
@@ -106,6 +108,7 @@ pub struct ModifyAssistantRequest {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tools: Option<Vec<AssistantTools>>,
 
+    /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub file_ids: Option<Vec<String>>,
 
```

async-openai/src/types/assistant_impls.rs

Lines changed: 8 additions & 5 deletions
```diff
@@ -1,6 +1,9 @@
 use crate::types::ChatCompletionFunctions;
 
-use super::{AssistantTools, AssistantToolsCode, AssistantToolsFunction, AssistantToolsRetrieval};
+use super::{
+    AssistantTools, AssistantToolsCode, AssistantToolsFunction, AssistantToolsRetrieval,
+    FunctionObject,
+};
 
 impl From<AssistantToolsCode> for AssistantTools {
     fn from(value: AssistantToolsCode) -> Self {
@@ -45,17 +48,17 @@ impl Default for AssistantToolsFunction {
     }
 }
 
-impl From<ChatCompletionFunctions> for AssistantToolsFunction {
-    fn from(value: ChatCompletionFunctions) -> Self {
+impl From<FunctionObject> for AssistantToolsFunction {
+    fn from(value: FunctionObject) -> Self {
         Self {
             r#type: "function".into(),
            function: value,
         }
     }
 }
 
-impl From<ChatCompletionFunctions> for AssistantTools {
-    fn from(value: ChatCompletionFunctions) -> Self {
+impl From<FunctionObject> for AssistantTools {
+    fn from(value: FunctionObject) -> Self {
         Self::Function(value.into())
     }
 }
```
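With these impls, a `FunctionObject` converts straight into an assistant tool. A small sketch using only types from this diff (the field values are invented for illustration):

```rust
use async_openai::types::{AssistantTools, FunctionObject};

fn main() {
    // Hypothetical function definition, just to exercise the From impls.
    let func = FunctionObject {
        name: "lookup_order".into(),
        description: Some("Look up an order by its ID".into()),
        parameters: None, // omitting `parameters` means an empty parameter list
    };

    // Goes through From<FunctionObject> for AssistantToolsFunction,
    // then wraps it in the AssistantTools::Function variant.
    let tool: AssistantTools = func.into();
    if let AssistantTools::Function(f) = &tool {
        assert_eq!(f.r#type, "function");
    }
}
```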

async-openai/src/types/audio.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -101,7 +101,7 @@ pub struct CreateSpeechRequest {
     /// One of the available [TTS models](https://platform.openai.com/docs/models/tts): `tts-1` or `tts-1-hd`
     pub model: SpeechModel,
 
-    /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`.
+    /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
     pub voice: Voice,
 
     /// The format to audio in. Supported formats are mp3, opus, aac, and flac.
```

async-openai/src/types/chat.rs

Lines changed: 74 additions & 18 deletions
```diff
@@ -103,7 +103,7 @@ pub struct CompletionUsage {
 #[builder(build_fn(error = "OpenAIError"))]
 pub struct ChatCompletionRequestSystemMessage {
     /// The contents of the system message.
-    pub content: Option<String>,
+    pub content: String,
     /// The role of the messages author, in this case `system`.
     #[builder(default = "Role::System")]
     pub role: Role,
@@ -142,7 +142,7 @@ pub enum ImageUrlDetail {
 pub struct ImageUrl {
     /// Either a URL of the image or the base64 encoded image data.
     pub url: String,
-    /// Specifies the detail level of the image.
+    /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).
     pub detail: ImageUrlDetail,
 }
 
@@ -184,7 +184,7 @@ pub enum ChatCompletionRequestUserMessageContent {
 #[builder(build_fn(error = "OpenAIError"))]
 pub struct ChatCompletionRequestUserMessage {
     /// The contents of the user message.
-    pub content: Option<ChatCompletionRequestUserMessageContent>,
+    pub content: ChatCompletionRequestUserMessageContent,
     /// The role of the messages author, in this case `user`.
     #[builder(default = "Role::User")]
     pub role: Role,
@@ -228,7 +228,7 @@ pub struct ChatCompletionRequestToolMessage {
     #[builder(default = "Role::Tool")]
     pub role: Role,
     /// The contents of the tool message.
-    pub content: Option<String>,
+    pub content: String,
     pub tool_call_id: String,
 }
 
@@ -292,22 +292,38 @@ pub struct ChatCompletionResponseMessage {
 #[builder(setter(into, strip_option), default)]
 #[builder(derive(Debug))]
 #[builder(build_fn(error = "OpenAIError"))]
+#[deprecated]
 pub struct ChatCompletionFunctions {
     /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
     pub name: String,
     /// A description of what the function does, used by the model to choose when and how to call the function.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub description: Option<String>,
-    /// The parameters the functions accepts, described as a JSON Schema object.
-    /// See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples,
-    /// and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    /// documentation about the format.
+    /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
     ///
-    /// To describe a function that accepts no parameters, provide the
-    /// value `{\"type\": \"object\", \"properties\": {}}`.
+    /// Omitting `parameters` defines a function with an empty parameter list.
     pub parameters: serde_json::Value,
 }
 
+#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
+#[builder(name = "FunctionObjectArgs")]
+#[builder(pattern = "mutable")]
+#[builder(setter(into, strip_option), default)]
+#[builder(derive(Debug))]
+#[builder(build_fn(error = "OpenAIError"))]
+pub struct FunctionObject {
+    /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
+    pub name: String,
+    /// A description of what the function does, used by the model to choose when and how to call the function.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<String>,
+    /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
+    ///
+    /// Omitting `parameters` defines a function with an empty parameter list.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub parameters: Option<serde_json::Value>,
+}
+
 #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
 #[serde(rename_all = "snake_case")]
 pub enum ChatCompletionResponseFormatType {
```
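The new `FunctionObject` keeps the shape of the now-deprecated `ChatCompletionFunctions` but makes `parameters` optional. A hedged sketch of building one through the generated `FunctionObjectArgs` builder (the function name and schema are invented for illustration):

```rust
use async_openai::types::{FunctionObject, FunctionObjectArgs};
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // A hypothetical weather function in the style of the function-calling guide.
    let func: FunctionObject = FunctionObjectArgs::default()
        .name("get_current_weather")
        .description("Get the current weather in a given location")
        .parameters(json!({
            "type": "object",
            "properties": {
                "location": { "type": "string", "description": "City and state, e.g. San Francisco, CA" },
                "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] }
            },
            "required": ["location"]
        }))
        .build()?;

    // `parameters` could also be omitted entirely: per the doc comment,
    // that defines a function with an empty parameter list.
    assert_eq!(func.name, "get_current_weather");
    Ok(())
}
```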
```diff
@@ -344,7 +360,7 @@ pub enum ChatCompletionToolType {
 pub struct ChatCompletionTool {
     #[builder(default = "ChatCompletionToolType::Function")]
     pub r#type: ChatCompletionToolType,
-    pub function: ChatCompletionFunctions,
+    pub function: FunctionObject,
 }
 
 #[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
@@ -407,13 +423,21 @@ pub struct CreateChatCompletionRequest {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub logit_bias: Option<HashMap<String, serde_json::Value>>, // default: null
 
-    /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the chat completion.
+    /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub logprobs: Option<bool>,
+
+    /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub top_logprobs: Option<u8>,
+
+    /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion.
     ///
-    /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
+    /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub max_tokens: Option<u16>,
 
-    /// How many chat completion choices to generate for each input message.
+    /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub n: Option<u8>, // min:1, max: 128, default: 1
 
```
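The new `logprobs`/`top_logprobs` request fields flow through the derived `CreateChatCompletionRequestArgs` builder. A hedged sketch of a request asking for the two most likely alternatives per token; it assumes the crate's `From<&str>` conversion for user-message content and its message-to-`ChatCompletionRequestMessage` `From` impls, with an illustrative model name:

```rust
use async_openai::{
    types::{ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs},
    Client,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new();

    let request = CreateChatCompletionRequestArgs::default()
        .model("gpt-3.5-turbo-1106")
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content("Say hello")
            .build()?
            .into()])
        .logprobs(true) // return per-token log probabilities
        .top_logprobs(2u8) // plus the 2 most likely tokens at each position
        .build()?;

    let response = client.chat().create(request).await?;
    println!("{:?}", response.choices[0].logprobs);
    Ok(())
}
```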

```diff
@@ -423,11 +447,11 @@ pub struct CreateChatCompletionRequest {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub presence_penalty: Option<f32>, // min: -2.0, max: 2.0, default 0
 
-    /// An object specifying the format that the model must output.
+    /// An object specifying the format that the model must output. Compatible with `gpt-4-1106-preview` and `gpt-3.5-turbo-1106`.
     ///
     /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
     ///
-    /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in increased latency and appearance of a "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
+    /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub response_format: Option<ChatCompletionResponseFormat>,
 
```
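JSON mode in practice, as the doc comment prescribes: set the response format and also tell the model to emit JSON in a message. A sketch under the assumption that `ChatCompletionResponseFormat` is a struct with a single `r#type` field and that the usual message `From` impls exist:

```rust
use async_openai::types::{
    ChatCompletionRequestSystemMessageArgs, ChatCompletionResponseFormat,
    ChatCompletionResponseFormatType, CreateChatCompletionRequest,
    CreateChatCompletionRequestArgs,
};

fn build_json_mode_request() -> Result<CreateChatCompletionRequest, async_openai::error::OpenAIError> {
    CreateChatCompletionRequestArgs::default()
        .model("gpt-3.5-turbo-1106") // JSON mode needs a compatible model
        .response_format(ChatCompletionResponseFormat {
            r#type: ChatCompletionResponseFormatType::JsonObject,
        })
        // Without an explicit "produce JSON" instruction, the model may emit
        // whitespace until it hits the token limit (see doc comment above).
        .messages([ChatCompletionRequestSystemMessageArgs::default()
            .content("You are a helpful assistant. Reply only with a JSON object.")
            .build()?
            .into()])
        .build()
}
```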

```diff
@@ -500,6 +524,34 @@ pub enum FinishReason {
     FunctionCall,
 }
 
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct TopLogprobs {
+    /// The token.
+    pub token: String,
+    /// The log probability of this token.
+    pub logprob: f32,
+    /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
+    pub bytes: Option<Vec<u8>>,
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct ChatCompletionTokenLogprob {
+    /// The token.
+    pub token: String,
+    /// The log probability of this token.
+    pub logprob: f32,
+    /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
+    pub bytes: Option<Vec<u8>>,
+    /// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.
+    pub top_logprobs: Vec<TopLogprobs>,
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct ChatChoiceLogprobs {
+    /// A list of message content tokens with log probability information.
+    pub content: Option<Vec<ChatCompletionTokenLogprob>>,
+}
+
 #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
 pub struct ChatChoice {
     /// The index of the choice in the list of choices.
```
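Reading the new structures back out of a response could look like this sketch; it only touches fields defined in the diff above:

```rust
use async_openai::types::ChatChoice;

// Walk the optional logprob data attached to a (non-streaming) choice.
fn print_logprobs(choice: &ChatChoice) {
    if let Some(logprobs) = &choice.logprobs {
        for token_lp in logprobs.content.iter().flatten() {
            println!("token={:?} logprob={:.4}", token_lp.token, token_lp.logprob);
            for alt in &token_lp.top_logprobs {
                println!("  alternative {:?} -> {:.4}", alt.token, alt.logprob);
            }
        }
    }
}
```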
```diff
@@ -510,6 +562,8 @@ pub struct ChatChoice {
     /// `content_filter` if content was omitted due to a flag from our content filters,
     /// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.
     pub finish_reason: Option<FinishReason>,
+    /// Log probability information for the choice.
+    pub logprobs: Option<ChatChoiceLogprobs>,
 }
 
 /// Represents a chat completion response returned by model, based on the provided input.
@@ -573,11 +627,13 @@ pub struct ChatCompletionStreamResponseDelta {
 }
 
 #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
-pub struct ChatCompletionResponseStreamMessage {
+pub struct ChatChoiceStream {
     /// The index of the choice in the list of choices.
     pub index: u32,
     pub delta: ChatCompletionStreamResponseDelta,
     pub finish_reason: Option<FinishReason>,
+    /// Log probability information for the choice.
+    pub logprobs: Option<ChatChoiceLogprobs>,
 }
 
 #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
@@ -586,7 +642,7 @@ pub struct CreateChatCompletionStreamResponse {
     /// A unique identifier for the chat completion. Each chunk has the same ID.
     pub id: String,
     /// A list of chat completion choices. Can be more than one if `n` is greater than 1.
-    pub choices: Vec<ChatCompletionResponseStreamMessage>,
+    pub choices: Vec<ChatChoiceStream>,
 
     /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
     pub created: u32,
```
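The rename means each streamed chunk now yields `ChatChoiceStream` values, which carry the same optional `logprobs` as non-streaming choices. A hedged consumption sketch, assuming the crate's `create_stream` helper (messages omitted for brevity; a real request needs them):

```rust
use async_openai::{types::CreateChatCompletionRequestArgs, Client};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new();
    let request = CreateChatCompletionRequestArgs::default()
        .model("gpt-3.5-turbo")
        .build()?;

    // Each chunk's `choices` is now Vec<ChatChoiceStream>.
    let mut stream = client.chat().create_stream(request).await?;
    while let Some(result) = stream.next().await {
        for choice in result?.choices {
            if let Some(content) = &choice.delta.content {
                print!("{content}");
            }
        }
    }
    Ok(())
}
```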
