Skip to content

Commit eaa081c

Browse files
committed
move over parse and stream methods out of beta
1 parent 710fe8f commit eaa081c

File tree

3 files changed: +643 additions, −191 deletions

src/openai/resources/beta/chat/completions.py

Lines changed: 85 additions & 171 deletions
Original file line numberDiff line numberDiff line change
@@ -2,32 +2,23 @@
22

33
from __future__ import annotations
44

5-
from typing import Dict, List, Type, Union, Iterable, Optional, cast
6-
from functools import partial
5+
from typing import Dict, List, Union, Iterable, Optional
76
from typing_extensions import Literal
87

98
import httpx
109

1110
from .... import _legacy_response
1211
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
13-
from ...._utils import maybe_transform, async_maybe_transform
1412
from ...._compat import cached_property
1513
from ...._resource import SyncAPIResource, AsyncAPIResource
1614
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
17-
from ...._streaming import Stream
1815
from ....types.chat import completion_create_params
19-
from ...._base_client import make_request_options
2016
from ....lib._parsing import (
2117
ResponseFormatT,
22-
validate_input_tools as _validate_input_tools,
23-
parse_chat_completion as _parse_chat_completion,
24-
type_to_response_format_param as _type_to_response_format,
2518
)
2619
from ....types.chat_model import ChatModel
2720
from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
2821
from ....types.shared_params import Metadata, ReasoningEffort
29-
from ....types.chat.chat_completion import ChatCompletion
30-
from ....types.chat.chat_completion_chunk import ChatCompletionChunk
3122
from ....types.chat.parsed_chat_completion import ParsedChatCompletion
3223
from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
3324
from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
@@ -126,7 +117,7 @@ class MathResponse(BaseModel):
126117
127118
128119
client = OpenAI()
129-
completion = client.beta.chat.completions.parse(
120+
completion = client.chat.completions.parse(
130121
model="gpt-4o-2024-08-06",
131122
messages=[
132123
{"role": "system", "content": "You are a helpful math tutor."},
@@ -141,69 +132,42 @@ class MathResponse(BaseModel):
141132
print("answer: ", message.parsed.final_answer)
142133
```
143134
"""
144-
_validate_input_tools(tools)
145-
146-
extra_headers = {
147-
"X-Stainless-Helper-Method": "beta.chat.completions.parse",
148-
**(extra_headers or {}),
149-
}
150-
151-
def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
152-
return _parse_chat_completion(
153-
response_format=response_format,
154-
chat_completion=raw_completion,
155-
input_tools=tools,
156-
)
157-
158-
return self._post(
159-
"/chat/completions",
160-
body=maybe_transform(
161-
{
162-
"messages": messages,
163-
"model": model,
164-
"audio": audio,
165-
"frequency_penalty": frequency_penalty,
166-
"function_call": function_call,
167-
"functions": functions,
168-
"logit_bias": logit_bias,
169-
"logprobs": logprobs,
170-
"max_completion_tokens": max_completion_tokens,
171-
"max_tokens": max_tokens,
172-
"metadata": metadata,
173-
"modalities": modalities,
174-
"n": n,
175-
"parallel_tool_calls": parallel_tool_calls,
176-
"prediction": prediction,
177-
"presence_penalty": presence_penalty,
178-
"reasoning_effort": reasoning_effort,
179-
"response_format": _type_to_response_format(response_format),
180-
"seed": seed,
181-
"service_tier": service_tier,
182-
"stop": stop,
183-
"store": store,
184-
"stream": False,
185-
"stream_options": stream_options,
186-
"temperature": temperature,
187-
"tool_choice": tool_choice,
188-
"tools": tools,
189-
"top_logprobs": top_logprobs,
190-
"top_p": top_p,
191-
"user": user,
192-
"web_search_options": web_search_options,
193-
},
194-
completion_create_params.CompletionCreateParams,
195-
),
196-
options=make_request_options(
197-
extra_headers=extra_headers,
198-
extra_query=extra_query,
199-
extra_body=extra_body,
200-
timeout=timeout,
201-
post_parser=parser,
202-
),
203-
# we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
204-
# in the `parser` function above
205-
cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
206-
stream=False,
135+
# Delegate to the regular chat.completions.parse method
136+
return self._client.chat.completions.parse(
137+
messages=messages,
138+
model=model,
139+
audio=audio,
140+
response_format=response_format,
141+
frequency_penalty=frequency_penalty,
142+
function_call=function_call,
143+
functions=functions,
144+
logit_bias=logit_bias,
145+
logprobs=logprobs,
146+
max_completion_tokens=max_completion_tokens,
147+
max_tokens=max_tokens,
148+
metadata=metadata,
149+
modalities=modalities,
150+
n=n,
151+
parallel_tool_calls=parallel_tool_calls,
152+
prediction=prediction,
153+
presence_penalty=presence_penalty,
154+
reasoning_effort=reasoning_effort,
155+
seed=seed,
156+
service_tier=service_tier,
157+
stop=stop,
158+
store=store,
159+
stream_options=stream_options,
160+
temperature=temperature,
161+
tool_choice=tool_choice,
162+
tools=tools,
163+
top_logprobs=top_logprobs,
164+
top_p=top_p,
165+
user=user,
166+
web_search_options=web_search_options,
167+
extra_headers=extra_headers,
168+
extra_query=extra_query,
169+
extra_body=extra_body,
170+
timeout=timeout,
207171
)
208172

209173
def stream(
@@ -254,7 +218,7 @@ def stream(
254218
Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
255219
256220
```py
257-
with client.beta.chat.completions.stream(
221+
with client.chat.completions.stream(
258222
model="gpt-4o-2024-08-06",
259223
messages=[...],
260224
) as stream:
@@ -268,18 +232,12 @@ def stream(
268232
When the context manager exits, the response will be closed, however the `stream` instance is still available outside
269233
the context manager.
270234
"""
271-
extra_headers = {
272-
"X-Stainless-Helper-Method": "beta.chat.completions.stream",
273-
**(extra_headers or {}),
274-
}
275-
276-
api_request: partial[Stream[ChatCompletionChunk]] = partial(
277-
self._client.chat.completions.create,
235+
# Delegate to the regular chat.completions.stream method
236+
return self._client.chat.completions.stream(
278237
messages=messages,
279238
model=model,
280239
audio=audio,
281-
stream=True,
282-
response_format=_type_to_response_format(response_format),
240+
response_format=response_format,
283241
frequency_penalty=frequency_penalty,
284242
function_call=function_call,
285243
functions=functions,
@@ -296,8 +254,8 @@ def stream(
296254
reasoning_effort=reasoning_effort,
297255
seed=seed,
298256
service_tier=service_tier,
299-
store=store,
300257
stop=stop,
258+
store=store,
301259
stream_options=stream_options,
302260
temperature=temperature,
303261
tool_choice=tool_choice,
@@ -311,11 +269,6 @@ def stream(
311269
extra_body=extra_body,
312270
timeout=timeout,
313271
)
314-
return ChatCompletionStreamManager(
315-
api_request,
316-
response_format=response_format,
317-
input_tools=tools,
318-
)
319272

320273

321274
class AsyncCompletions(AsyncAPIResource):
@@ -405,7 +358,7 @@ class MathResponse(BaseModel):
405358
406359
407360
client = AsyncOpenAI()
408-
completion = await client.beta.chat.completions.parse(
361+
completion = await client.chat.completions.parse(
409362
model="gpt-4o-2024-08-06",
410363
messages=[
411364
{"role": "system", "content": "You are a helpful math tutor."},
@@ -420,69 +373,42 @@ class MathResponse(BaseModel):
420373
print("answer: ", message.parsed.final_answer)
421374
```
422375
"""
423-
_validate_input_tools(tools)
424-
425-
extra_headers = {
426-
"X-Stainless-Helper-Method": "beta.chat.completions.parse",
427-
**(extra_headers or {}),
428-
}
429-
430-
def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
431-
return _parse_chat_completion(
432-
response_format=response_format,
433-
chat_completion=raw_completion,
434-
input_tools=tools,
435-
)
436-
437-
return await self._post(
438-
"/chat/completions",
439-
body=await async_maybe_transform(
440-
{
441-
"messages": messages,
442-
"model": model,
443-
"audio": audio,
444-
"frequency_penalty": frequency_penalty,
445-
"function_call": function_call,
446-
"functions": functions,
447-
"logit_bias": logit_bias,
448-
"logprobs": logprobs,
449-
"max_completion_tokens": max_completion_tokens,
450-
"max_tokens": max_tokens,
451-
"metadata": metadata,
452-
"modalities": modalities,
453-
"n": n,
454-
"parallel_tool_calls": parallel_tool_calls,
455-
"prediction": prediction,
456-
"presence_penalty": presence_penalty,
457-
"reasoning_effort": reasoning_effort,
458-
"response_format": _type_to_response_format(response_format),
459-
"seed": seed,
460-
"service_tier": service_tier,
461-
"store": store,
462-
"stop": stop,
463-
"stream": False,
464-
"stream_options": stream_options,
465-
"temperature": temperature,
466-
"tool_choice": tool_choice,
467-
"tools": tools,
468-
"top_logprobs": top_logprobs,
469-
"top_p": top_p,
470-
"user": user,
471-
"web_search_options": web_search_options,
472-
},
473-
completion_create_params.CompletionCreateParams,
474-
),
475-
options=make_request_options(
476-
extra_headers=extra_headers,
477-
extra_query=extra_query,
478-
extra_body=extra_body,
479-
timeout=timeout,
480-
post_parser=parser,
481-
),
482-
# we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
483-
# in the `parser` function above
484-
cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
485-
stream=False,
376+
# Delegate to the regular chat.completions.parse method
377+
return await self._client.chat.completions.parse(
378+
messages=messages,
379+
model=model,
380+
audio=audio,
381+
response_format=response_format,
382+
frequency_penalty=frequency_penalty,
383+
function_call=function_call,
384+
functions=functions,
385+
logit_bias=logit_bias,
386+
logprobs=logprobs,
387+
max_completion_tokens=max_completion_tokens,
388+
max_tokens=max_tokens,
389+
metadata=metadata,
390+
modalities=modalities,
391+
n=n,
392+
parallel_tool_calls=parallel_tool_calls,
393+
prediction=prediction,
394+
presence_penalty=presence_penalty,
395+
reasoning_effort=reasoning_effort,
396+
seed=seed,
397+
service_tier=service_tier,
398+
stop=stop,
399+
store=store,
400+
stream_options=stream_options,
401+
temperature=temperature,
402+
tool_choice=tool_choice,
403+
tools=tools,
404+
top_logprobs=top_logprobs,
405+
top_p=top_p,
406+
user=user,
407+
web_search_options=web_search_options,
408+
extra_headers=extra_headers,
409+
extra_query=extra_query,
410+
extra_body=extra_body,
411+
timeout=timeout,
486412
)
487413

488414
def stream(
@@ -533,7 +459,7 @@ def stream(
533459
Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
534460
535461
```py
536-
async with client.beta.chat.completions.stream(
462+
async with client.chat.completions.stream(
537463
model="gpt-4o-2024-08-06",
538464
messages=[...],
539465
) as stream:
@@ -547,19 +473,12 @@ def stream(
547473
When the context manager exits, the response will be closed, however the `stream` instance is still available outside
548474
the context manager.
549475
"""
550-
_validate_input_tools(tools)
551-
552-
extra_headers = {
553-
"X-Stainless-Helper-Method": "beta.chat.completions.stream",
554-
**(extra_headers or {}),
555-
}
556-
557-
api_request = self._client.chat.completions.create(
476+
# Delegate to the regular chat.completions.stream method
477+
return self._client.chat.completions.stream(
558478
messages=messages,
559479
model=model,
560480
audio=audio,
561-
stream=True,
562-
response_format=_type_to_response_format(response_format),
481+
response_format=response_format,
563482
frequency_penalty=frequency_penalty,
564483
function_call=function_call,
565484
functions=functions,
@@ -585,16 +504,11 @@ def stream(
585504
top_logprobs=top_logprobs,
586505
top_p=top_p,
587506
user=user,
507+
web_search_options=web_search_options,
588508
extra_headers=extra_headers,
589509
extra_query=extra_query,
590510
extra_body=extra_body,
591511
timeout=timeout,
592-
web_search_options=web_search_options,
593-
)
594-
return AsyncChatCompletionStreamManager(
595-
api_request,
596-
response_format=response_format,
597-
input_tools=tools,
598512
)
599513

600514

Comments (0)