@@ -78,6 +78,7 @@ class ChatOCIModelDeployment(BaseChatModel, BaseOCIModelDeployment):
         .. code-block:: python
 
             import ads
+
             ads.set_auth("resource_principal")
 
         For more details on authentication, see:
@@ -109,7 +110,7 @@ class ChatOCIModelDeployment(BaseChatModel, BaseOCIModelDeployment):
 
             chat = ChatOCIModelDeployment(
                 endpoint="https://modeldeployment.<region>.oci.customer-oci.com/<ocid>/predict",
-                model="odsc-llm", # this is the default model name if deployed with AQUA
+                model="odsc-llm",  # this is the default model name if deployed with AQUA
                 streaming=True,
                 max_retries=3,
                 model_kwargs={
@@ -135,18 +136,14 @@ class ChatOCIModelDeployment(BaseChatModel, BaseOCIModelDeployment):
     .. code-block:: python
 
         AIMessage(
-            content=' Bonjour le monde!',
+            content=" Bonjour le monde!",
             response_metadata={
-                'token_usage': {
-                    'prompt_tokens': 40,
-                    'total_tokens': 50,
-                    'completion_tokens': 10
-                },
-                'model_name': 'odsc-llm',
-                'system_fingerprint': '',
-                'finish_reason': 'stop'
+                "token_usage": {"prompt_tokens": 40, "total_tokens": 50, "completion_tokens": 10},
+                "model_name": "odsc-llm",
+                "system_fingerprint": "",
+                "finish_reason": "stop",
             },
-            id='run-cbed62da-e1b3-4abd-9df3-ec89d69ca012-0'
+            id="run-cbed62da-e1b3-4abd-9df3-ec89d69ca012-0",
         )
 
     Streaming:
@@ -179,9 +176,9 @@ class ChatOCIModelDeployment(BaseChatModel, BaseOCIModelDeployment):
         .. code-block:: python
 
             AIMessage(
-                content=' Bonjour le monde!',
-                response_metadata={'finish_reason': 'stop'},
-                id='run-8657a105-96b7-4bb6-b98e-b69ca420e5d1-0'
+                content=" Bonjour le monde!",
+                response_metadata={"finish_reason": "stop"},
+                id="run-8657a105-96b7-4bb6-b98e-b69ca420e5d1-0",
             )
 
     Structured output:
@@ -190,22 +187,18 @@ class ChatOCIModelDeployment(BaseChatModel, BaseOCIModelDeployment):
         from typing import Optional
         from pydantic import BaseModel, Field
 
+
         class Joke(BaseModel):
             setup: str = Field(description="The setup of the joke")
             punchline: str = Field(description="The punchline to the joke")
 
+
         structured_llm = chat.with_structured_output(Joke, method="json_mode")
-        structured_llm.invoke(
-            "Tell me a joke about cats, "
-            "respond in JSON with `setup` and `punchline` keys"
-        )
+        structured_llm.invoke("Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys")
 
     .. code-block:: python
 
-            Joke(
-                setup='Why did the cat get stuck in the tree?',
-                punchline='Because it was chasing its tail!'
-            )
+        Joke(setup="Why did the cat get stuck in the tree?", punchline="Because it was chasing its tail!")
 
     See ``ChatOCIModelDeployment.with_structured_output()`` for more.
 
@@ -247,14 +240,10 @@ def _construct_json_body(self, messages: list, params: dict) -> dict:
         .. code-block:: python
 
             {
-                'token_usage': {
-                    'prompt_tokens': 40,
-                    'total_tokens': 50,
-                    'completion_tokens': 10
-                },
-                'model_name': 'odsc-llm',
-                'system_fingerprint': '',
-                'finish_reason': 'stop'
+                "token_usage": {"prompt_tokens": 40, "total_tokens": 50, "completion_tokens": 10},
+                "model_name": "odsc-llm",
+                "system_fingerprint": "",
+                "finish_reason": "stop",
             }
 
         """  # noqa: E501
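
The compacted dict above is the same metadata the chat model attaches to each response. A minimal sketch of reading it from an invoke() result, assuming the `chat` instance constructed in the class docstring and a reachable model deployment endpoint:

    # Assumes `chat` is the ChatOCIModelDeployment instance shown earlier in the docstring.
    ai_msg = chat.invoke("Translate 'Hello world!' into French.")

    # response_metadata carries the same keys as the dict shown above.
    usage = ai_msg.response_metadata["token_usage"]
    print(usage["total_tokens"], ai_msg.response_metadata["finish_reason"])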
@@ -275,8 +264,7 @@ def validate_openai(cls, values: Any) -> Any:
         """Checks if langchain_openai is installed."""
         if not importlib.util.find_spec("langchain_openai"):
             raise ImportError(
-                "Could not import langchain_openai package. "
-                "Please install it with `pip install langchain_openai`."
+                "Could not import langchain_openai package. Please install it with `pip install langchain_openai`."
             )
         return values
 
@@ -303,9 +291,7 @@ def _default_params(self) -> Dict[str, Any]:
             "stream": self.streaming,
         }
 
-    def _headers(
-        self, is_async: Optional[bool] = False, body: Optional[dict] = None
-    ) -> Dict:
+    def _headers(self, is_async: Optional[bool] = False, body: Optional[dict] = None) -> Dict:
         """Construct and return the headers for a request.
 
         Args:
@@ -357,17 +343,13 @@ def _generate(
             response = chat.invoke(messages)
         """  # noqa: E501
         if self.streaming:
-            stream_iter = self._stream(
-                messages, stop=stop, run_manager=run_manager, **kwargs
-            )
+            stream_iter = self._stream(messages, stop=stop, run_manager=run_manager, **kwargs)
             return generate_from_stream(stream_iter)
 
         requests_kwargs = kwargs.pop("requests_kwargs", {})
         params = self._invocation_params(stop, **kwargs)
         body = self._construct_json_body(messages, params)
-        res = self.completion_with_retry(
-            data=body, run_manager=run_manager, **requests_kwargs
-        )
+        res = self.completion_with_retry(data=body, run_manager=run_manager, **requests_kwargs)
         return self._process_response(res.json())
 
     def _stream(
@@ -415,9 +397,7 @@ def _stream(
         params = self._invocation_params(stop, **kwargs)
         body = self._construct_json_body(messages, params)  # request json body
 
-        response = self.completion_with_retry(
-            data=body, run_manager=run_manager, stream=True, **requests_kwargs
-        )
+        response = self.completion_with_retry(data=body, run_manager=run_manager, stream=True, **requests_kwargs)
         default_chunk_class = AIMessageChunk
         for line in self._parse_stream(response.iter_lines()):
             chunk = self._handle_sse_line(line, default_chunk_class)
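
A minimal usage sketch for the streaming path refactored above, assuming the `chat` instance from the class docstring; stream() yields AIMessageChunk objects as the SSE lines are parsed:

    # Assumes `chat` is the ChatOCIModelDeployment instance shown earlier in the docstring.
    for chunk in chat.stream("Translate 'Hello world!' into French."):
        print(chunk.content, end="", flush=True)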
@@ -467,9 +447,7 @@ async def _agenerate(
 
         """  # noqa: E501
         if self.streaming:
-            stream_iter = self._astream(
-                messages, stop=stop, run_manager=run_manager, **kwargs
-            )
+            stream_iter = self._astream(messages, stop=stop, run_manager=run_manager, **kwargs)
             return await agenerate_from_stream(stream_iter)
 
         requests_kwargs = kwargs.pop("requests_kwargs", {})
@@ -593,19 +571,14 @@ def with_structured_output(
                 else JsonOutputParser()
             )
         else:
-            raise ValueError(
-                f"Unrecognized method argument. Expected `json_mode`."
-                f"Received: `{method}`."
-            )
+            raise ValueError(f"Unrecognized method argument. Expected `json_mode`. Received: `{method}`.")
 
         if include_raw:
             parser_assign = RunnablePassthrough.assign(
                 parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
             )
             parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
-            parser_with_fallback = parser_assign.with_fallbacks(
-                [parser_none], exception_key="parsing_error"
-            )
+            parser_with_fallback = parser_assign.with_fallbacks([parser_none], exception_key="parsing_error")
             return RunnableMap(raw=llm) | parser_with_fallback
         else:
             return llm | output_parser
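
A hedged sketch of the include_raw branch reorganized above, assuming the `chat` instance and the `Joke` schema from the class docstring; with include_raw=True the chain returns a dict rather than a parsed object:

    # Assumes `chat` and `Joke` as defined in the class docstring.
    structured_llm = chat.with_structured_output(Joke, method="json_mode", include_raw=True)
    result = structured_llm.invoke(
        "Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys"
    )

    # result["raw"] is the original AIMessage, result["parsed"] the parsed Joke
    # (or None), and result["parsing_error"] the exception if parsing failed.
    print(result["parsed"], result["parsing_error"])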
@@ -688,9 +661,7 @@ def _process_stream_response(
             if not isinstance(choice, dict):
                 raise TypeError("Endpoint response is not well formed.")
         except (KeyError, IndexError, TypeError) as e:
-            raise ValueError(
-                "Error while formatting response payload for chat model of type"
-            ) from e
+            raise ValueError("Error while formatting response payload for chat model of type") from e
 
         chunk = _convert_delta_to_message_chunk(choice["delta"], default_chunk_cls)
         default_chunk_cls = chunk.__class__
@@ -702,9 +673,7 @@ def _process_stream_response(
         if usage is not None:
             gen_info.update({"usage": usage})
 
-        return ChatGenerationChunk(
-            message=chunk, generation_info=gen_info if gen_info else None
-        )
+        return ChatGenerationChunk(message=chunk, generation_info=gen_info if gen_info else None)
 
     def _process_response(self, response_json: dict) -> ChatResult:
         """Formats response in OpenAI spec.
@@ -729,9 +698,7 @@ def _process_response(self, response_json: dict) -> ChatResult:
             if not isinstance(choices, list):
                 raise TypeError("Endpoint response is not well formed.")
         except (KeyError, TypeError) as e:
-            raise ValueError(
-                "Error while formatting response payload for chat model of type"
-            ) from e
+            raise ValueError("Error while formatting response payload for chat model of type") from e
 
         for choice in choices:
             message = _convert_dict_to_message(choice["message"])