Commit 36d9e69

Remove noqa: E501 comments and reformat long lines
This commit removes unnecessary '# noqa: E501' comments throughout the codebase and reformats long lines for improved readability. Docstrings, function signatures, and code examples have been wrapped or split across lines where appropriate. No functional changes were made.
1 parent 3596291 commit 36d9e69
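
The wrapped strings rely on Python's implicit string-literal concatenation: adjacent literals inside one pair of parentheses are joined at compile time, so the split form is byte-for-byte identical to the long line and the `# noqa: E501` suppression becomes unnecessary. A minimal sketch of the two patterns applied in this commit (the variable and function names are illustrative, not taken from the diff):

    # Before: one long line, silenced with "# noqa: E501".
    long_prompt = "You are a helpful assistant that translates English to French. Translate the user sentence."  # noqa: E501

    # After: adjacent string literals inside parentheses are concatenated
    # at compile time, so no lint suppression is needed.
    wrapped_prompt = (
        "You are a helpful assistant that translates English to "
        "French. Translate the user sentence."
    )
    assert wrapped_prompt == long_prompt

    # Long signatures are split one element per line in the same spirit;
    # the change is purely layout, never behavior.
    def translate(
        text: str, target_language: str = "fr"
    ) -> str:
        return f"[{target_language}] {text}"

    print(translate("Hello World!"))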

12 files changed: +133 -121 lines changed

libs/oci/langchain_oci/chat_models/oci_data_science.py

Lines changed: 25 additions & 17 deletions
@@ -110,7 +110,7 @@ class ChatOCIModelDeployment(BaseChatModel, BaseOCIModelDeployment):
 
         chat = ChatOCIModelDeployment(
             endpoint="https://modeldeployment.<region>.oci.customer-oci.com/<ocid>/predict",
-            model="odsc-llm",  # this is the default model name if deployed with AQUA
+            model="odsc-llm",  # default model name if deployed with AQUA
             streaming=True,
             max_retries=3,
             model_kwargs={
@@ -167,9 +167,10 @@ class ChatOCIModelDeployment(BaseChatModel, BaseOCIModelDeployment):
         content=' le' id='run-02c6-c43f-42de'
         content=' monde' id='run-02c6-c43f-42de'
         content='!' id='run-02c6-c43f-42de'
-        content='' response_metadata={'finish_reason': 'stop'} id='run-02c6-c43f-42de'
+        content='' response_metadata={'finish_reason': 'stop'} \
+            id='run-02c6-c43f-42de'
 
-    Async:
+    Async:
     .. code-block:: python
 
         await chat.ainvoke(messages)
@@ -199,7 +200,8 @@ class Joke(BaseModel):
 
         structured_llm = chat.with_structured_output(Joke, method="json_mode")
         structured_llm.invoke(
-            "Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys"
+            "Tell me a joke about cats, respond in JSON with "
+            "`setup` and `punchline` keys"
         )
 
     .. code-block:: python
@@ -218,7 +220,9 @@ class Joke(BaseModel):
     .. code-block:: python
 
         class MyChatModel(ChatOCIModelDeployment):
-            def _process_stream_response(self, response_json: dict) -> ChatGenerationChunk:
+            def _process_stream_response(
+                self, response_json: dict
+            ) -> ChatGenerationChunk:
                 print("My customized streaming result handler.")
                 return GenerationChunk(...)
 
@@ -259,7 +263,7 @@ def _construct_json_body(self, messages: list, params: dict) -> dict:
             "finish_reason": "stop",
         }
 
-    """ # noqa: E501
+    """
 
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Keyword arguments to pass to the model."""
@@ -351,13 +355,14 @@ def _generate(
             messages = [
                 (
                     "system",
-                    "You are a helpful assistant that translates English to French. Translate the user sentence.",
+                    "You are a helpful assistant that translates English to "
+                    "French. Translate the user sentence.",
                 ),
                 ("human", "Hello World!"),
             ]
 
             response = chat.invoke(messages)
-        """ # noqa: E501
+        """
         if self.streaming:
             stream_iter = self._stream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
@@ -404,14 +409,15 @@ def _stream(
             messages = [
                 (
                     "system",
-                    "You are a helpful assistant that translates English to French. Translate the user sentence.",
+                    "You are a helpful assistant that translates English to "
+                    "French. Translate the user sentence.",
                 ),
                 ("human", "Hello World!"),
             ]
 
             chunk_iter = chat.stream(messages)
 
-        """ # noqa: E501
+        """
         requests_kwargs = kwargs.pop("requests_kwargs", {})
         self.streaming = True
         params = self._invocation_params(stop, **kwargs)
@@ -460,14 +466,15 @@ async def _agenerate(
             messages = [
                 (
                     "system",
-                    "You are a helpful assistant that translates English to French. Translate the user sentence.",
+                    "You are a helpful assistant that translates English to "
+                    "French. Translate the user sentence.",
                 ),
                 ("human", "I love programming."),
             ]
 
             resp = await chat.ainvoke(messages)
 
-        """ # noqa: E501
+        """
         if self.streaming:
             stream_iter = self._astream(
                 messages, stop=stop, run_manager=run_manager, **kwargs
@@ -517,14 +524,15 @@ async def _astream(
             messages = [
                 (
                     "system",
-                    "You are a helpful assistant that translates English to French. Translate the user sentence.",
+                    "You are a helpful assistant that translates English to "
+                    "French. Translate the user sentence.",
                 ),
                 ("human", "I love programming."),
             ]
 
             chunk_iter = await chat.astream(messages)
 
-        """ # noqa: E501
+        """
         requests_kwargs = kwargs.pop("requests_kwargs", {})
         self.streaming = True
         params = self._invocation_params(stop, **kwargs)
@@ -583,7 +591,7 @@ def with_structured_output(
 
             If schema is a dict then _DictOrPydantic is a dict.
 
-        """ # noqa: E501
+        """
         if kwargs:
             raise ValueError(f"Received unsupported arguments {kwargs}")
         is_pydantic_schema = _is_pydantic_class(schema)
@@ -792,7 +800,7 @@ class ChatOCIModelDeploymentVLLM(ChatOCIModelDeployment):
             # other model parameters...
         )
 
-    """ # noqa: E501
+    """
 
     frequency_penalty: float = 0.0
     """Penalizes repeated tokens according to frequency. Between 0 and 1."""
@@ -959,7 +967,7 @@ class ChatOCIModelDeploymentTGI(ChatOCIModelDeployment):
             # other model parameters...
         )
 
-    """ # noqa: E501
+    """
 
     frequency_penalty: Optional[float] = None
     """Penalizes repeated tokens according to frequency. Between 0 and 1."""
