-
Notifications
You must be signed in to change notification settings - Fork 8
Expand file tree
/
Copy pathtest_llm_payload_integration.py
More file actions
108 lines (85 loc) · 3.82 KB
/
test_llm_payload_integration.py
File metadata and controls
108 lines (85 loc) · 3.82 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
"""Integration: verify _call_with_* functions pass correct params to SyncLLMClient.
Post llm-compat migration, provider translation (reasoning_effort -> thinking params)
is handled by llm-compat internally. These tests verify the project code passes the
right model, messages, reasoning_effort, and response_format to SyncLLMClient.chat().
"""
from dataclasses import dataclass
from unittest.mock import patch, MagicMock
import pytest
@dataclass
class _FakeChatResult:
content: str = ""
fallback_from: str = None
model: str = "test"
def __str__(self):
return self.content
class TestTextOutputPassesCorrectParams:
@patch("video_transcript_api.llm.llm.get_sync_client")
def test_passes_reasoning_effort(self, mock_get_client):
from video_transcript_api.llm.llm import _call_with_text_output
mock_client = MagicMock()
mock_client.chat.return_value = _FakeChatResult(content="hello")
mock_get_client.return_value = mock_client
_call_with_text_output(
model="deepseek-v4-flash",
prompt="hi",
system_prompt="sys",
reasoning_effort="disabled",
task_type="test",
)
call_kwargs = mock_client.chat.call_args
assert call_kwargs.kwargs["reasoning_effort"] == "disabled"
assert call_kwargs.args[0] == "deepseek-v4-flash"
@patch("video_transcript_api.llm.llm.get_sync_client")
def test_none_effort_passed_as_none(self, mock_get_client):
from video_transcript_api.llm.llm import _call_with_text_output
mock_client = MagicMock()
mock_client.chat.return_value = _FakeChatResult(content="hi")
mock_get_client.return_value = mock_client
_call_with_text_output(
model="deepseek-v4-flash",
prompt="hi",
system_prompt="sys",
reasoning_effort=None,
task_type="test",
)
call_kwargs = mock_client.chat.call_args
assert call_kwargs.kwargs["reasoning_effort"] is None
class TestJsonSchemaPassesResponseFormat:
@patch("video_transcript_api.llm.llm.get_sync_client")
def test_response_format_passed(self, mock_get_client):
from video_transcript_api.llm.llm import _call_with_json_schema_mode
mock_client = MagicMock()
mock_client.chat.return_value = _FakeChatResult(content='{"ok": true}')
mock_get_client.return_value = mock_client
_call_with_json_schema_mode(
model="deepseek-v4-flash",
prompt="hi",
schema={"type": "object", "properties": {"ok": {"type": "boolean"}}, "required": ["ok"]},
system_prompt="sys",
reasoning_effort="disabled",
task_type="test",
)
call_kwargs = mock_client.chat.call_args
rf = call_kwargs.kwargs["response_format"]
assert rf["type"] == "json_schema"
assert rf["json_schema"]["strict"] is True
class TestJsonObjectPassesResponseFormat:
@patch("video_transcript_api.llm.llm.get_sync_client")
def test_response_format_json_object_passed(self, mock_get_client):
from video_transcript_api.llm.llm import _call_with_json_object_mode
mock_client = MagicMock()
mock_client.chat.return_value = _FakeChatResult(content='{"ok": true}')
mock_get_client.return_value = mock_client
_call_with_json_object_mode(
model="deepseek-v4-flash",
prompt="hi",
schema={"type": "object", "properties": {"ok": {"type": "boolean"}}, "required": ["ok"]},
config={"llm": {"json_output": {"max_retries": 0}}},
system_prompt="sys",
reasoning_effort="disabled",
task_type="test",
)
call_kwargs = mock_client.chat.call_args
rf = call_kwargs.kwargs["response_format"]
assert rf["type"] == "json_object"