Skip to content

Commit e7da0a0

Browse files
fix(graphgen): change model name
1 parent f6cebdb commit e7da0a0

File tree

2 files changed

+7
-7
lines changed

2 files changed

+7
-7
lines changed

graphgen/operators/judge.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,15 +7,15 @@


 async def judge_statement(  # pylint: disable=too-many-statements
-        student_llm_client: OpenAIModel,
+        training_llm_client: OpenAIModel,
         graph_storage: NetworkXStorage,
         rephrase_storage: JsonKVStorage,
         re_judge: bool = False,
         max_concurrent: int = 1000) -> NetworkXStorage:
     """
     Get all edges and nodes and judge them
-    :param student_llm_client: judge the statements to get comprehension loss
+    :param training_llm_client: judge the statements to get comprehension loss
     :param graph_storage: graph storage instance
     :param rephrase_storage: rephrase storage instance
     :param re_judge: re-judge the relations
@@ -46,7 +46,7 @@ async def _judge_single_relation(
         judgements = []
         gts = [gt for _, gt in descriptions]
         for description, gt in descriptions:
-            judgement = await student_llm_client.generate_topk_per_token(
+            judgement = await training_llm_client.generate_topk_per_token(
                 STATEMENT_JUDGEMENT_PROMPT['TEMPLATE'].format(statement=description)
             )
             judgements.append(judgement[0].top_candidates)
@@ -94,7 +94,7 @@ async def _judge_single_entity(
         judgements = []
         gts = [gt for _, gt in descriptions]
         for description, gt in descriptions:
-            judgement = await student_llm_client.generate_topk_per_token(
+            judgement = await training_llm_client.generate_topk_per_token(
                 STATEMENT_JUDGEMENT_PROMPT['TEMPLATE'].format(statement=description)
             )
             judgements.append(judgement[0].top_candidates)

graphgen/operators/quiz.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,15 @@


 async def quiz(
-        teacher_llm_client: OpenAIModel,
+        synth_llm_client: OpenAIModel,
         graph_storage: NetworkXStorage,
         rephrase_storage: JsonKVStorage,
         max_samples: int = 1,
         max_concurrent: int = 1000) -> JsonKVStorage:
     """
     Get all edges and quiz them
-    :param teacher_llm_client: generate statements
+    :param synth_llm_client: generate statements
     :param graph_storage: graph storage instance
     :param rephrase_storage: rephrase storage instance
     :param max_samples: max samples for each edge
@@ -38,7 +38,7 @@ async def _process_single_quiz(
         if descriptions:
             return None

-        new_description = await teacher_llm_client.generate_answer(
+        new_description = await synth_llm_client.generate_answer(
             prompt,
             temperature=1
         )

0 commit comments

Comments
 (0)