Skip to content

Commit 7b5658f

Browse files
authored
fix: 修复大模型返回json时,解析出错 (fix: parsing error when the large language model returns JSON) #656 (#697)
1 parent 1c3ea93 commit 7b5658f

File tree

2 files changed

+12
-9
lines changed

2 files changed

+12
-9
lines changed

apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,8 @@ def reset_message_list(message_list: List[BaseMessage], answer_text):
143143
def get_stream_result(message_list: List[BaseMessage],
144144
chat_model: BaseChatModel = None,
145145
paragraph_list=None,
146-
no_references_setting=None):
146+
no_references_setting=None,
147+
problem_text=None):
147148
if paragraph_list is None:
148149
paragraph_list = []
149150
directly_return_chunk_list = [AIMessageChunk(content=paragraph.content)
@@ -153,7 +154,8 @@ def get_stream_result(message_list: List[BaseMessage],
153154
return iter(directly_return_chunk_list), False
154155
elif len(paragraph_list) == 0 and no_references_setting.get(
155156
'status') == 'designated_answer':
156-
return iter([AIMessageChunk(content=no_references_setting.get('value'))]), False
157+
return iter(
158+
[AIMessageChunk(content=no_references_setting.get('value').replace('{question}', problem_text))]), False
157159
if chat_model is None:
158160
return iter([AIMessageChunk('抱歉,没有配置 AI 模型,无法优化引用分段,请先去应用中设置 AI 模型。')]), False
159161
else:
@@ -170,7 +172,7 @@ def execute_stream(self, message_list: List[BaseMessage],
170172
client_id=None, client_type=None,
171173
no_references_setting=None):
172174
chat_result, is_ai_chat = self.get_stream_result(message_list, chat_model, paragraph_list,
173-
no_references_setting)
175+
no_references_setting, problem_text)
174176
chat_record_id = uuid.uuid1()
175177
r = StreamingHttpResponse(
176178
streaming_content=event_content(chat_result, chat_id, chat_record_id, paragraph_list,
@@ -185,7 +187,8 @@ def execute_stream(self, message_list: List[BaseMessage],
185187
def get_block_result(message_list: List[BaseMessage],
186188
chat_model: BaseChatModel = None,
187189
paragraph_list=None,
188-
no_references_setting=None):
190+
no_references_setting=None,
191+
problem_text=None):
189192
if paragraph_list is None:
190193
paragraph_list = []
191194

@@ -196,7 +199,7 @@ def get_block_result(message_list: List[BaseMessage],
196199
return directly_return_chunk_list[0], False
197200
elif len(paragraph_list) == 0 and no_references_setting.get(
198201
'status') == 'designated_answer':
199-
return AIMessage(no_references_setting.get('value')), False
202+
return AIMessage(no_references_setting.get('value').replace('{question}', problem_text)), False
200203
if chat_model is None:
201204
return AIMessage('抱歉,没有配置 AI 模型,无法优化引用分段,请先去应用中设置 AI 模型。'), False
202205
else:
@@ -215,7 +218,7 @@ def execute_block(self, message_list: List[BaseMessage],
215218
# 调用模型
216219
try:
217220
chat_result, is_ai_chat = self.get_block_result(message_list, chat_model, paragraph_list,
218-
no_references_setting)
221+
no_references_setting, problem_text)
219222
if is_ai_chat:
220223
request_token = chat_model.get_num_tokens_from_messages(message_list)
221224
response_token = chat_model.get_num_tokens(chat_result.content)

apps/application/chat_pipeline/step/generate_human_message_step/impl/base_generate_human_message_step.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,9 +48,9 @@ def to_human_message(prompt: str,
4848
if paragraph_list is None or len(paragraph_list) == 0:
4949
if no_references_setting.get('status') == 'ai_questioning':
5050
return HumanMessage(
51-
content=no_references_setting.get('value').format(**{'question': problem}))
51+
content=no_references_setting.get('value').replace('{question}', problem))
5252
else:
53-
return HumanMessage(content=prompt.format(**{'data': "", 'question': problem}))
53+
return HumanMessage(content=prompt.replace('{data}', "").replace('{question}', problem))
5454
temp_data = ""
5555
data_list = []
5656
for p in paragraph_list:
@@ -63,4 +63,4 @@ def to_human_message(prompt: str,
6363
else:
6464
data_list.append(f"<data>{content}</data>")
6565
data = "\n".join(data_list)
66-
return HumanMessage(content=prompt.format(**{'data': data, 'question': problem}))
66+
return HumanMessage(content=prompt.replace('{data}', data).replace('{question}', problem))

0 commit comments

Comments (0)