From 2a5a68c7815cac19549b10195b604f27af2a4c20 Mon Sep 17 00:00:00 2001
From: EeyoreLee
Date: Thu, 26 Jun 2025 04:03:19 -0400
Subject: [PATCH] Support UTF-8 in logger output at DEBUG level
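json.dumps defaults to ensure_ascii=True, which escapes every
non-ASCII character as a \uXXXX sequence. As a result, UTF-8 text
(e.g. CJK characters) in messages, tool definitions, and model
responses was unreadable in DEBUG-level logs. Pass ensure_ascii=False
to the DEBUG-level json.dumps calls so the characters are emitted
verbatim.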
---
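Notes (not part of the commit): a minimal sketch of the behavior
change, using only the standard-library json module; the payload below
is a made-up example, not taken from the codebase.

    import json

    payload = {"content": "こんにちは"}

    # Default (ensure_ascii=True): non-ASCII is escaped, as the DEBUG
    # logs showed before this patch.
    print(json.dumps(payload, indent=2))
    # {
    #   "content": "\u3053\u3093\u306b\u3061\u306f"
    # }

    # With ensure_ascii=False, the same characters are emitted verbatim.
    print(json.dumps(payload, indent=2, ensure_ascii=False))
    # {
    #   "content": "こんにちは"
    # }

Note that the logging handler's stream must use a UTF-8-capable
encoding for the verbatim output to render correctly.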
src/agents/extensions/models/litellm_model.py | 6 +++---
src/agents/models/openai_chatcompletions.py | 6 +++---
src/agents/models/openai_responses.py | 6 +++---
3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py
index c58a52dae..96f62065e 100644
--- a/src/agents/extensions/models/litellm_model.py
+++ b/src/agents/extensions/models/litellm_model.py
@@ -98,7 +98,7 @@ async def get_response(
logger.debug("Received model response")
else:
logger.debug(
- f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
+ f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2, ensure_ascii=False)}\n"
            )

        if hasattr(response, "usage"):
@@ -269,8 +269,8 @@ async def _fetch_response(
else:
logger.debug(
f"Calling Litellm model: {self.model}\n"
- f"{json.dumps(converted_messages, indent=2)}\n"
- f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+ f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+ f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
f"Stream: {stream}\n"
f"Tool choice: {tool_choice}\n"
f"Response format: {response_format}\n"
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 120d726db..6de431b4d 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -87,7 +87,7 @@ async def get_response(
if message is not None:
logger.debug(
"LLM resp:\n%s\n",
- json.dumps(message.model_dump(), indent=2),
+ json.dumps(message.model_dump(), indent=2, ensure_ascii=False),
)
else:
finish_reason = first_choice.finish_reason if first_choice else "-"
@@ -256,8 +256,8 @@ async def _fetch_response(
logger.debug("Calling LLM")
else:
logger.debug(
- f"{json.dumps(converted_messages, indent=2)}\n"
- f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+ f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+ f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
f"Stream: {stream}\n"
f"Tool choice: {tool_choice}\n"
f"Response format: {response_format}\n"
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 637adaccd..1a16b7b77 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -96,7 +96,7 @@ async def get_response(
else:
logger.debug(
"LLM resp:\n"
- f"{json.dumps([x.model_dump() for x in response.output], indent=2)}\n"
+ f"{json.dumps([x.model_dump() for x in response.output], indent=2, ensure_ascii=False)}\n"
            )

        usage = (
@@ -249,8 +249,8 @@ async def _fetch_response(
else:
logger.debug(
f"Calling LLM {self.model} with input:\n"
- f"{json.dumps(list_input, indent=2)}\n"
- f"Tools:\n{json.dumps(converted_tools.tools, indent=2)}\n"
+ f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
+ f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
f"Stream: {stream}\n"
f"Tool choice: {tool_choice}\n"
f"Response format: {response_format}\n"