Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix: Improve serialization of completions/responses in Agents SDK instrumentation #845

Closed
wants to merge 2 commits into from
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,26 @@

logger = logging.getLogger(__name__)


def model_as_dict(model):
    """Best-effort conversion of a model object to a plain dictionary.

    Tries, in order:
      1. plain ``dict`` — returned as-is;
      2. ``model_dump()`` — Pydantic v2 models;
      3. ``dict()`` — Pydantic v1 models;
      4. ``parse()`` — raw API response wrappers, converted recursively;
      5. the instance ``__dict__`` as a last resort.

    Objects matching none of the above (e.g. ints, slotted objects) are
    returned unchanged so callers can fall back to ``str()`` serialization.

    Args:
        model: Any object to convert.

    Returns:
        A ``dict`` when any strategy succeeds, otherwise ``model`` itself.
    """
    if isinstance(model, dict):
        return model
    if hasattr(model, "model_dump"):  # Pydantic v2
        return model.model_dump()
    if hasattr(model, "dict"):  # Pydantic v1
        return model.dict()
    if hasattr(model, "parse"):  # Raw API response wrapper
        return model_as_dict(model.parse())
    # Fallback: use __dict__ when present, else return the object unchanged.
    # getattr with a default replaces the original bare `except:`, which
    # silently swallowed every exception (including SystemExit).
    return getattr(model, "__dict__", model)


# Global metrics objects.
# NOTE(review): presumably populated lazily when the instrumentor sets up
# metrics (they start as None) — confirm against the initialization code
# elsewhere in this file.
_agent_run_counter = None
_agent_turn_counter = None
Expand Down Expand Up @@ -184,8 +204,78 @@ def _export_span(self, span: AgentsSpan[Any]) -> None:
if hasattr(span_data, "input") and span_data.input:
attributes[SpanAttributes.LLM_PROMPTS] = str(span_data.input)[:1000] # Truncate long inputs

# Handle output - extract specific fields instead of using str()
if hasattr(span_data, "output") and span_data.output:
attributes[SpanAttributes.LLM_COMPLETIONS] = str(span_data.output)[:1000] # Truncate long outputs
output = span_data.output

# Convert to dict if possible using model_as_dict
try:
output_dict = model_as_dict(output)
except Exception:
# If conversion fails, try to access attributes directly
output_dict = None

if output_dict:
# Extract model
if "model" in output_dict:
attributes[SpanAttributes.LLM_RESPONSE_MODEL] = output_dict["model"]

# Extract ID
if "id" in output_dict:
attributes[SpanAttributes.LLM_RESPONSE_ID] = output_dict["id"]

# Extract system fingerprint (OpenAI specific)
if "system_fingerprint" in output_dict:
attributes[SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT] = output_dict[
"system_fingerprint"
]

# Handle usage metrics
if "usage" in output_dict and output_dict["usage"]:
usage = output_dict["usage"]
if isinstance(usage, dict):
if "total_tokens" in usage:
attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage["total_tokens"]
if "completion_tokens" in usage:
attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage["completion_tokens"]
if "prompt_tokens" in usage:
attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage["prompt_tokens"]

# Handle completions - extract specific fields from choices
if "choices" in output_dict and output_dict["choices"]:
for choice in output_dict["choices"]:
index = choice.get("index", 0)
prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"

# Extract finish reason
if "finish_reason" in choice:
attributes[f"{prefix}.finish_reason"] = choice["finish_reason"]

# Extract message content
message = choice.get("message", {})
if message:
if "role" in message:
attributes[f"{prefix}.role"] = message["role"]
if "content" in message:
attributes[f"{prefix}.content"] = message["content"]

# Handle function calls if present
if "function_call" in message:
function_call = message["function_call"]
attributes[f"{prefix}.function_call.name"] = function_call.get("name")
attributes[f"{prefix}.function_call.arguments"] = function_call.get("arguments")

# Handle tool calls if present
if "tool_calls" in message:
for i, tool_call in enumerate(message["tool_calls"]):
if "function" in tool_call:
function = tool_call["function"]
attributes[f"{prefix}.tool_calls.{i}.id"] = tool_call.get("id")
attributes[f"{prefix}.tool_calls.{i}.name"] = function.get("name")
attributes[f"{prefix}.tool_calls.{i}.arguments"] = function.get("arguments")
else:
# Fallback to string representation if we couldn't convert to dict
attributes[SpanAttributes.LLM_COMPLETIONS] = str(span_data.output)[:1000]

# Extract model information - check for GenerationSpanData specifically
if span_type == "Generation" and hasattr(span_data, "model") and span_data.model:
Expand Down
Loading