    "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
    "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
    "tool_calls": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
-    "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
    "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
    "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
}
@@ -203,8 +202,12 @@ def on_llm_start(
                if key in all_params and all_params[key] is not None:
                    set_data_normalized(span, attribute, all_params[key], unpack=False)

+            _set_tools_on_span(span, all_params.get("tools"))
+
            if should_send_default_pii() and self.include_prompts:
-                set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts)
+                set_data_normalized(
+                    span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts, unpack=False
+                )

    def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any
@@ -246,14 +249,20 @@ def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
                if key in all_params and all_params[key] is not None:
                    set_data_normalized(span, attribute, all_params[key], unpack=False)

+            _set_tools_on_span(span, all_params.get("tools"))
+
            if should_send_default_pii() and self.include_prompts:
+                normalized_messages = []
+                for list_ in messages:
+                    for message in list_:
+                        normalized_messages.append(
+                            self._normalize_langchain_message(message)
+                        )
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                    [
-                        [self._normalize_langchain_message(x) for x in list_]
-                        for list_ in messages
-                    ],
+                    normalized_messages,
+                    unpack=False,
                )

    def on_chat_model_end(self, response, *, run_id, **kwargs):
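Illustration only, not part of the diff: the rewrite above flattens the nested List[List[BaseMessage]] into one list before storing it, where previously a list of per-prompt lists was recorded. A minimal sketch with made-up stand-ins for the messages and the normalizer:

    def normalize(message):
        # stands in for self._normalize_langchain_message
        return {"content": message}

    messages = [["system: be brief", "user: hi"], ["user: bye"]]

    # old shape: a list of lists
    old = [[normalize(x) for x in list_] for list_ in messages]
    # new shape: one flat list, stored with unpack=False
    normalized_messages = []
    for list_ in messages:
        for message in list_:
            normalized_messages.append(normalize(message))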
@@ -351,9 +360,7 @@ def on_agent_finish(self, finish, *, run_id, **kwargs):

            if should_send_default_pii() and self.include_prompts:
                set_data_normalized(
-                    span,
-                    SPANDATA.GEN_AI_RESPONSE_TEXT,
-                    finish.return_values.items(),
+                    span, SPANDATA.GEN_AI_RESPONSE_TEXT, finish.return_values.items()
                )

            self._exit_span(span_data, run_id)
@@ -473,13 +480,11 @@ def _get_token_usage(obj)
        if usage is not None:
            return usage

-    # check for usage in the object itself
    for name in possible_names:
        usage = _get_value(obj, name)
        if usage is not None:
            return usage

-    # no usage found anywhere
    return None

@@ -531,6 +536,87 @@ def _get_request_data(obj, args, kwargs):
    return (agent_name, tools)


+def _simplify_langchain_tools(tools):
+    # type: (Any) -> Optional[List[Any]]
+    """Parse and simplify tools into a cleaner format."""
+    if not tools:
+        return None
+
+    if not isinstance(tools, (list, tuple)):
+        return None
+
+    simplified_tools = []
+    for tool in tools:
+        try:
+            if isinstance(tool, dict):
+
+                if "function" in tool and isinstance(tool["function"], dict):
+                    func = tool["function"]
+                    simplified_tool = {
+                        "name": func.get("name"),
+                        "description": func.get("description"),
+                    }
+                    if simplified_tool["name"]:
+                        simplified_tools.append(simplified_tool)
+                elif "name" in tool:
+                    simplified_tool = {
+                        "name": tool.get("name"),
+                        "description": tool.get("description"),
+                    }
+                    simplified_tools.append(simplified_tool)
+                else:
+                    name = (
+                        tool.get("name")
+                        or tool.get("tool_name")
+                        or tool.get("function_name")
+                    )
+                    if name:
+                        simplified_tools.append(
+                            {
+                                "name": name,
+                                "description": tool.get("description")
+                                or tool.get("desc"),
+                            }
+                        )
+            elif hasattr(tool, "name"):
+                simplified_tool = {
+                    "name": getattr(tool, "name", None),
+                    "description": getattr(tool, "description", None)
+                    or getattr(tool, "desc", None),
+                }
+                if simplified_tool["name"]:
+                    simplified_tools.append(simplified_tool)
+            elif hasattr(tool, "__name__"):
+                simplified_tools.append(
+                    {
+                        "name": tool.__name__,
+                        "description": getattr(tool, "__doc__", None),
+                    }
+                )
+            else:
+                tool_str = str(tool)
+                if tool_str and tool_str != "":
+                    simplified_tools.append({"name": tool_str, "description": None})
+        except Exception:
+            continue
+
+    return simplified_tools if simplified_tools else None
+
+
+def _set_tools_on_span(span, tools):
+    # type: (Span, Any) -> None
+    """Set available tools data on a span if tools are provided."""
+    if tools is not None:
+        simplified_tools = _simplify_langchain_tools(tools)
+        if simplified_tools:
+            set_data_normalized(
+                span,
+                SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
+                simplified_tools,
+                unpack=False,
+            )
+
+
def _wrap_configure(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]

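Illustration only, not part of the diff: given the helper added above, here are a few representative tool shapes and what _simplify_langchain_tools would reduce them to (SimpleNamespace and lookup_weather are made-up stand-ins, not values from the integration):

    from types import SimpleNamespace

    def lookup_weather(city):
        """Look up the weather for a city."""
        return "sunny"

    tools = [
        # OpenAI-style function-tool dict
        {"type": "function", "function": {"name": "get_weather", "description": "Weather lookup"}},
        # anything exposing .name / .description, e.g. a LangChain Tool
        SimpleNamespace(name="search", description="Web search"),
        # a plain callable with __name__ / __doc__
        lookup_weather,
    ]

    _simplify_langchain_tools(tools)
    # -> [
    #     {"name": "get_weather", "description": "Weather lookup"},
    #     {"name": "search", "description": "Web search"},
    #     {"name": "lookup_weather", "description": "Look up the weather for a city."},
    # ]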
@@ -601,7 +687,7 @@ def new_configure(
            ]
        elif isinstance(local_callbacks, BaseCallbackHandler):
            local_callbacks = [local_callbacks, sentry_handler]
-        else:  # local_callbacks is a list
+        else:
            local_callbacks = [*local_callbacks, sentry_handler]

        return f(
@@ -638,10 +724,7 @@ def new_invoke(self, *args, **kwargs):
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
            span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)

-            if tools:
-                set_data_normalized(
-                    span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False
-                )
+            _set_tools_on_span(span, tools)

            # Run the agent
            result = f(self, *args, **kwargs)
@@ -653,11 +736,7 @@ def new_invoke(self, *args, **kwargs):
                and integration.include_prompts
            ):
                set_data_normalized(
-                    span,
-                    SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                    [
-                        input,
-                    ],
+                    span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False
                )

            output = result.get("output")
@@ -666,7 +745,7 @@ def new_invoke(self, *args, **kwargs):
                and should_send_default_pii()
                and integration.include_prompts
            ):
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)

            return result

@@ -698,10 +777,7 @@ def new_stream(self, *args, **kwargs):
        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

-        if tools:
-            set_data_normalized(
-                span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False
-            )
+        _set_tools_on_span(span, tools)

        input = args[0].get("input") if len(args) >= 1 else None
        if (
@@ -710,11 +786,7 @@ def new_stream(self, *args, **kwargs):
            and integration.include_prompts
        ):
            set_data_normalized(
-                span,
-                SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                [
-                    input,
-                ],
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False
            )

        # Run the agent
@@ -737,7 +809,7 @@ def new_iterator():
                and should_send_default_pii()
                and integration.include_prompts
            ):
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)

            span.__exit__(None, None, None)

@@ -756,7 +828,7 @@ async def new_iterator_async():
                and should_send_default_pii()
                and integration.include_prompts
            ):
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)

            span.__exit__(None, None, None)

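Illustration only, not part of the diff: _set_tools_on_span deliberately leaves the span untouched when nothing useful can be extracted, because _simplify_langchain_tools (as defined in this change) returns None for empty, non-sequence, or unrecognizable input:

    assert _simplify_langchain_tools(None) is None
    assert _simplify_langchain_tools([]) is None
    assert _simplify_langchain_tools({"name": "x"}) is None     # a bare dict is not a list/tuple
    assert _simplify_langchain_tools([{"foo": "bar"}]) is None  # no recognizable name key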