diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json index e71a9f48bcac..4eaa940a0b11 100644 --- a/sdk/ai/azure-ai-projects/assets.json +++ b/sdk/ai/azure-ai-projects/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-projects", - "Tag": "python/ai/azure-ai-projects_febb246e47" + "Tag": "python/ai/azure-ai-projects_314598932e" } diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py index d9894ea572a8..a51bc7b5514e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py @@ -12,10 +12,11 @@ All tests use the same tool combination but different inputs and workflows. """ +import json from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import PromptAgentDefinition, CodeInterpreterTool, CodeInterpreterToolAuto, FunctionTool - +from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam class TestAgentCodeInterpreterAndFunction(TestBase): """Tests for agents using Code Interpreter + Function Tool combination.""" @@ -25,6 +26,10 @@ class TestAgentCodeInterpreterAndFunction(TestBase): def test_calculate_and_save(self, **kwargs): """ Test calculation with Code Interpreter and saving with Function Tool. + + This test verifies that both tools are used: + 1. Code Interpreter: Performs a calculation that requires actual computation + 2. Function Tool: Saves the computed result """ model = self.test_agents_params["model_deployment_name"] @@ -36,24 +41,25 @@ def test_calculate_and_save(self, **kwargs): # Define function tool func_tool = FunctionTool( name="save_result", - description="Save analysis result", + description="Save the calculation result. Must be called to persist the result.", parameters={ "type": "object", "properties": { - "result": {"type": "string", "description": "The result"}, + "calculation": {"type": "string", "description": "Description of the calculation"}, + "result": {"type": "string", "description": "The numerical result"}, }, - "required": ["result"], + "required": ["calculation", "result"], "additionalProperties": False, }, strict=True, ) - # Create agent + # Create agent with explicit instructions to use both tools agent = project_client.agents.create_version( agent_name="code-func-agent", definition=PromptAgentDefinition( model=model, - instructions="Run calculations and save results.", + instructions="You are a calculator assistant. 
Use code interpreter to perform calculations, then ALWAYS save the result using the save_result function.", tools=[ CodeInterpreterTool(container=CodeInterpreterToolAuto()), func_tool, @@ -63,9 +69,10 @@ def test_calculate_and_save(self, **kwargs): ) print(f"Agent created (id: {agent.id})") - # Use the agent + # Request a calculation that requires Code Interpreter (not trivial math) + # 17^4 = 83521 - not something easily computed mentally response = openai_client.responses.create( - input="Calculate 5 + 3 and save the result.", + input="Calculate 17 to the power of 4 using code, then save the result.", extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) @@ -79,6 +86,10 @@ def test_calculate_and_save(self, **kwargs): def test_generate_data_and_report(self, **kwargs): """ Test generating data with Code Interpreter and reporting with Function. + + This test verifies that both tools are used: + 1. Code Interpreter: Generates random data and calculates statistics + 2. Function Tool: Creates a report with the computed statistics """ model = self.test_agents_params["model_deployment_name"] @@ -90,25 +101,27 @@ def test_generate_data_and_report(self, **kwargs): # Define function tool report_function = FunctionTool( name="generate_report", - description="Generate a report with the provided data", + description="Generate and save a report with the analysis results. Must be called to create the report.", parameters={ "type": "object", "properties": { "title": {"type": "string", "description": "Report title"}, - "summary": {"type": "string", "description": "Report summary"}, + "data_count": {"type": "integer", "description": "Number of data points analyzed"}, + "average": {"type": "number", "description": "Calculated average value"}, + "summary": {"type": "string", "description": "Summary of findings"}, }, - "required": ["title", "summary"], + "required": ["title", "data_count", "average", "summary"], "additionalProperties": False, }, strict=True, ) - # Create agent + # Create agent with explicit instructions agent = project_client.agents.create_version( agent_name="code-func-report-agent", definition=PromptAgentDefinition( model=model, - instructions="Generate data using code and create reports with the generate_report function.", + instructions="You are a data analyst. 
Use code interpreter to generate and analyze data, then ALWAYS create a report using the generate_report function with the exact statistics you computed.", tools=[ CodeInterpreterTool(container=CodeInterpreterToolAuto()), report_function, @@ -118,9 +131,9 @@ def test_generate_data_and_report(self, **kwargs): ) print(f"Agent created (id: {agent.id})") - # Request data generation and report + # Request data generation and report - use a fixed seed for reproducibility in verification response = openai_client.responses.create( - input="Generate a list of 10 random numbers between 1 and 100, calculate their average, and create a report.", + input="Using Python with random.seed(42), generate exactly 10 random integers between 1 and 100, calculate their average, and create a report with the results.", extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, ) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py index 953bb43e037c..fadf656ca7f7 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py @@ -26,6 +26,10 @@ class TestAgentFileSearchAndCodeInterpreter(TestBase): def test_find_and_analyze_data(self, **kwargs): """ Test finding data with File Search and analyzing with Code Interpreter. + + This test verifies that both tools are used: + 1. File Search: Agent finds the data file containing numbers + 2. Code Interpreter: Agent calculates the average of those numbers """ model = self.test_agents_params["model_deployment_name"] @@ -34,12 +38,36 @@ def test_find_and_analyze_data(self, **kwargs): project_client = self.create_client(operation_group="agents", **kwargs) openai_client = project_client.get_openai_client() - # Create data file - txt_content = "Sample data: 10, 20, 30, 40, 50" + # Create data file with numbers that require actual computation + # Numbers: 31, 20, 52, 48, 45, 34, 30, 86, 28, 71, 21, 20, 28, 44, 46 + # Sum: 604, Count: 15, Average: 40.266... ≈ 40.27 + # This is impossible to calculate mentally - requires Code Interpreter + txt_content = """Sensor Readings Log - Experiment #2847 + +The following temperature readings (Celsius) were recorded over a 15-hour period: + +Hour 1: 31 +Hour 2: 20 +Hour 3: 52 +Hour 4: 48 +Hour 5: 45 +Hour 6: 34 +Hour 7: 30 +Hour 8: 86 +Hour 9: 28 +Hour 10: 71 +Hour 11: 21 +Hour 12: 20 +Hour 13: 28 +Hour 14: 44 +Hour 15: 46 + +End of sensor log. +""" vector_store = openai_client.vector_stores.create(name="DataStore") txt_file = BytesIO(txt_content.encode("utf-8")) - txt_file.name = "data.txt" + txt_file.name = "sensor_readings.txt" file = openai_client.vector_stores.files.upload_and_poll( vector_store_id=vector_store.id, @@ -47,12 +75,12 @@ def test_find_and_analyze_data(self, **kwargs): ) print(f"File uploaded (id: {file.id})") - # Create agent + # Create agent with explicit instructions to use both tools agent = project_client.agents.create_version( agent_name="file-search-code-agent", definition=PromptAgentDefinition( model=model, - instructions="Find data and analyze it.", + instructions="You are a data analyst. 
Use file search to find data files, then use code interpreter to perform calculations on the data.", tools=[ FileSearchTool(vector_store_ids=[vector_store.id]), CodeInterpreterTool(container=CodeInterpreterToolAuto()), @@ -62,9 +90,9 @@ def test_find_and_analyze_data(self, **kwargs): ) print(f"Agent created (id: {agent.id})") - # Use the agent + # Request that requires both tools: find data AND calculate response = openai_client.responses.create( - input="Find the data file and calculate the average.", + input="Find the sensor readings file and use code to calculate the average temperature. Show me the result.", extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) @@ -79,7 +107,11 @@ def test_find_and_analyze_data(self, **kwargs): @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) def test_analyze_code_file(self, **kwargs): """ - Test finding code file and analyzing it. + Test finding code file and running it with Code Interpreter. + + This test verifies that both tools are used: + 1. File Search: Agent finds the Python code file + 2. Code Interpreter: Agent executes the code and returns the computed result """ model = self.test_agents_params["model_deployment_name"] @@ -88,14 +120,18 @@ def test_analyze_code_file(self, **kwargs): project_client = self.create_client(operation_group="agents", **kwargs) openai_client = project_client.get_openai_client() - # Create Python code file - python_code = """def fibonacci(n): + # Create Python code file with a function that computes a specific value + # fibonacci(15) = 610 - this is not a commonly memorized value + python_code = """# Fibonacci sequence calculator + +def fibonacci(n): + \"\"\"Calculate the nth Fibonacci number recursively.\"\"\" if n <= 1: return n return fibonacci(n-1) + fibonacci(n-2) -result = fibonacci(10) -print(f"Fibonacci(10) = {result}") +# The code needs to be executed to find what fibonacci(15) equals +# This is not a commonly known value - it requires actual computation """ vector_store = openai_client.vector_stores.create(name="CodeAnalysisStore") @@ -109,37 +145,46 @@ def test_analyze_code_file(self, **kwargs): ) print(f"Code file uploaded (id: {file.id})") - # Create agent + # Create agent with explicit instructions to run code agent = project_client.agents.create_version( agent_name="file-search-code-analysis-agent", definition=PromptAgentDefinition( model=model, - instructions="Find code files and analyze them. You can run code to test it.", + instructions="You are a code analyst. Use file search to find code files, then use code interpreter to execute and test the code.", tools=[ FileSearchTool(vector_store_ids=[vector_store.id]), CodeInterpreterTool(container=CodeInterpreterToolAuto()), ], ), - description="Agent for code analysis.", + description="Agent for code analysis and execution.", ) print(f"Agent created (id: {agent.id})") - # Request analysis + # Request that requires both tools: find code AND execute it response = openai_client.responses.create( - input="Find the fibonacci code and explain what it does. What is the computational complexity?", + input="Find the fibonacci code file and run it to calculate fibonacci(15). 
What is the result?", extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, ) response_text = response.output_text - print(f"Response: {response_text[:300]}...") + print(f"Response: {response_text[:400]}...") + + # Verify response is meaningful + assert len(response_text) > 30, "Expected detailed response" - assert len(response_text) > 50 + # Verify File Search was used - response should reference the fibonacci code response_lower = response_text.lower() assert any( - keyword in response_lower for keyword in ["fibonacci", "recursive", "complexity", "exponential"] - ), "Expected analysis of fibonacci algorithm" + keyword in response_lower for keyword in ["fibonacci", "function", "recursive", "code"] + ), f"Expected response to reference the fibonacci code. Got: {response_text[:200]}" + + # Verify Code Interpreter executed the code and got the correct result + # fibonacci(15) = 610 - this requires actual execution + assert "610" in response_text, f"Expected fibonacci(15) = 610 in response. Got: {response_text[:300]}" - print("✓ Code file analysis completed") + print("[PASS] File Search + Code Interpreter both verified!") + print(" - File Search: Found the fibonacci code file") + print(" - Code Interpreter: Executed code and computed fibonacci(15) = 610") # Cleanup project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py index c2373f750221..ae6d9ad86c70 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py @@ -147,7 +147,7 @@ def test_data_analysis_workflow(self, **kwargs): ) print(f"Final response: {response.output_text[:200]}...") - print("\n✓ Workflow completed successfully") + print("\n[PASS] Workflow completed successfully") # Teardown project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) @@ -222,7 +222,7 @@ def test_empty_vector_store_handling(self, **kwargs): if response_text: assert len(response_text) > 10, "Non-empty response should be meaningful" - print("\n✓ Agent handled missing data gracefully") + print("\n[PASS] Agent handled missing data gracefully") # Teardown project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) @@ -233,7 +233,11 @@ def test_empty_vector_store_handling(self, **kwargs): @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) def test_python_code_file_search(self, **kwargs): """ - Test searching for Python code files. + Test searching for Python code files and saving findings. + + This test verifies that both File Search and Function Tool are used: + 1. File Search: Agent searches vector store for Python code + 2. Function Tool: Agent saves the code review findings """ model = self.test_agents_params["model_deployment_name"] @@ -268,7 +272,7 @@ def calculate_sum(numbers): # Define function tool save_function = FunctionTool( name="save_code_review", - description="Save code review findings", + description="Save code review findings. 
Must be called to persist the review.", parameters={ "type": "object", "properties": { @@ -283,12 +287,12 @@ def calculate_sum(numbers): strict=True, ) - # Create agent + # Create agent with explicit instructions to use both tools agent = project_client.agents.create_version( agent_name="file-search-function-code-agent", definition=PromptAgentDefinition( model=model, - instructions="You can search for code files and describe what they do. Save your findings.", + instructions="You are a code reviewer. Search for code files, analyze them, and ALWAYS save your findings using the save_code_review function.", tools=[ FileSearchTool(vector_store_ids=[vector_store.id]), save_function, @@ -298,26 +302,55 @@ def calculate_sum(numbers): ) print(f"Agent created (id: {agent.id})") - # Request code analysis - print("\nAsking agent to find and analyze the Python code...") + # Request code analysis with explicit save instruction + print("\nAsking agent to find, analyze, and save the code review...") response = openai_client.responses.create( - input="Find the Python code file and tell me what the calculate_sum function does.", + input="Find the Python code file, explain what the calculate_sum function does, and save your code review findings.", extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, ) - response_text = response.output_text - print(f"Response: {response_text[:300]}...") + # Verify function call is made (both tools should be used) + function_call_found = False + input_list: ResponseInputParam = [] + + for item in response.output: + if item.type == "function_call": + function_call_found = True + print(f"Function call detected (name: {item.name})") + assert item.name == "save_code_review", f"Expected save_code_review, got {item.name}" + + arguments = json.loads(item.arguments) + print(f"Function arguments: {arguments}") + assert "findings" in arguments + assert len(arguments["findings"]) > 20, "Expected meaningful findings" - # Verify agent found and analyzed the code - assert len(response_text) > 50, "Expected detailed analysis" + # Verify findings discuss the code (proves File Search was used) + findings_lower = arguments["findings"].lower() + assert any( + keyword in findings_lower for keyword in ["sum", "calculate", "function", "numbers", "list", "return"] + ), f"Expected findings to discuss the code content. 
Got: {arguments['findings'][:100]}" - response_lower = response_text.lower() - assert any( - keyword in response_lower for keyword in ["sum", "calculate", "function", "numbers", "code", "python"] - ), "Expected response to discuss the code" + input_list.append( + FunctionCallOutput( + type="function_call_output", + call_id=item.call_id, + output=json.dumps({"status": "saved", "review_id": "review_001"}), + ) + ) + + assert function_call_found, "Expected save_code_review function to be called" + + # Send function results back to get final response + response = openai_client.responses.create( + input=input_list, + previous_response_id=response.id, + extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + ) + response_text = response.output_text + print(f"Final response: {response_text[:200] if response_text else '(confirmation)'}...") - print("\n✓ Agent successfully found code file using File Search") + print("\n[PASS] Agent successfully used both File Search and Function Tool") # Teardown project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) @@ -492,7 +525,7 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs): response_4_lower = response_4_text.lower() assert any(keyword in response_4_lower for keyword in ["bias", "privacy", "ethics", "accountability"]) - print("\n✓ Multi-turn File Search + Function workflow successful!") + print("\n[PASS] Multi-turn File Search + Function workflow successful!") print(" - Multiple searches across different documents") print(" - Function called after context-building searches") print(" - Topic switching works correctly") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py index ef44a8c644bc..c6df27f34b09 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py @@ -12,6 +12,7 @@ All tests use the same 3-tool combination but different inputs and workflows. """ +import json from io import BytesIO from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -22,6 +23,7 @@ CodeInterpreterToolAuto, FunctionTool, ) +from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam class TestAgentFileSearchCodeInterpreterFunction(TestBase): @@ -32,6 +34,11 @@ class TestAgentFileSearchCodeInterpreterFunction(TestBase): def test_complete_analysis_workflow(self, **kwargs): """ Test complete workflow: find data, analyze it, save results. + + This test verifies that all three tools are used: + 1. File Search: Agent finds the data file with numerical values + 2. Code Interpreter: Agent performs statistical calculations on the data + 3. 
Function Tool: Agent saves the computed results """ model = self.test_agents_params["model_deployment_name"] @@ -40,12 +47,30 @@ def test_complete_analysis_workflow(self, **kwargs): project_client = self.create_client(operation_group="agents", **kwargs) openai_client = project_client.get_openai_client() - # Create data file - txt_content = "Sample data for analysis" + # Create data file with numbers that require computation + # Values: 23, 47, 82, 15, 91, 38, 64, 29, 76, 55 + # Sum: 520, Count: 10, Average: 52.0, Min: 15, Max: 91 + txt_content = """Monthly Sales Report - Store #147 + +The following sales figures (in thousands) were recorded: + +January: 23 +February: 47 +March: 82 +April: 15 +May: 91 +June: 38 +July: 64 +August: 29 +September: 76 +October: 55 + +Please analyze this data for the quarterly review. +""" vector_store = openai_client.vector_stores.create(name="ThreeToolStore") txt_file = BytesIO(txt_content.encode("utf-8")) - txt_file.name = "data.txt" + txt_file.name = "sales_report.txt" file = openai_client.vector_stores.files.upload_and_poll( vector_store_id=vector_store.id, @@ -53,27 +78,30 @@ def test_complete_analysis_workflow(self, **kwargs): ) print(f"File uploaded (id: {file.id})") - # Define function tool + # Define function tool for saving analysis results func_tool = FunctionTool( - name="save_result", - description="Save analysis result", + name="save_analysis", + description="Save the statistical analysis results. Must be called to persist the analysis.", parameters={ "type": "object", "properties": { - "result": {"type": "string", "description": "The result"}, + "report_name": {"type": "string", "description": "Name of the report analyzed"}, + "total": {"type": "number", "description": "Sum of all values"}, + "average": {"type": "number", "description": "Average of all values"}, + "summary": {"type": "string", "description": "Brief summary of findings"}, }, - "required": ["result"], + "required": ["report_name", "total", "average", "summary"], "additionalProperties": False, }, strict=True, ) - # Create agent with all three tools + # Create agent with all three tools and explicit instructions agent = project_client.agents.create_version( agent_name="three-tool-agent", definition=PromptAgentDefinition( model=model, - instructions="Use file search to find data, code interpreter to analyze it, and save_result to save findings.", + instructions="You are a data analyst. 
Use file search to find data files, code interpreter to calculate statistics, and ALWAYS save your analysis using the save_analysis function.", tools=[ FileSearchTool(vector_store_ids=[vector_store.id]), CodeInterpreterTool(container=CodeInterpreterToolAuto()), @@ -84,9 +112,9 @@ def test_complete_analysis_workflow(self, **kwargs): ) print(f"Agent created (id: {agent.id})") - # Use the agent + # Request that requires all three tools response = openai_client.responses.create( - input="Find the data file, analyze it, and save the results.", + input="Find the sales report, use code to calculate the total and average of all monthly sales figures, then save the analysis results.", extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) @@ -95,81 +123,3 @@ def test_complete_analysis_workflow(self, **kwargs): # Cleanup project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) openai_client.vector_stores.delete(vector_store.id) - - @servicePreparer() - @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_four_tools_combination(self, **kwargs): - """ - Test with 4 tools: File Search + Code Interpreter + 2 Functions. - """ - - model = self.test_agents_params["model_deployment_name"] - - # Setup - project_client = self.create_client(operation_group="agents", **kwargs) - openai_client = project_client.get_openai_client() - - # Create vector store - txt_content = "Test data" - vector_store = openai_client.vector_stores.create(name="FourToolStore") - - txt_file = BytesIO(txt_content.encode("utf-8")) - txt_file.name = "data.txt" - - file = openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, - file=txt_file, - ) - - # Define two function tools - func_tool_1 = FunctionTool( - name="save_result", - description="Save result", - parameters={ - "type": "object", - "properties": { - "result": {"type": "string", "description": "The result"}, - }, - "required": ["result"], - "additionalProperties": False, - }, - strict=True, - ) - - func_tool_2 = FunctionTool( - name="log_action", - description="Log an action", - parameters={ - "type": "object", - "properties": { - "action": {"type": "string", "description": "Action taken"}, - }, - "required": ["action"], - "additionalProperties": False, - }, - strict=True, - ) - - # Create agent with 4 tools - agent = project_client.agents.create_version( - agent_name="four-tool-agent", - definition=PromptAgentDefinition( - model=model, - instructions="Use all available tools.", - tools=[ - FileSearchTool(vector_store_ids=[vector_store.id]), - CodeInterpreterTool(container=CodeInterpreterToolAuto()), - func_tool_1, - func_tool_2, - ], - ), - description="Agent with 4 tools.", - ) - print(f"Agent with 4 tools created (id: {agent.id})") - - assert agent.id is not None - print("✓ 4 tools works!") - - # Cleanup - project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - openai_client.vector_stores.delete(vector_store.id) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py index 0e3a4cc2e9ee..25a350e35935 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py @@ -149,7 +149,7 @@ def 
test_file_search_and_function_with_conversation(self, **kwargs): ) print(f"Response 3: {response_3.output_text[:150]}...") - print("\n✓ Mixed tools with conversation successful!") + print("\n[PASS] Mixed tools with conversation successful!") print(" - File search (server-side) worked") print(" - Function call (client-side) worked") print(" - Both tools used in same conversation") @@ -185,7 +185,7 @@ def test_file_search_and_function_with_conversation(self, **kwargs): assert function_calls >= 1, "Expected at least 1 function call (save_report)" assert function_outputs >= 1, "Expected at least 1 function output" - print("\n✓ Multi-tool conversation state verified") + print("\n[PASS] Multi-tool conversation state verified") print(" - Both server-side (FileSearch) and client-side (Function) tools tracked") print(" - All 3 turns preserved in conversation")
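Note on the recurring pattern in this patch: several of the updated tests resolve client-side function calls the same way (scan response.output for function_call items, post a FunctionCallOutput back with previous_response_id, then read the follow-up response). A minimal standalone sketch of that round trip is below; the handle_function_calls helper and the handlers mapping are illustrative names introduced here, not part of the SDK or of this patch, and the client/agent objects are assumed to be created as in the tests above.

# Illustrative sketch only: factored from the function-call round trip used in the
# tests in this patch; helper and handler names are hypothetical, not SDK API.
import json

from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam


def handle_function_calls(openai_client, agent, response, handlers):
    """Resolve client-side function calls in `response` and return the follow-up response.

    `handlers` maps a function tool name to a callable that receives the parsed
    arguments dict and returns a JSON-serializable result.
    """
    input_list: ResponseInputParam = []
    for item in response.output:
        if item.type != "function_call":
            continue
        # Arguments arrive as a JSON string; parse and dispatch to the matching handler.
        arguments = json.loads(item.arguments)
        result = handlers[item.name](arguments)
        input_list.append(
            FunctionCallOutput(
                type="function_call_output",
                call_id=item.call_id,
                output=json.dumps(result),
            )
        )

    if not input_list:
        return response  # nothing to resolve

    # Send the tool outputs back, chained to the original response via previous_response_id.
    return openai_client.responses.create(
        input=input_list,
        previous_response_id=response.id,
        extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
    )

# Example usage (handler result is a stand-in, as in the tests):
#   final = handle_function_calls(
#       openai_client, agent, response,
#       {"save_result": lambda args: {"status": "saved"}},
#   )
#   print(final.output_text)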