diff --git a/samples/python/src/common/function_call_resolver.py b/samples/python/src/common/function_call_resolver.py
index 49356429..4db51323 100644
--- a/samples/python/src/common/function_call_resolver.py
+++ b/samples/python/src/common/function_call_resolver.py
@@ -26,6 +26,8 @@
 from google import genai
 from google.genai import types
 
+from common.system_utils import LLM_MODEL
+
 DataPartContent = dict[str, Any]
 Tool = Callable[[list[DataPartContent], TaskUpdater, Task | None], Any]
@@ -81,7 +83,7 @@ def determine_tool_to_use(self, prompt: str) -> str:
     """
     response = self._client.models.generate_content(
-        model="gemini-2.5-flash",
+        model=LLM_MODEL,
         contents=prompt,
         config=self._config,
     )
diff --git a/samples/python/src/common/retrying_llm_agent.py b/samples/python/src/common/retrying_llm_agent.py
index c5e50e2c..fa6afaae 100644
--- a/samples/python/src/common/retrying_llm_agent.py
+++ b/samples/python/src/common/retrying_llm_agent.py
@@ -39,7 +39,7 @@ async def _retry_async(
         author=ctx.agent.name,
         invocation_id=ctx.invocation_id,
         error_message=(
-            "Maximum retries exhausted. The remote Gemini server failed to"
+            "Maximum retries exhausted. The remote LLM server failed to"
             " respond. Please try again later."
         ),
     )
@@ -51,7 +51,7 @@ async def _retry_async(
     yield Event(
         author=ctx.agent.name,
         invocation_id=ctx.invocation_id,
-        error_message="Gemini server error. Retrying...",
+        error_message="LLM server error. Retrying...",
         custom_metadata={"error": str(e)},
     )
     async for event in self._retry_async(ctx, retries_left - 1):
diff --git a/samples/python/src/common/system_utils.py b/samples/python/src/common/system_utils.py
index f9403935..1968a60c 100644
--- a/samples/python/src/common/system_utils.py
+++ b/samples/python/src/common/system_utils.py
@@ -14,6 +14,10 @@
 """Helper functions related to the system."""
 
+import os
+
+LLM_MODEL = os.environ.get("MODEL", "gemini-2.5-flash")
+
 DEBUG_MODE_INSTRUCTIONS = """
 This is really important! If the agent or user asks you to be verbose or if debug_mode is True, do the following:
 1. If this is the the start of a new task, explain who you are, what you are going to do, what tools you use, and what agents you delegate to.
diff --git a/samples/python/src/roles/merchant_agent/sub_agents/catalog_agent.py b/samples/python/src/roles/merchant_agent/sub_agents/catalog_agent.py
index b40f72e0..87ebd999 100644
--- a/samples/python/src/roles/merchant_agent/sub_agents/catalog_agent.py
+++ b/samples/python/src/roles/merchant_agent/sub_agents/catalog_agent.py
@@ -44,6 +44,7 @@
 from ap2.types.payment_request import PaymentRequest
 from common import message_utils
 from common.system_utils import DEBUG_MODE_INSTRUCTIONS
+from common.system_utils import LLM_MODEL
 
 
 async def find_items_workflow(
@@ -68,7 +69,7 @@
     """ % DEBUG_MODE_INSTRUCTIONS
     llm_response = llm_client.models.generate_content(
-        model="gemini-2.5-flash",
+        model=LLM_MODEL,
         contents=prompt,
         config={
             "response_mime_type": "application/json",
diff --git a/samples/python/src/roles/shopping_agent/agent.py b/samples/python/src/roles/shopping_agent/agent.py
index 37c91a31..f9cfbdb7 100644
--- a/samples/python/src/roles/shopping_agent/agent.py
+++ b/samples/python/src/roles/shopping_agent/agent.py
@@ -28,11 +28,12 @@
 from .subagents.shopper.agent import shopper
 from common.retrying_llm_agent import RetryingLlmAgent
 from common.system_utils import DEBUG_MODE_INSTRUCTIONS
+from common.system_utils import LLM_MODEL
 
 
 root_agent = RetryingLlmAgent(
     max_retries=5,
-    model="gemini-2.5-flash",
+    model=LLM_MODEL,
     name="root_agent",
     instruction="""
     You are a shopping agent responsible for helping users find and
diff --git a/samples/python/src/roles/shopping_agent/subagents/payment_method_collector/agent.py b/samples/python/src/roles/shopping_agent/subagents/payment_method_collector/agent.py
index 446f83fe..7f2d6192 100644
--- a/samples/python/src/roles/shopping_agent/subagents/payment_method_collector/agent.py
+++ b/samples/python/src/roles/shopping_agent/subagents/payment_method_collector/agent.py
@@ -28,10 +28,11 @@
 from . import tools
 from common.retrying_llm_agent import RetryingLlmAgent
 from common.system_utils import DEBUG_MODE_INSTRUCTIONS
+from common.system_utils import LLM_MODEL
 
 
 payment_method_collector = RetryingLlmAgent(
-    model="gemini-2.5-flash",
+    model=LLM_MODEL,
     name="payment_method_collector",
     max_retries=5,
     instruction="""
diff --git a/samples/python/src/roles/shopping_agent/subagents/shipping_address_collector/agent.py b/samples/python/src/roles/shopping_agent/subagents/shipping_address_collector/agent.py
index 0407b2b6..a9bbefb6 100644
--- a/samples/python/src/roles/shopping_agent/subagents/shipping_address_collector/agent.py
+++ b/samples/python/src/roles/shopping_agent/subagents/shipping_address_collector/agent.py
@@ -29,9 +29,10 @@
 from . import tools
 from common.retrying_llm_agent import RetryingLlmAgent
 from common.system_utils import DEBUG_MODE_INSTRUCTIONS
+from common.system_utils import LLM_MODEL
 
 
 shipping_address_collector = RetryingLlmAgent(
-    model="gemini-2.5-flash",
+    model=LLM_MODEL,
     name="shipping_address_collector",
     max_retries=5,
     instruction="""
diff --git a/samples/python/src/roles/shopping_agent/subagents/shopper/agent.py b/samples/python/src/roles/shopping_agent/subagents/shopper/agent.py
index d380fac7..8d33b27d 100644
--- a/samples/python/src/roles/shopping_agent/subagents/shopper/agent.py
+++ b/samples/python/src/roles/shopping_agent/subagents/shopper/agent.py
@@ -27,10 +27,11 @@
 from . import tools
 from common.retrying_llm_agent import RetryingLlmAgent
 from common.system_utils import DEBUG_MODE_INSTRUCTIONS
+from common.system_utils import LLM_MODEL
 
 
 shopper = RetryingLlmAgent(
-    model="gemini-2.5-flash",
+    model=LLM_MODEL,
     name="shopper",
     max_retries=5,
     instruction="""
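For anyone exercising this change locally, here is a minimal sketch (not part of the patch) of how the new `LLM_MODEL` constant behaves: the `MODEL` environment variable overrides the default model for every agent and every direct `generate_content` call. The override value shown is only an illustration; any model name accepted by the google-genai client should work.

```python
import os

# Mirrors the lookup added to common/system_utils.py: the MODEL environment
# variable, when set, replaces the default "gemini-2.5-flash" everywhere the
# samples previously hardcoded the model name.
LLM_MODEL = os.environ.get("MODEL", "gemini-2.5-flash")

# Hypothetical override before launching the sample agents:
#   export MODEL=gemini-2.0-flash
print(f"Agents will call model: {LLM_MODEL}")
```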