diff --git a/authors.yaml b/authors.yaml index 25b24d065a..dda91cc10f 100644 --- a/authors.yaml +++ b/authors.yaml @@ -2,6 +2,16 @@ # You can optionally customize how your information shows up cookbook.openai.com over here. # If your information is not present here, it will be pulled from your GitHub profile. +rajpathak-openai: + name: "Raj Pathak" + website: "https://www.linkedin.com/in/rajpathakopenai/" + avatar: "https://avatars.githubusercontent.com/u/208723614?s=400&u=c852eed3be082f7fbd402b5a45e9b89a0bfed1b8&v=4" + +chelseahu-openai: + name: "Chelsea Hu" + website: "https://www.linkedin.com/in/chelsea-tsaiszuhu/" + avatar: "https://avatars.githubusercontent.com/u/196863678?v=4" + prashantmital-openai: name: "Prashant Mital" website: "https://www.linkedin.com/in/pmital/" @@ -336,3 +346,4 @@ tompakeman-oai: name: "Tom Pakeman" website: "https://www.linkedin.com/in/tom-pakeman/" avatar: "https://avatars.githubusercontent.com/u/204937754" + diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/.gitignore b/examples/agents_sdk/multi-agent-portfolio-collaboration/.gitignore new file mode 100644 index 0000000000..0174400103 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/.gitignore @@ -0,0 +1,74 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# Installer logs +distutils.log +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Jupyter Notebook checkpoints +.ipynb_checkpoints + +# pyenv +.python-version + +# mypy +.mypy_cache/ +.dmypy.json + +# Pyre type checker +.pyre/ + +# VS Code +.vscode/ + +# Mac OS +.DS_Store + +# Output and log directories +outputs/ +logs/ + +# Project-specific logs and outputs +*.log +*.jsonl + +# Secret keys and environment variables +.env +.env.* \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/__init__.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/__init__.py new file mode 100644 index 0000000000..8b98142f54 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/__init__.py @@ -0,0 +1 @@ +# This file marks the agents directory as a Python package. 
\ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/config.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/config.py new file mode 100644 index 0000000000..4063d59cbc --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/config.py @@ -0,0 +1,30 @@ +from dataclasses import dataclass +from investment_agents.fundamental import build_fundamental_agent +from investment_agents.macro import build_macro_agent +from investment_agents.quant import build_quant_agent +from investment_agents.editor import build_editor_agent, build_memo_edit_tool +from investment_agents.pm import build_head_pm_agent, SpecialistRequestInput +import asyncio + +@dataclass +class InvestmentAgentsBundle: + head_pm: object + fundamental: object + macro: object + quant: object + + +def build_investment_agents() -> InvestmentAgentsBundle: + fundamental = build_fundamental_agent() + macro = build_macro_agent() + quant = build_quant_agent() + editor = build_editor_agent() + memo_edit_tool = build_memo_edit_tool(editor) + head_pm = build_head_pm_agent(fundamental, macro, quant, memo_edit_tool) + return InvestmentAgentsBundle( + head_pm=head_pm, + fundamental=fundamental, + macro=macro, + quant=quant, + ) + diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/editor.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/editor.py new file mode 100644 index 0000000000..47ec698745 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/editor.py @@ -0,0 +1,40 @@ +from agents import Agent, ModelSettings, function_tool, Runner, RunContextWrapper +from tools import write_markdown, read_file, list_output_files +from utils import load_prompt, DISCLAIMER +from pydantic import BaseModel +import json + +default_model = "gpt-4.1" + +class MemoEditorInput(BaseModel): + fundamental: str + macro: str + quant: str + pm: str + files: list[str] + +def build_editor_agent(): + tool_retry_instructions = load_prompt("tool_retry_prompt.md") + editor_prompt = load_prompt("editor_base.md") + return Agent( + name="Memo Editor Agent", + instructions=(editor_prompt + DISCLAIMER + tool_retry_instructions), + tools=[write_markdown, read_file, list_output_files], + model=default_model, + model_settings=ModelSettings(temperature=0), + ) + +def build_memo_edit_tool(editor): + @function_tool( + name_override="memo_editor", + description_override="Stitch analysis sections into a Markdown memo and save it. This is the ONLY way to generate and save the final investment report. 
All memos must be finalized through this tool.", + ) + async def memo_edit_tool(ctx: RunContextWrapper, input: MemoEditorInput) -> str: + result = await Runner.run( + starting_agent=editor, + input=json.dumps(input.model_dump()), + context=ctx.context, + max_turns=40, + ) + return result.final_output + return memo_edit_tool \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/fundamental.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/fundamental.py new file mode 100644 index 0000000000..b9c909d5f6 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/fundamental.py @@ -0,0 +1,28 @@ +from agents import Agent, WebSearchTool, ModelSettings +from utils import load_prompt, DISCLAIMER, repo_path +from pathlib import Path + +default_model = "gpt-4.1" +default_search_context = "medium" +RECENT_DAYS = 15 + +def build_fundamental_agent(): + tool_retry_instructions = load_prompt("tool_retry_prompt.md") + fundamental_prompt = load_prompt("fundamental_base.md", RECENT_DAYS=RECENT_DAYS) + # Set up the Yahoo Finance MCP server + from agents.mcp import MCPServerStdio + server_path = str(repo_path("mcp/yahoo_finance_server.py")) + yahoo_mcp_server = MCPServerStdio( + params={"command": "python", "args": [server_path]}, + client_session_timeout_seconds=300, + cache_tools_list=True, + ) + + return Agent( + name="Fundamental Analysis Agent", + instructions=(fundamental_prompt + DISCLAIMER + tool_retry_instructions), + mcp_servers=[yahoo_mcp_server], + tools=[WebSearchTool(search_context_size=default_search_context)], + model=default_model, + model_settings=ModelSettings(parallel_tool_calls=True, temperature=0), + ) \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/macro.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/macro.py new file mode 100644 index 0000000000..02c3354ca5 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/macro.py @@ -0,0 +1,18 @@ +from agents import Agent, WebSearchTool, ModelSettings +from tools import get_fred_series +from utils import load_prompt, DISCLAIMER + +default_model = "gpt-4.1" +default_search_context = "medium" +RECENT_DAYS = 45 + +def build_macro_agent(): + tool_retry_instructions = load_prompt("tool_retry_prompt.md") + macro_prompt = load_prompt("macro_base.md", RECENT_DAYS=RECENT_DAYS) + return Agent( + name="Macro Analysis Agent", + instructions=(macro_prompt + DISCLAIMER + tool_retry_instructions), + tools=[WebSearchTool(search_context_size=default_search_context), get_fred_series], + model=default_model, + model_settings=ModelSettings(parallel_tool_calls=True, temperature=0), + ) \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/pm.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/pm.py new file mode 100644 index 0000000000..cff4fd57c8 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/pm.py @@ -0,0 +1,69 @@ +from agents import Agent, ModelSettings, function_tool, Runner +from utils import load_prompt, DISCLAIMER +from dataclasses import dataclass +from pydantic import BaseModel +import json +import asyncio + + +class SpecialistRequestInput(BaseModel): + section: str # e.g., 'fundamental', 'macro', 'quant', or 'pm' + user_question: str + guidance: str + +# Core async 
functions for each specialist +async def specialist_analysis_func(agent, input: SpecialistRequestInput): + result = await Runner.run( + starting_agent=agent, + input=json.dumps(input.model_dump()), + max_turns=75, + ) + return result.final_output + +async def run_all_specialists_parallel( + fundamental, macro, quant, + fundamental_input: SpecialistRequestInput, + macro_input: SpecialistRequestInput, + quant_input: SpecialistRequestInput +): + results = await asyncio.gather( + specialist_analysis_func(fundamental, fundamental_input), + specialist_analysis_func(macro, macro_input), + specialist_analysis_func(quant, quant_input) + ) + return { + "fundamental": results[0], + "macro": results[1], + "quant": results[2] + } + +def build_head_pm_agent(fundamental, macro, quant, memo_edit_tool): + def make_agent_tool(agent, name, description): + @function_tool(name_override=name, description_override=description) + async def agent_tool(input: SpecialistRequestInput): + return await specialist_analysis_func(agent, input) + return agent_tool + fundamental_tool = make_agent_tool(fundamental, "fundamental_analysis", "Generate the Fundamental Analysis section.") + macro_tool = make_agent_tool(macro, "macro_analysis", "Generate the Macro Environment section.") + quant_tool = make_agent_tool(quant, "quantitative_analysis", "Generate the Quantitative Analysis section.") + + @function_tool(name_override="run_all_specialists_parallel", description_override="Run all three specialist analyses (fundamental, macro, quant) in parallel and return their results as a dict.") + async def run_all_specialists_tool(fundamental_input: SpecialistRequestInput, macro_input: SpecialistRequestInput, quant_input: SpecialistRequestInput): + return await run_all_specialists_parallel( + fundamental, macro, quant, + fundamental_input, macro_input, quant_input + ) + + return Agent( + name="Head Portfolio Manager Agent", + instructions=( + load_prompt("pm_base.md") + DISCLAIMER + ), + model="gpt-4.1", + #Reasoning model + #model="o4-mini", + tools=[fundamental_tool, macro_tool, quant_tool, memo_edit_tool, run_all_specialists_tool], + # Settings for a reasoning model + #model_settings=ModelSettings(parallel_tool_calls=True, reasoning={"summary": "auto", "effort": "high"}, tool_choice="auto") + model_settings=ModelSettings(parallel_tool_calls=True, tool_choice="auto", temperature=0) + ) \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/quant.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/quant.py new file mode 100644 index 0000000000..0494284f88 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/investment_agents/quant.py @@ -0,0 +1,27 @@ +from agents import Agent, ModelSettings +from tools import run_code_interpreter, get_fred_series, read_file, list_output_files +from utils import load_prompt, DISCLAIMER, repo_path +from pathlib import Path + +default_model = "gpt-4.1" + +def build_quant_agent(): + tool_retry_instructions = load_prompt("tool_retry_prompt.md") + quant_prompt = load_prompt("quant_base.md") + # Set up the Yahoo Finance MCP server + from agents.mcp import MCPServerStdio + server_path = str(repo_path("mcp/yahoo_finance_server.py")) + yahoo_mcp_server = MCPServerStdio( + params={"command": "python", "args": [server_path]}, + client_session_timeout_seconds=300, + cache_tools_list=True, + ) + + return Agent( + name="Quantitative Analysis Agent", + instructions=(quant_prompt + DISCLAIMER + 
tool_retry_instructions), + mcp_servers=[yahoo_mcp_server], + tools=[run_code_interpreter, get_fred_series, read_file, list_output_files], + model=default_model, + model_settings=ModelSettings(parallel_tool_calls=True, temperature=0), + ) \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/mcp/yahoo_finance_server.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/mcp/yahoo_finance_server.py new file mode 100644 index 0000000000..7241996cb9 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/mcp/yahoo_finance_server.py @@ -0,0 +1,486 @@ +import json +from enum import Enum +import os +import pandas as pd +import yfinance as yf +from mcp.server.fastmcp import FastMCP +from pathlib import Path +import uuid +import asyncio +import logging + +# Helper to ensure outputs dir exists and return path (repo root) +_REPO_ROOT = Path(__file__).resolve().parent.parent + +# Single shared outputs folder at the repository root +OUTPUTS_DIR = _REPO_ROOT / "outputs" + +# Ensure the directory exists +OUTPUTS_DIR.mkdir(parents=True, exist_ok=True) + +# Set up logging +LOGS_DIR = _REPO_ROOT / "logs" +LOGS_DIR.mkdir(parents=True, exist_ok=True) +LOG_FILE = LOGS_DIR / "yahoo_finance_server.log" +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(levelname)s %(message)s', + handlers=[logging.FileHandler(LOG_FILE), logging.StreamHandler()] +) +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Helper: write DataFrame to /outputs and strip any timezone info +# --------------------------------------------------------------------------- + +def _strip_tz(df: pd.DataFrame) -> pd.DataFrame: + out = df.copy() + for col in out.select_dtypes(include=["datetimetz"]).columns: + out[col] = out[col].dt.tz_localize(None) + return out + +def save_df_to_csv(df, base_name): + df_clean = _strip_tz(df) + file_path = OUTPUTS_DIR / f"{base_name}.csv" + if file_path.exists(): + unique_id = uuid.uuid4().hex[:8] + file_path = OUTPUTS_DIR / f"{base_name}_{unique_id}.csv" + df_clean.to_csv(file_path, index=False) + return str(file_path), list(df_clean.columns) + +def save_json_to_file(data, base_name): + file_path = OUTPUTS_DIR / f"{base_name}.json" + if file_path.exists(): + unique_id = uuid.uuid4().hex[:8] + file_path = OUTPUTS_DIR / f"{base_name}_{unique_id}.json" + with open(file_path, "w") as f: + json.dump(data, f, indent=2) + # Schema: for dict, top-level keys; for list, type of first element or 'list'; else type + if isinstance(data, dict): + schema = list(data.keys()) + preview = {k: data[k] for k in list(data)[:PREVIEW_ROWS]} + elif isinstance(data, list): + schema = [type(data[0]).__name__] if data else ["list"] + preview = data[:PREVIEW_ROWS] + else: + schema = [type(data).__name__] + preview = data + return str(file_path), schema, preview + +class FinancialType(str, Enum): + income_stmt = "income_stmt" + quarterly_income_stmt = "quarterly_income_stmt" + balance_sheet = "balance_sheet" + quarterly_balance_sheet = "quarterly_balance_sheet" + cashflow = "cashflow" + quarterly_cashflow = "quarterly_cashflow" + +class HolderType(str, Enum): + major_holders = "major_holders" + institutional_holders = "institutional_holders" + mutualfund_holders = "mutualfund_holders" + insider_transactions = "insider_transactions" + insider_purchases = "insider_purchases" + insider_roster_holders = "insider_roster_holders" + +class RecommendationType(str, Enum): + recommendations = 
"recommendations" + upgrades_downgrades = "upgrades_downgrades" + +# Initialize FastMCP server +yfinance_server = FastMCP( + "yfinance", + instructions=""" +# Yahoo Finance MCP Server + +This server is used to get information about a given ticker symbol from yahoo finance. + +Available tools: +- get_historical_stock_prices: Get historical stock prices for a given ticker symbol from yahoo finance. Include the following information: Date, Open, High, Low, Close, Volume, Adj Close. +- get_stock_info: Get stock information for a given ticker symbol from yahoo finance. Include the following information: Stock Price & Trading Info, Company Information, Financial Metrics, Earnings & Revenue, Margins & Returns, Dividends, Balance Sheet, Ownership, Analyst Coverage, Risk Metrics, Other. +- get_yahoo_finance_news: Get news for a given ticker symbol from yahoo finance. +- get_stock_actions: Get stock dividends and stock splits for a given ticker symbol from yahoo finance. +- get_financial_statement: Get financial statement for a given ticker symbol from yahoo finance. You can choose from the following financial statement types: income_stmt, quarterly_income_stmt, balance_sheet, quarterly_balance_sheet, cashflow, quarterly_cashflow. +- get_holder_info: Get holder information for a given ticker symbol from yahoo finance. You can choose from the following holder types: major_holders, institutional_holders, mutualfund_holders, insider_transactions, insider_purchases, insider_roster_holders. +- get_option_expiration_dates: Fetch the available options expiration dates for a given ticker symbol. +- get_option_chain: Fetch the option chain for a given ticker symbol, expiration date, and option type. +- get_recommendations: Get recommendations or upgrades/downgrades for a given ticker symbol from yahoo finance. You can also specify the number of months back to get upgrades/downgrades for, default is 12. +""", +) + +PREVIEW_ROWS = 20 + +# --- Tool: get_historical_stock_prices --- +def get_historical_stock_prices_sync(ticker, period, interval): + logger.info(f"Called get_historical_stock_prices_sync: ticker={ticker}, period={period}, interval={interval}") + company = yf.Ticker(ticker) + if company.isin is None: + logger.error(f"Company ticker {ticker} not found.") + return json.dumps({"error": f"Company ticker {ticker} not found."}) + hist_data = company.history(period=period, interval=interval) + hist_data = hist_data.reset_index(names="Date") + file_base = f"{ticker}_{period}_{interval}_historical" + file_path, schema = save_df_to_csv(hist_data, file_base) + preview_json = hist_data.head(PREVIEW_ROWS).to_json(orient="records", date_format="iso") + logger.info(f"Returning historical data for {ticker}") + return json.dumps({ + "file_path": file_path, + "schema": schema, + "preview": json.loads(preview_json) + }) + +@yfinance_server.tool( + name="get_historical_stock_prices", + description="""Get historical stock prices for a given ticker symbol from yahoo finance. Include the following information: Date, Open, High, Low, Close, Volume, Adj Close.\nArgs:\n ticker: str\n The ticker symbol of the stock to get historical prices for, e.g. 
\"AAPL\"\n period : str\n Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max\n Either Use period parameter or use start and end\n Default is \"1mo\"\n interval : str\n Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo\n Intraday data cannot extend last 60 days\n Default is \"1d\"\n""", +) +async def get_historical_stock_prices(ticker: str, period: str = "1mo", interval: str = "1d") -> str: + loop = asyncio.get_running_loop() + try: + return await asyncio.wait_for( + loop.run_in_executor(None, get_historical_stock_prices_sync, ticker, period, interval), + timeout=30 + ) + except asyncio.TimeoutError: + return json.dumps({"error": "Timeout fetching historical stock prices"}) + except Exception as e: + return json.dumps({"error": str(e)}) + +# --- Tool: get_stock_info --- +def get_stock_info_sync(ticker): + logger.info(f"Called get_stock_info_sync: ticker={ticker}") + company = yf.Ticker(ticker) + if company.isin is None: + logger.error(f"Company ticker {ticker} not found.") + return json.dumps({"error": f"Company ticker {ticker} not found."}) + info = company.info + file_path, schema, preview = save_json_to_file(info, f"{ticker}_stock_info") + logger.info(f"Returning stock info for {ticker}") + return json.dumps({ + "file_path": file_path, + "schema": schema, + "preview": preview + }) + +@yfinance_server.tool( + name="get_stock_info", + description="""Get stock information for a given ticker symbol from yahoo finance. Include the following information:\nStock Price & Trading Info, Company Information, Financial Metrics, Earnings & Revenue, Margins & Returns, Dividends, Balance Sheet, Ownership, Analyst Coverage, Risk Metrics, Other.\n\nArgs:\n ticker: str\n The ticker symbol of the stock to get information for, e.g. \"AAPL\"\n""", +) +async def get_stock_info(ticker: str) -> str: + loop = asyncio.get_running_loop() + try: + return await asyncio.wait_for( + loop.run_in_executor(None, get_stock_info_sync, ticker), + timeout=30 + ) + except asyncio.TimeoutError: + return json.dumps({"error": "Timeout fetching stock info"}) + except Exception as e: + return json.dumps({"error": str(e)}) + +# --- Tool: get_yahoo_finance_news --- +def get_yahoo_finance_news_sync(ticker): + logger.info(f"Called get_yahoo_finance_news_sync: ticker={ticker}") + company = yf.Ticker(ticker) + if company.isin is None: + logger.error(f"Company ticker {ticker} not found.") + return json.dumps({"error": f"Company ticker {ticker} not found."}) + try: + news = company.news + except Exception as e: + logger.error(f"Error getting news for {ticker}: {e}") + return json.dumps({"error": f"Error: getting news for {ticker}: {e}"}) + news_list = [] + for news_item in news: + if news_item.get("content", {}).get("contentType", "") == "STORY": + title = news_item.get("content", {}).get("title", "") + summary = news_item.get("content", {}).get("summary", "") + description = news_item.get("content", {}).get("description", "") + url = news_item.get("content", {}).get("canonicalUrl", {}).get("url", "") + news_list.append( + {"title": title, "summary": summary, "description": description, "url": url} + ) + if not news_list: + logger.warning(f"No news found for company with ticker {ticker}.") + return json.dumps({"error": f"No news found for company that searched with {ticker} ticker."}) + file_path, schema, preview = save_json_to_file(news_list, f"{ticker}_news") + logger.info(f"Returning news for {ticker}") + return json.dumps({ + "file_path": file_path, + "schema": schema, + "preview": preview + }) + 
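+# NOTE: Every async tool in this server follows the same wrapper pattern: the
+# blocking yfinance call runs in the default thread-pool executor (via
+# run_in_executor) so the event loop stays responsive; asyncio.wait_for enforces
+# a 30-second timeout; and errors come back as JSON payloads rather than raised
+# exceptions, so the calling agent can read and react to them.
+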
+@yfinance_server.tool( + name="get_yahoo_finance_news", + description="""Get news for a given ticker symbol from yahoo finance.\n\nArgs:\n ticker: str\n The ticker symbol of the stock to get news for, e.g. \"AAPL\"\n""", +) +async def get_yahoo_finance_news(ticker: str) -> str: + loop = asyncio.get_running_loop() + try: + return await asyncio.wait_for( + loop.run_in_executor(None, get_yahoo_finance_news_sync, ticker), + timeout=30 + ) + except asyncio.TimeoutError: + return json.dumps({"error": "Timeout fetching news"}) + except Exception as e: + return json.dumps({"error": str(e)}) + +# --- Tool: get_stock_actions --- +def get_stock_actions_sync(ticker): + logger.info(f"Called get_stock_actions_sync: ticker={ticker}") + try: + company = yf.Ticker(ticker) + except Exception as e: + logger.error(f"Error getting stock actions for {ticker}: {e}") + return json.dumps({"error": f"Error: getting stock actions for {ticker}: {e}"}) + actions_df = company.actions + actions_df = actions_df.reset_index(names="Date") + file_path, schema = save_df_to_csv(actions_df, f"{ticker}_actions") + preview_json = actions_df.head(PREVIEW_ROWS).to_json(orient="records", date_format="iso") + logger.info(f"Returning stock actions for {ticker}") + return json.dumps({ + "file_path": file_path, + "schema": schema, + "preview": json.loads(preview_json) + }) + +@yfinance_server.tool( + name="get_stock_actions", + description="""Get stock dividends and stock splits for a given ticker symbol from yahoo finance.\n\nArgs:\n ticker: str\n The ticker symbol of the stock to get stock actions for, e.g. \"AAPL\"\n""", +) +async def get_stock_actions(ticker: str) -> str: + loop = asyncio.get_running_loop() + try: + return await asyncio.wait_for( + loop.run_in_executor(None, get_stock_actions_sync, ticker), + timeout=30 + ) + except asyncio.TimeoutError: + return json.dumps({"error": "Timeout fetching stock actions"}) + except Exception as e: + return json.dumps({"error": str(e)}) + +# --- Tool: get_financial_statement --- +def get_financial_statement_sync(ticker, financial_type): + logger.info(f"Called get_financial_statement_sync: ticker={ticker}, financial_type={financial_type}") + company = yf.Ticker(ticker) + if company.isin is None: + logger.error(f"Company ticker {ticker} not found.") + return json.dumps({"error": f"Company ticker {ticker} not found."}) + if financial_type == FinancialType.income_stmt: + financial_statement = company.income_stmt + elif financial_type == FinancialType.quarterly_income_stmt: + financial_statement = company.quarterly_income_stmt + elif financial_type == FinancialType.balance_sheet: + financial_statement = company.balance_sheet + elif financial_type == FinancialType.quarterly_balance_sheet: + financial_statement = company.quarterly_balance_sheet + elif financial_type == FinancialType.cashflow: + financial_statement = company.cashflow + elif financial_type == FinancialType.quarterly_cashflow: + financial_statement = company.quarterly_cashflow + else: + logger.error(f"Invalid financial type {financial_type} for {ticker}.") + return json.dumps({"error": f"Error: invalid financial type {financial_type}. 
Please use one of the following: {list(FinancialType)}."}) + df = financial_statement.transpose().reset_index(names="date") + file_path, schema = save_df_to_csv(df, f"{ticker}_{financial_type}") + preview_json = df.head(PREVIEW_ROWS).to_json(orient="records", date_format="iso") + logger.info(f"Returning financial statement for {ticker}, type={financial_type}") + return json.dumps({ + "file_path": file_path, + "schema": schema, + "preview": json.loads(preview_json) + }) + +@yfinance_server.tool( + name="get_financial_statement", + description="""Get financial statement for a given ticker symbol from yahoo finance. You can choose from the following financial statement types: income_stmt, quarterly_income_stmt, balance_sheet, quarterly_balance_sheet, cashflow, quarterly_cashflow.\n\nArgs:\n ticker: str\n The ticker symbol of the stock to get financial statement for, e.g. \"AAPL\"\n financial_type: str\n The type of financial statement to get. You can choose from the following financial statement types: income_stmt, quarterly_income_stmt, balance_sheet, quarterly_balance_sheet, cashflow, quarterly_cashflow.\n""", +) +async def get_financial_statement(ticker: str, financial_type: str) -> str: + loop = asyncio.get_running_loop() + try: + return await asyncio.wait_for( + loop.run_in_executor(None, get_financial_statement_sync, ticker, financial_type), + timeout=30 + ) + except asyncio.TimeoutError: + return json.dumps({"error": "Timeout fetching financial statement"}) + except Exception as e: + return json.dumps({"error": str(e)}) + +# --- Tool: get_holder_info --- +def get_holder_info_sync(ticker, holder_type): + logger.info(f"Called get_holder_info_sync: ticker={ticker}, holder_type={holder_type}") + company = yf.Ticker(ticker) + if company.isin is None: + logger.error(f"Company ticker {ticker} not found.") + return json.dumps({"error": f"Company ticker {ticker} not found."}) + if holder_type == HolderType.major_holders: + df = company.major_holders.reset_index(names="metric") + elif holder_type == HolderType.institutional_holders: + df = company.institutional_holders + elif holder_type == HolderType.mutualfund_holders: + df = company.mutualfund_holders + elif holder_type == HolderType.insider_transactions: + df = company.insider_transactions + elif holder_type == HolderType.insider_purchases: + df = company.insider_purchases + elif holder_type == HolderType.insider_roster_holders: + df = company.insider_roster_holders + else: + logger.error(f"Invalid holder type {holder_type} for {ticker}.") + return json.dumps({"error": f"Error: invalid holder type {holder_type}. Please use one of the following: {list(HolderType)}."}) + df = df.reset_index() if df.index.name or df.index.names else df + file_path, schema = save_df_to_csv(df, f"{ticker}_{holder_type}") + preview_json = df.head(PREVIEW_ROWS).to_json(orient="records", date_format="iso") + logger.info(f"Returning holder info for {ticker}, type={holder_type}") + return json.dumps({ + "file_path": file_path, + "schema": schema, + "preview": json.loads(preview_json) + }) + +@yfinance_server.tool( + name="get_holder_info", + description="""Get holder information for a given ticker symbol from yahoo finance. You can choose from the following holder types: major_holders, institutional_holders, mutualfund_holders, insider_transactions, insider_purchases, insider_roster_holders.\n\nArgs:\n ticker: str\n The ticker symbol of the stock to get holder information for, e.g. \"AAPL\"\n holder_type: str\n The type of holder information to get. 
You can choose from the following holder types: major_holders, institutional_holders, mutualfund_holders, insider_transactions, insider_purchases, insider_roster_holders.\n""", +) +async def get_holder_info(ticker: str, holder_type: str) -> str: + loop = asyncio.get_running_loop() + try: + return await asyncio.wait_for( + loop.run_in_executor(None, get_holder_info_sync, ticker, holder_type), + timeout=30 + ) + except asyncio.TimeoutError: + return json.dumps({"error": "Timeout fetching holder info"}) + except Exception as e: + return json.dumps({"error": str(e)}) + +# --- Tool: get_option_expiration_dates --- +def get_option_expiration_dates_sync(ticker): + logger.info(f"Called get_option_expiration_dates_sync: ticker={ticker}") + company = yf.Ticker(ticker) + if company.isin is None: + logger.error(f"Company ticker {ticker} not found.") + return json.dumps({"error": f"Company ticker {ticker} not found."}) + dates = list(company.options) + file_path, schema, preview = save_json_to_file(dates, f"{ticker}_option_expiration_dates") + logger.info(f"Returning option expiration dates for {ticker}") + return json.dumps({ + "file_path": file_path, + "schema": schema, + "preview": preview + }) + +@yfinance_server.tool( + name="get_option_expiration_dates", + description="""Fetch the available options expiration dates for a given ticker symbol.\n\nArgs:\n ticker: str\n The ticker symbol of the stock to get option expiration dates for, e.g. \"AAPL\"\n""", +) +async def get_option_expiration_dates(ticker: str) -> str: + loop = asyncio.get_running_loop() + try: + return await asyncio.wait_for( + loop.run_in_executor(None, get_option_expiration_dates_sync, ticker), + timeout=30 + ) + except asyncio.TimeoutError: + return json.dumps({"error": "Timeout fetching option expiration dates"}) + except Exception as e: + return json.dumps({"error": str(e)}) + +# --- Tool: get_option_chain --- +def get_option_chain_sync(ticker, expiration_date, option_type): + logger.info(f"Called get_option_chain_sync: ticker={ticker}, expiration_date={expiration_date}, option_type={option_type}") + company = yf.Ticker(ticker) + if company.isin is None: + logger.error(f"Company ticker {ticker} not found.") + return json.dumps({"error": f"Company ticker {ticker} not found."}) + if expiration_date not in company.options: + logger.error(f"No options available for {ticker} on date {expiration_date}.") + return json.dumps({"error": f"No options available for the date {expiration_date}. You can use `get_option_expiration_dates` to get the available expiration dates."}) + if option_type not in ["calls", "puts"]: + logger.error(f"Invalid option type {option_type} for {ticker}.") + return json.dumps({"error": "Invalid option type. Please use 'calls' or 'puts'."}) + option_chain = company.option_chain(expiration_date) + df = option_chain.calls if option_type == "calls" else option_chain.puts + file_path, schema = save_df_to_csv(df, f"{ticker}_{expiration_date}_{option_type}_options") + preview_json = df.head(PREVIEW_ROWS).to_json(orient="records", date_format="iso") + logger.info(f"Returning option chain for {ticker}, date={expiration_date}, type={option_type}") + return json.dumps({ + "file_path": file_path, + "schema": schema, + "preview": json.loads(preview_json) + }) + +@yfinance_server.tool( + name="get_option_chain", + description="""Fetch the option chain for a given ticker symbol, expiration date, and option type.\n\nArgs:\n ticker: str\n The ticker symbol of the stock to get option chain for, e.g. 
\"AAPL\"\n expiration_date: str\n The expiration date for the options chain (format: 'YYYY-MM-DD')\n option_type: str\n The type of option to fetch ('calls' or 'puts')\n""", +) +async def get_option_chain(ticker: str, expiration_date: str, option_type: str) -> str: + loop = asyncio.get_running_loop() + try: + return await asyncio.wait_for( + loop.run_in_executor(None, get_option_chain_sync, ticker, expiration_date, option_type), + timeout=30 + ) + except asyncio.TimeoutError: + return json.dumps({"error": "Timeout fetching option chain"}) + except Exception as e: + return json.dumps({"error": str(e)}) + +# --- Tool: get_recommendations --- +def get_recommendations_sync(ticker, recommendation_type, months_back=12): + logger.info(f"Called get_recommendations_sync: ticker={ticker}, recommendation_type={recommendation_type}, months_back={months_back}") + company = yf.Ticker(ticker) + if company.isin is None: + logger.error(f"Company ticker {ticker} not found.") + return json.dumps({"error": f"Company ticker {ticker} not found."}) + try: + if recommendation_type == RecommendationType.recommendations: + df = company.recommendations + elif recommendation_type == RecommendationType.upgrades_downgrades: + upgrades_downgrades = company.upgrades_downgrades.reset_index() + cutoff_date = pd.Timestamp.now() - pd.DateOffset(months=months_back) + upgrades_downgrades = upgrades_downgrades[ + upgrades_downgrades["GradeDate"] >= cutoff_date + ] + upgrades_downgrades = upgrades_downgrades.sort_values("GradeDate", ascending=False) + latest_by_firm = upgrades_downgrades.drop_duplicates(subset=["Firm"]) + df = latest_by_firm + else: + logger.error(f"Invalid recommendation type {recommendation_type} for {ticker}.") + return json.dumps({"error": f"Invalid recommendation type {recommendation_type}."}) + df = df.reset_index() if df.index.name or df.index.names else df + file_path, schema = save_df_to_csv(df, f"{ticker}_{recommendation_type}_recommendations") + preview_json = df.head(PREVIEW_ROWS).to_json(orient="records", date_format="iso") + logger.info(f"Returning recommendations for {ticker}, type={recommendation_type}, months_back={months_back}") + return json.dumps({ + "file_path": file_path, + "schema": schema, + "preview": json.loads(preview_json) + }) + except Exception as e: + logger.error(f"Error getting recommendations for {ticker}: {e}") + return json.dumps({"error": f"Error: getting recommendations for {ticker}: {e}"}) + +@yfinance_server.tool( + name="get_recommendations", + description="""Get recommendations or upgrades/downgrades for a given ticker symbol from yahoo finance. You can also specify the number of months back to get upgrades/downgrades for, default is 12.\n\nArgs:\n ticker: str\n The ticker symbol of the stock to get recommendations for, e.g. \"AAPL\"\n recommendation_type: str\n The type of recommendation to get. 
You can choose from the following recommendation types: recommendations, upgrades_downgrades.\n months_back: int\n The number of months back to get upgrades/downgrades for, default is 12.\n""",
+)
+async def get_recommendations(ticker: str, recommendation_type: str, months_back: int = 12) -> str:
+    loop = asyncio.get_running_loop()
+    try:
+        return await asyncio.wait_for(
+            loop.run_in_executor(None, get_recommendations_sync, ticker, recommendation_type, months_back),
+            timeout=30
+        )
+    except asyncio.TimeoutError:
+        return json.dumps({"error": "Timeout fetching recommendations"})
+    except Exception as e:
+        return json.dumps({"error": str(e)})
+
+if __name__ == "__main__":
+    # Initialize and run the server
+    print("Starting Yahoo Finance MCP server...")
+    yfinance_server.run(transport="stdio") \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/multi_agent_portfolio_collaboration.ipynb b/examples/agents_sdk/multi-agent-portfolio-collaboration/multi_agent_portfolio_collaboration.ipynb new file mode 100644 index 0000000000..8edbfa4e2a --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/multi_agent_portfolio_collaboration.ipynb @@ -0,0 +1,579 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "1e5b29d1",
+   "metadata": {},
+   "source": [
+    "# Multi-Agent Orchestration with OpenAI Agents SDK: Financial Portfolio Analysis Example\n",
+    "\n",
+    "## Introduction\n",
+    "\n",
+    "*This guide is for readers already familiar with OpenAI models and LLM agents, and who want to see how to orchestrate a team of agents for a real-world, complex task.*\n",
+    "\n",
+    "**What You'll Learn**\n",
+    "\n",
+    "In this notebook, you'll learn how to use the OpenAI Agents SDK to design and implement a complex multi-agent collaboration system. Specifically, you'll see how to:\n",
+    "- Build a workflow where multiple specialist agents (Macro, Fundamental, Quantitative) collaborate under a Portfolio Manager agent to solve a challenging investment research problem.\n",
+    "- Use the \"agents as a tool\" approach, where a central agent orchestrates and calls other agents as tools for specific subtasks.\n",
+    "- Leverage all major tool types supported by the SDK (custom Python functions, managed tools like Code Interpreter and WebSearch, and external MCP servers) in a single, integrated workflow.\n",
+    "- Apply best practices for modularity, parallelism, and observability in agentic patterns.\n",
+    "\n",
+    "**Why this matters**\n",
+    "\n",
+    "The \"agents as a tool\" pattern is a powerful way to build transparent, auditable, and scalable multi-agent collaboration. This example demonstrates how to combine deep specialization, parallel execution, and robust orchestration using the OpenAI Agents SDK.\n",
+    "\n",
+    "By the end of this guide, you'll have a clear blueprint for building your own multi-agent workflows for research, analysis, or any complex task that benefits from expert collaboration.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ed547489",
+   "metadata": {},
+   "source": [
+    "\n",
+    "---\n",
+    "\n",
+    "## Table of Contents\n",
+    "\n",
+    "1. [What is Multi-Agent Collaboration?](#what-is-multi-agent-collaboration)\n",
+    "2. [Collaboration Patterns: Handoff vs. Agent-as-Tool](#collaboration-patterns-handoff-vs-agent-as-tool)\n",
+    "3. [Architecture Overview](#architecture-overview)\n",
+    "4. [Supported Tool Types](#supported-tool-types)\n",
+    "5. [Setup](#setup)\n",
+    "6. [Running the Workflow](#running-the-workflow)\n",
+    "7. 
[The Head Portfolio Manager (PM) Agent](#the-head-portfolio-manager-pm-agent)\n",
+    "8. [Breaking Down the Head Portfolio Manager Agent](#breaking-down-the-head-portfolio-manager-agent)\n",
+    "9. [Example Output](#example-output)\n",
+    "10. [Best Practices When Building Agents](#best-practices-when-building-agents)\n",
+    "11. [Further Reading & Best Practices](#further-reading--best-practices)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "26670dad",
+   "metadata": {},
+   "source": [
+    "\n",
+    "---\n",
+    "\n",
+    "## What is Multi-Agent Collaboration?\n",
+    "\n",
+    "**Multi-agent collaboration** means multiple autonomous agents (LLM \"nodes\") coordinate to achieve an overarching goal that would be difficult for a single agent to handle. Instead of one monolithic prompt, each agent handles a specific subtask or expertise area, and an orchestration layer connects these agent \"nodes\" into a coherent workflow. This approach is useful for complex systems – for example, a financial analysis might be broken into macro-economic analysis, fundamental company analysis, and quantitative signal analysis, each handled by a different specialist agent. The agents share information and their results are combined to produce a final outcome.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4d5f3a58",
+   "metadata": {},
+   "source": [
+    "\n",
+    "### Collaboration Patterns: Handoff vs. Agent-as-Tool\n",
+    "\n",
+    "The OpenAI Agents SDK supports multiple patterns for agents to work together:\n",
+    "\n",
+    "- **Handoff Collaboration:** One agent can _hand off_ control to another agent mid-problem. In a handoff architecture, each agent knows about the others and can decide when to defer to a more appropriate agent. This is flexible for open-ended or conversational workflows, but can make it harder to maintain a global view of the task. [Read more in the SDK docs.](https://openai.github.io/openai-agents-python/handoffs/)\n",
+    "\n",
+    "- **Agent as a Tool:** In this approach, one agent (often a central planner or manager) **calls other agents as if they were tools**. Sub-agents don't take over the conversation; instead, the main agent invokes them for specific subtasks and incorporates their results. This model keeps a single thread of control (the main agent orchestrates everything) and tends to simplify coordination. **This repo uses the agent-as-tool model:** the Portfolio Manager agent remains in charge, using the other specialist agents as tools when it needs their expertise. This choice keeps the overall reasoning transparent and allows parallel execution of sub-tasks, which is ideal for complex analyses.\n",
+    "\n",
+    "For more on these collaboration patterns, see the [OpenAI Agents SDK documentation](https://openai.github.io/openai-agents-python/multi_agent/).\n",
+    "\n",
+    "---\n",
+    "\n",
+    "## Architecture Overview\n",
+    "\n",
+    "Our system follows a **hub-and-spoke design**. The **Portfolio Manager agent** is the hub (central coordinator), and the **specialist agents** are the spokes. The user's query (e.g. \"How would a planned interest rate reduction affect my GOOGL holdings?\") goes first to the Portfolio Manager. The Portfolio Manager agent is prompted to break down the problem and delegate to the appropriate specialist agents. It treats each specialist as a callable tool, invoking them for their portion of the analysis. 
All three report back to the Portfolio Manager, which then synthesizes a final answer for the user.\n", + "\n", + "![Multi-Agent Investment Report Workflow](static/agent_architecture.png)\n" + ] + }, + { + "cell_type": "markdown", + "id": "a7a2ef1e", + "metadata": {}, + "source": [ + "\n", + "---\n", + "\n", + "## Supported Tool Types\n", + "\n", + "A key advantage of the Agents SDK is the flexibility in defining **tools** that agents can use. Tools can range from simple Python functions to external services. In this project, we use:\n", + "\n", + "- **MCP (Model Context Protocol) Server:** Used to connect agents to external tools and data sources in a standardized way. This project uses a local MCP server for Yahoo Finance data (see `mcp/yahoo_finance_server.py`). [Learn more: OpenAI MCP docs](https://openai.github.io/openai-agents-python/mcp/) | [MCP Spec](https://modelcontextprotocol.io/)\n", + "\n", + "- **OpenAI Managed Tools:** Managed tools are built-in, hosted tools provided by OpenAI that require no custom implementation. They offer powerful capabilities out of the box, such as **Code Interpreter** (for quantitative/statistical analysis) and **WebSearch** (for up-to-date news and data). These tools are easy to integrate, maintained by OpenAI, and allow agents to perform advanced actions like code execution and real-time information retrieval without additional setup.\n", + "\n", + "- **Custom Tools:** Custom tools are any Python functions you define and register as tools for your agent. The Agents SDK makes this easy: just decorate your function, and the SDK will automatically extract its name, docstring, and input schema. This is ideal for domain-specific logic, data access, or workflow extensions. \n", + " In our project, we use custom tools to access FRED economic data ([see FRED API](https://fred.stlouisfed.org/docs/api/api_key.html)) and perform file system operations.\n", + "\n", + "Custom tools give you full flexibility to extend your agent's capabilities beyond built-in or managed tools. [See the SDK docs on function tools.](https://openai.github.io/openai-agents-python/tools/#function-tools)\n", + "\n", + "> **Want to add more tools?** The SDK supports a wide range of tool types, including web search, file search, code execution, and more. [See the full list of supported tools in the SDK documentation.](https://openai.github.io/openai-agents-python/tools/)\n", + "\n", + "---\n", + "\n", + "## Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b128b837", + "metadata": {}, + "outputs": [], + "source": [ + "# Install required dependencies\n", + "!pip install -r requirements.txt" + ] + }, + { + "cell_type": "markdown", + "id": "21c2f377", + "metadata": {}, + "source": [ + "**Before running the workflow, set your environment variables:**\n", + "- `OPENAI_API_KEY` (for OpenAI access)\n", + "- `FRED_API_KEY` (for FRED economic data, see [FRED API key instructions](https://fred.stlouisfed.org/docs/api/api_key.html))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c70bf2c3", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "missing = []\n", + "if not os.environ.get('OPENAI_API_KEY'):\n", + " missing.append('OPENAI_API_KEY')\n", + "if not os.environ.get('FRED_API_KEY'):\n", + " missing.append('FRED_API_KEY')\n", + "\n", + "if missing:\n", + " print(f\"Missing environment variable(s): {', '.join(missing)}. 
Please set them before running the workflow.\")\n",
+    "else:\n",
+    "    print(\"All required API keys are set.\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f3b2c4e5",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "\n",
+    "## Running the Workflow\n",
+    "\n",
+    "
\n",
+    "Disclaimer: This example is for educational purposes only. Consult a qualified financial professional before making any investment decisions.\n",
+    "
\n" + ] + }, + { + "cell_type": "markdown", + "id": "04b11e29", + "metadata": {}, + "source": [ + "The workflow is kicked off by sending a user request to the Head Portfolio Manager (PM) agent. The PM agent orchestrates the entire process, delegating to specialist agents and tools as needed. You can monitor the workflow in real time using OpenAI Traces, which provide detailed visibility into every agent and tool call.\n", + "\n", + "Edit the `question` in the code below to whatever you'd like, but keep the date field to improve accuracy!\n", + "\n", + "
\n", + "Note: Depending on the complexity of the task, this request can take up to 10 minutes.\n", + "
\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5a7059b4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import datetime\n",
+    "import json\n",
+    "import os\n",
+    "from contextlib import AsyncExitStack\n",
+    "from agents import Runner, add_trace_processor, trace\n",
+    "from agents.tracing.processors import BatchTraceProcessor\n",
+    "from utils import FileSpanExporter, output_file\n",
+    "from investment_agents.config import build_investment_agents\n",
+    "import asyncio\n",
+    "\n",
+    "add_trace_processor(BatchTraceProcessor(FileSpanExporter()))\n",
+    "\n",
+    "async def run_workflow():\n",
+    "    if \"OPENAI_API_KEY\" not in os.environ:\n",
+    "        raise EnvironmentError(\"OPENAI_API_KEY not set — set it as an environment variable before running.\")\n",
+    "\n",
+    "    today_str = datetime.date.today().strftime(\"%B %d, %Y\")\n",
+    "    question = (\n",
+    "        f\"Today is {today_str}. \"\n",
+    "        \"How would the planned interest rate reduction affect my holdings in GOOGL if it were to happen? \"\n",
+    "        \"Considering all the factors affecting its price right now (Macro, Technical, Fundamental, etc.), what is a realistic price target by the end of the year?\"\n",
+    "    )\n",
+    "    bundle = build_investment_agents()\n",
+    "\n",
+    "    async with AsyncExitStack() as stack:\n",
+    "        for agent in [getattr(bundle, \"fundamental\", None), getattr(bundle, \"quant\", None)]:\n",
+    "            if agent is None:\n",
+    "                continue\n",
+    "            for server in getattr(agent, \"mcp_servers\", []):\n",
+    "                await server.connect()\n",
+    "                await stack.enter_async_context(server)\n",
+    "\n",
+    "        print(\"Running multi-agent workflow with tracing enabled...\\n\")\n",
+    "        with trace(\n",
+    "            \"Investment Research Workflow\",\n",
+    "            metadata={\"question\": question[:512]}\n",
+    "        ) as workflow_trace:\n",
+    "            print(\n",
+    "                f\"\\n🔗 View the trace in the OpenAI console: \"\n",
+    "                f\"https://platform.openai.com/traces/trace?trace_id={workflow_trace.trace_id}\\n\"\n",
+    "            )\n",
+    "\n",
+    "            response = None\n",
+    "            try:\n",
+    "                response = await asyncio.wait_for(\n",
+    "                    Runner.run(bundle.head_pm, question, max_turns=40),\n",
+    "                    timeout=1200\n",
+    "                )\n",
+    "            except asyncio.TimeoutError:\n",
+    "                print(\"\\n❌ Workflow timed out after 20 minutes.\")\n",
+    "\n",
+    "            report_path = None\n",
+    "            try:\n",
+    "                if hasattr(response, 'final_output'):\n",
+    "                    output = response.final_output\n",
+    "                    if isinstance(output, str):\n",
+    "                        data = json.loads(output)\n",
+    "                        if isinstance(data, dict) and 'file' in data:\n",
+    "                            report_path = output_file(data['file'])\n",
+    "            except Exception as e:\n",
+    "                print(f\"Could not parse investment report path: {e}\")\n",
+    "\n",
+    "    print(f\"Workflow Completed Response from Agent: {response.final_output if hasattr(response, 'final_output') else response}, investment report created: {report_path if report_path else '[unknown]'}\")\n",
+    "\n",
+    "# In a Jupyter notebook cell, run:\n",
+    "await run_workflow()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "94273ca6",
+   "metadata": {},
+   "source": [
+    "\n",
+    "---\n",
+    "\n",
+    "## Breaking Down the Head Portfolio Manager Agent\n",
+    "\n",
+    "The Head Portfolio Manager (PM) agent is the orchestrator of the entire workflow. It coordinates a set of four specialist agents, each focused on a different area of expertise. 
This design is intentional: overloading a single agent with every possible responsibility leads to shallow, generic outputs and makes it hard to maintain or improve your system over time.\n", + "\n", + "### Why This Design?\n", + "By breaking the problem into specialized agents—each with a clear role—you get:\n", + "\n", + "- **Deeper, higher-quality research:** Each agent can focus on its domain, using the right tools and prompts for the job. The PM agent brings these perspectives together for a more nuanced, robust answer.\n", + "\n", + "- **Modularity and clarity:** You can update, test, or improve one agent without affecting the others. This makes your system easier to maintain and extend as your needs evolve.\n", + "\n", + "- **Faster results through parallelism:** Independent agents can work at the same time, dramatically reducing the time to complete complex, multi-part analyses.\n", + "\n", + "- **Consistency and auditability:** A structured, prompt-driven workflow ensures every run follows best practices, is easy to debug, and produces outputs you can trust and review.\n", + "\n", + "This approach is ideal for any application where you want depth, specialization, and reliability—whether you're building a research assistant, a decision support tool, or any system that benefits from expert collaboration and orchestration.\n", + "\n", + "**How We Implement This in Practice:**\n", + "- Each specialist agent (Fundamental, Macro, Quantitative) is wrapped as a callable tool using the SDK's `function_tool` decorator, with custom names and descriptions. This makes the PM agent's toolset explicit and LLM-friendly.\n", + "\n", + "- The Head PM agent uses the `run_all_specialists_parallel` tool to invoke all three specialists concurrently, leveraging `parallel_tool_calls=True` for maximum speed and efficiency.\n", + "\n", + "- The agent's prompt is loaded from a markdown file (`pm_base.md`), encoding not just the firm's philosophy but also detailed tool usage rules and a step-by-step workflow. This ensures every run is consistent, auditable, and aligned with best practices.\n", + "\n", + "- After gathering and reviewing the specialist outputs, the PM agent uses a dedicated memo editor tool to assemble, format, and finalize the investment report. This separation of concerns keeps the workflow modular and easy to extend.\n", + "\n", + "- The system is designed for extensibility: you can add new specialist agents, swap out tools, or update prompts without breaking the overall orchestration logic. 
All tool calls, agent decisions, and outputs are captured in OpenAI Traces for full transparency and debugging.\n",
+    "\n",
+    "These implementation choices directly support the benefits above—enabling deep, modular, and reliable multi-agent research workflows that are easy to maintain, audit, and improve.\n",
+    "\n",
+    "### Head Portfolio Manager Agent: Code"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4a2c464a",
+   "metadata": {},
+   "source": [
+    "```python\n",
+    "from agents import Agent, ModelSettings, function_tool\n",
+    "from utils import load_prompt, DISCLAIMER\n",
+    "\n",
+    "def build_head_pm_agent(fundamental, macro, quant, memo_edit_tool):\n",
+    "    def make_agent_tool(agent, name, description):\n",
+    "        @function_tool(name_override=name, description_override=description)\n",
+    "        async def agent_tool(input):\n",
+    "            return await specialist_analysis_func(agent, input)\n",
+    "        return agent_tool\n",
+    "    fundamental_tool = make_agent_tool(fundamental, \"fundamental_analysis\", \"Generate the Fundamental Analysis section.\")\n",
+    "    macro_tool = make_agent_tool(macro, \"macro_analysis\", \"Generate the Macro Environment section.\")\n",
+    "    quant_tool = make_agent_tool(quant, \"quantitative_analysis\", \"Generate the Quantitative Analysis section.\")\n",
+    "\n",
+    "    @function_tool(name_override=\"run_all_specialists_parallel\", description_override=\"Run all three specialist analyses (fundamental, macro, quant) in parallel and return their results as a dict.\")\n",
+    "    async def run_all_specialists_tool(fundamental_input, macro_input, quant_input):\n",
+    "        return await run_all_specialists_parallel(\n",
+    "            fundamental, macro, quant,\n",
+    "            fundamental_input, macro_input, quant_input\n",
+    "        )\n",
+    "\n",
+    "    return Agent(\n",
+    "        name=\"Head Portfolio Manager Agent\",\n",
+    "        instructions=(load_prompt(\"pm_base.md\") + DISCLAIMER),\n",
+    "        model=\"gpt-4.1\",\n",
+    "        tools=[fundamental_tool, macro_tool, quant_tool, memo_edit_tool, run_all_specialists_tool],\n",
+    "        model_settings=ModelSettings(parallel_tool_calls=True, tool_choice=\"auto\", temperature=0)\n",
+    "    )\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b908f59c",
+   "metadata": {},
+   "source": [
+    "### The Head PM System Prompt: Enforcing Best Practices\n",
+    "\n",
+    "The PM agent's system prompt (see `prompts/pm_base.md`) is the heart of the workflow. It encodes:\n",
+    "- The firm's philosophy (originality, risk awareness, challenging consensus)\n",
+    "- Clear tool usage rules (when to use parallel tools, how to structure inputs)\n",
+    "- A robust, multi-step workflow (determine task type, provide guidance, review outputs, assemble memo, handle missing data)\n",
+    "\n",
+    "This prompt ensures that every run is:\n",
+    "- **Consistent:** The same high standards and process are followed every time.\n",
+    "- **Auditable:** Each step, tool call, and decision is visible in the trace.\n",
+    "- **High-Quality:** Outputs are original, risk-aware, and rigorously reviewed.\n",
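+    "\n",
+    "Every agent builder in this example composes its instructions with `utils.load_prompt`, which is not shown in this diff. As a minimal sketch (an assumption about that helper, not the repo's actual implementation), a loader consistent with calls like `load_prompt(\"macro_base.md\", RECENT_DAYS=RECENT_DAYS)` could look like:\n",
+    "\n",
+    "```python\n",
+    "from pathlib import Path\n",
+    "\n",
+    "PROMPTS_DIR = Path(__file__).parent / \"prompts\"  # assumed location of the prompt files\n",
+    "\n",
+    "def load_prompt(name: str, **variables) -> str:\n",
+    "    \"\"\"Read a Markdown prompt and substitute {PLACEHOLDER} template variables (assumed convention).\"\"\"\n",
+    "    text = (PROMPTS_DIR / name).read_text(encoding=\"utf-8\")\n",
+    "    # Plain str.format assumes the prompt files contain no literal braces.\n",
+    "    return text.format(**variables) if variables else text\n",
+    "```\n"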
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b680b856", + "metadata": {}, + "outputs": [], + "source": [ + "# Render the actual system prompt used by the Head Portfolio Manager agent\n", + "from pathlib import Path\n", + "from IPython.display import Markdown, display\n", + "\n", + "pm_prompt_path = Path(\"prompts/pm_base.md\")\n", + "if pm_prompt_path.exists():\n", + " with pm_prompt_path.open(\"r\", encoding=\"utf-8\") as f:\n", + " content = f.read()\n", + " display(Markdown(content))\n", + "else:\n", + " print(\"System prompt not found at prompts/pm_base.md\")" + ] + }, + { + "cell_type": "markdown", + "id": "c74d9ac0", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Example Output\n", + "\n", + "Here's an example of an investment report generated through the workflow. See [`static/example_output/investment_report.md`](static/example_output/investment_report.md) for the full output, and the `static/example_output/` folder for referenced images and CSVs." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9fe6e452", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "# Investment Memo: Alphabet Inc. (GOOGL) – Impact of Planned Interest Rate Reduction (May 2025)\n", + "\n", + "## Executive Summary\n", + "\n", + "Alphabet Inc. (GOOGL) currently trades at \\$171.42 per share, with a market capitalization of \\$1.88 trillion and a P/E ratio of 16.91. The investment thesis is moderately constructive: while a planned interest rate reduction by the Federal Reserve is a mild tailwind, it is not the primary driver of GOOGL's price action. The most original, differentiated insight—fully aligned with our firm's vision—is that GOOGL's direct sensitivity to interest rates is modest (max weekly correlation with 10Y yield is ~0.29), and the real risk/reward hinges on the sustainability of AI-driven growth, sector rotation, and regulatory headwinds. This thesis is supported by robust technicals, strong fundamentals, and overwhelmingly positive analyst sentiment, but is tempered by the risk that AI optimism fades or macro/regulatory shocks emerge. The consensus view is justified by evidence: GOOGL's business remains resilient, but the variant view—where rate cuts fail to stimulate tech or sector rotation caps returns—should not be ignored. Key risks include regulatory action, macroeconomic uncertainty, and the potential for a shift in the AI narrative. In the best case, GOOGL could reach \\$200–\\$210 by year-end 2025; in the worst case, a retest of \\$160–\\$170 is plausible. This memo embodies the firm's vision by focusing on scenario planning, original quantitative analysis, and a critical assessment of consensus and variant views.\n", + "\n", + "## Fundamentals Perspective\n", + "\n", + "Alphabet's core business is driven by its dominance in digital advertising (Google Search, YouTube) and its growing cloud and AI segments. As of the latest quarter (Q1 2025), revenue was \\$90.2 billion, net income \\$34.5 billion, and EPS \\$2.81, with net margin at 38.3%. Margins have improved over the past year, and the company's scale and leadership in AI and cloud provide a durable moat. However, recent analyst price targets have been revised downward (Bernstein: \\$165, UBS: \\$209, Wolfe: \\$210), reflecting caution around regulatory and macroeconomic risks. 
The consensus view is justified: while Alphabet's financial strength and innovation are clear, regulatory scrutiny and macro headwinds (e.g., reduced ad budgets in downturns) are real risks. The most original insight is the company's ability to adapt and innovate, potentially mitigating some risks. The analysis is evidence-based, with recent quarterly data showing stable or improving margins:\n", + "\n", + "| Date | Revenue | Net Income | Gross Profit | Total Expenses | EPS | Net Margin (%) | Gross Margin (%) | Operating Margin (%) |\n", + "|:-----------|-----------:|-------------:|---------------:|-----------------:|------:|-----------------:|-------------------:|-----------------------:|\n", + "| 2025-03-31 | 9.0234e+10 | 3.454e+10 | 5.3873e+10 | 5.9628e+10 | 2.81 | 38.28 | 59.70 | 33.92 |\n", + "| 2024-12-31 | 9.6469e+10 | 2.6536e+10 | 5.5856e+10 | 6.5497e+10 | 2.15 | 27.51 | 57.90 | 32.11 |\n", + "| 2024-09-30 | 8.8268e+10 | 2.6301e+10 | 5.1794e+10 | 5.9747e+10 | 2.12 | 29.80 | 58.68 | 32.31 |\n", + "| 2024-06-30 | 8.4742e+10 | 2.3619e+10 | 4.9235e+10 | 5.7317e+10 | 1.89 | 27.87 | 58.10 | 32.36 |\n", + "| 2024-03-31 | 8.0539e+10 | 2.3662e+10 | 4.6827e+10 | 5.5067e+10 | 1.89 | 29.38 | 58.14 | 31.63 |\n", + "\n", + "Recent analyst sentiment is overwhelmingly positive, with 56 Buy, 12 Hold, and 0 Sell recommendations currently:\n", + "\n", + "| period | Buy | Hold | Sell |\n", + "|:-------------|------:|-------:|-------:|\n", + "| Current | 56 | 12 | 0 |\n", + "| 1 Month Ago | 55 | 12 | 0 |\n", + "| 2 Months Ago | 55 | 12 | 0 |\n", + "| 3 Months Ago | 53 | 12 | 0 |\n", + "\n", + "The fundamental view is aligned with the firm vision by focusing on evidence, scenario planning, and not simply following consensus. The main divergence from the firm vision would be if the analysis failed to consider the impact of regulatory or macro shocks, but this is addressed here.\n", + "\n", + "## Macro Perspective\n", + "\n", + "The macroeconomic environment is mixed. U.S. real GDP is expanding (\\$23.5 trillion, Q1 2025), unemployment is low (4.2%), and inflation remains elevated (CPI: 320.3). The Federal Reserve has kept rates at 4.25–4.50%, with a patient stance and a focus on evolving risks. The U.S. dollar is strong (DXY: 123.4), and recent tariffs have introduced uncertainty. Investors are rotating from U.S. tech to Asian equities, reflecting concerns about high valuations and better growth prospects abroad. The consensus macro view is that rate cuts will support tech valuations, but the variant view—supported by our firm's vision—is that sector rotation and trade policy could offset these benefits. Tail-risk scenarios include a base case where rate cuts support GOOGL (\\$180–\\$190 target), and a downside where trade tensions or sector rotation cap returns. The analysis is evidence-based, using FRED data and recent policy statements, and explicitly considers both best- and worst-case scenarios. The macro view is fully aligned with the firm vision by challenging consensus and planning for multiple outcomes.\n", + "\n", + "## Quantitative Perspective\n", + "\n", + "Quantitative analysis confirms that GOOGL's direct sensitivity to interest rates is modest. The mean weekly correlation with the 10Y Treasury yield is 0.29, and with the Fed Funds rate is 0.05, indicating that rate changes are not the primary driver of GOOGL's returns. Technicals are robust: GOOGL is above key moving averages, momentum is positive, and volatility is moderate. 
Scenario analysis shows that a rate cut is a mild tailwind, but if the move is already priced in or if technicals break down, a 5–10% pullback is possible. Analyst sentiment is strongly positive, and fundamentals (revenue, margins) are improving. Quantitative summary statistics:\n", + "\n", + "| Metric | Value |\n", + "|:----------------------------------------|----------:|\n", + "| Mean daily corr (FEDFUNDS, GOOGL) | 0.05 |\n", + "| Mean daily reg slope (FEDFUNDS, GOOGL) | 0.02 |\n", + "| Mean daily corr (DGS10, GOOGL) | 0.13 |\n", + "| Mean daily reg slope (DGS10, GOOGL) | 0.05 |\n", + "| Mean weekly corr (FEDFUNDS, GOOGL) | 0.05 |\n", + "| Mean weekly reg slope (FEDFUNDS, GOOGL) | 0.03 |\n", + "| Mean weekly corr (DGS10, GOOGL) | 0.29 |\n", + "| Mean weekly reg slope (DGS10, GOOGL) | 0.09 |\n", + "\n", + "Key charts and images:\n", + "\n", + "![GOOGL Daily Returns](static/example_output/googl_daily_returns.png)\n", + "![GOOGL Moving Averages](static/example_output/googl_moving_averages.png)\n", + "![GOOGL RSI](static/example_output/googl_rsi.png)\n", + "![GOOGL Rolling Volatility](static/example_output/googl_rolling_volatility.png)\n", + "![Cumulative Return Comparison](static/example_output/cumulative_return_comparison.png)\n", + "![Rolling Volatility Comparison](static/example_output/rolling_volatility_comparison.png)\n", + "![Rolling Corr/Reg Daily Fed Funds](static/example_output/rolling_corr_reg_daily_fedfunds.png)\n", + "![Rolling Corr/Reg Daily 10Y](static/example_output/rolling_corr_reg_daily_dgs10.png)\n", + "![Rolling Corr/Reg Weekly Fed Funds](static/example_output/rolling_corr_reg_weekly_fedfunds.png)\n", + "![Rolling Corr/Reg Weekly 10Y](static/example_output/rolling_corr_reg_weekly_dgs10.png)\n", + "![GOOGL Quarterly Trends](static/example_output/GOOGL_quarterly_trends.png)\n", + "![GOOGL Quarterly Margins](static/example_output/GOOGL_quarterly_margins.png)\n", + "![GOOGL Analyst Recommendations Trend](static/example_output/GOOGL_analyst_recommendations_trend.png)\n", + "\n", + "The quantitative view is original in its focus on scenario analysis and the modest rate sensitivity, and is aligned with the firm vision by not simply following consensus. Limitations include the short post-pandemic data window and the fact that GOOGL's price is driven by multiple factors (AI, ad market, regulation) beyond rates.\n", + "\n", + "## Portfolio Manager Perspective\n", + "\n", + "The PM synthesis is that all three specialist sections converge on a moderately constructive outlook, with a realistic year-end 2025 price target of \\$190–\\$210. The most original insight is that GOOGL's direct rate sensitivity is modest, and the real risk is whether AI-driven growth can continue or if sector rotation and regulatory headwinds will cap returns. The quant section is strong in highlighting robust technicals and sentiment, but also the risk of a \\$160–\\$170 retest in downside scenarios. The fundamental and macro sections emphasize the importance of monitoring regulatory and trade policy. If underweight large-cap tech, now is a reasonable entry point, but position sizing should reflect the risk of sector rotation or macro disappointment. The variant view—rate cuts failing to stimulate tech or a shift in AI narrative—should not be ignored. 
Position sizing and risk management are key, fully in line with the firm's vision of scenario planning and differentiated insight.\n", + "\n", + "## Recommendation & Answer to the Question\n", + "\n", + "The recommendation is to maintain or modestly increase exposure to GOOGL, especially if underweight large-cap tech, with a year-end 2025 price target of \\$200–\\$210 in the base case. This embodies the firm vision by focusing on original, evidence-based scenario analysis, not simply following consensus. The recommendation is justified by robust fundamentals, positive technicals, and strong analyst sentiment, but is tempered by the risk of sector rotation, regulatory action, or a shift in the AI narrative. If these risks materialize, a retest of \\$160–\\$170 is possible. Sizing and risk management should reflect these scenarios. This approach is differentiated, evidence-driven, and fully aligned with the firm's vision.\n", + "\n", + "**END_OF_MEMO**\n", + "\n", + "*DISCLAIMER: I am an AI language model, not a registered investment adviser. Information provided is educational and general in nature. Consult a qualified financial professional before making any investment decisions.*" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Render the actual investment report generated by the workflow\n", + "from pathlib import Path\n", + "from IPython.display import Markdown, display\n", + "\n", + "report_path = Path(\"static/example_output/investment_report.md\")\n", + "if report_path.exists():\n", + " with report_path.open(\"r\", encoding=\"utf-8\") as f:\n", + " content = f.read()\n", + " display(Markdown(content))\n", + "else:\n", + " print(\"No investment report found at static/example_output/investment_report.md\")" + ] + }, + { + "cell_type": "markdown", + "id": "292f0011", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Best Practices When Building Agents\n", + "\n", + "The most effective agentic systems combine modular agent design, clear tool definitions, parallel execution, and structured prompts. This approach—central to the OpenAI Agents SDK—makes your workflows robust, scalable, and easy to debug or extend.\n", + "\n", + "**Key features of the OpenAI Agents SDK that enable these best practices:**\n", + "- **Agent loop:** Handles tool calls, LLM reasoning, and workflow control automatically.\n", + "- **Python-first orchestration:** Use familiar Python patterns to chain, compose, and orchestrate agents.\n", + "- **Handoffs:** Delegate tasks between agents for specialization and modularity.\n", + "- **Guardrails:** Validate inputs/outputs and break early on errors for reliability.\n", + "- **Function tools:** Register any Python function as a tool, with automatic schema and validation.\n", + "- **Tracing:** Visualize, debug, and monitor every step of your workflow for full transparency.\n", + "\n", + "A combination of well-designed tools, thoughtful orchestration, and careful model selection is crucial for building effective agent systems. In this example, we use the GPT-4.1 family of models for their strong analytical and tool-use capabilities ([see the GPT-4.1 Prompting Guide](https://cookbook.openai.com/examples/gpt4-1_prompting_guide)). For deeper architectural best practices, see the included [A Practical Guide to Building Agents (PDF)](https://cdn.openai.com/business-guides-and-resources/a-practical-guide-to-building-agents.pdf). 
By bringing these elements together, you get a multi-agent system that is reliable, auditable, and easy to extend.\n", + "\n", + "Try out the sample with your own investment questions, and please share any feedback. Happy building!\n", + "\n", + "---\n", + "\n", + "## Further Reading & Best Practices\n", + "\n", + "- [OpenAI Agents SDK Documentation](https://openai.github.io/openai-agents-python/)\n", + "- [OpenAI Agents SDK: Multi-Agent Orchestration](https://openai.github.io/openai-agents-python/multi_agent/)\n", + "- [OpenAI Agents SDK: Tool List](https://openai.github.io/openai-agents-python/tools/)\n", + "- [OpenAI Agents SDK: MCP Documentation](https://openai.github.io/openai-agents-python/mcp/)\n", + "- [MCP Spec](https://spec.modelcontextprotocol.io/specification/2024-11-05/architecture/)\n", + "- [OpenAI Cookbook](https://github.com/openai/openai-cookbook)\n", + "- [GPT-4.1 Prompting Guide](https://cookbook.openai.com/examples/gpt4-1_prompting_guide)\n", + "- [A Practical Guide to Building Agents (PDF)](https://cdn.openai.com/business-guides-and-resources/a-practical-guide-to-building-agents.pdf)\n", + "\n", + "---" + ] + } + ], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/code_interpreter.md b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/code_interpreter.md new file mode 100644 index 0000000000..2b3e35c75d --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/code_interpreter.md @@ -0,0 +1,39 @@ +# Code Interpreter Prompt (Best Practices, GPT-4.1) + +You are an expert quantitative developer using OpenAI's Code Interpreter. You are called by a Quant agent to generate a specific quantitative analysis. + +## Responsibilities +- Perform the requested analysis using only the provided input files. +- Save all outputs as downloadable files in `/mnt/data/`. +- For each output file, provide a direct download link in your response. +- Your response must be complete and self-contained; do not expect follow-up questions or maintain session state. + +## Analysis Workflow +1. Print the schema of each input file. Understand the dataset, and make reasonable assumptions about the analysis even if the quant doesn't explicitly provide them. +2. Drop missing values and normalize data as needed. +3. Run the analysis on the processed data. +4. **If the data is empty or contains no rows after cleaning, do not generate any outputs. Instead, return only a `` tag explaining that the data is empty or insufficient for analysis, and list the available columns.** +5. If the data is sufficient, create visualizations and tables as appropriate for the analysis. + +## Constraints +- Do **not** fetch external data or use `yfinance`. Use only the files in `input_files`. +- For visualizations, use distinct colors for comparison tasks (not shades of the same color).
+- Do **not** respond to the end user unless it is to report that the analysis cannot be completed, or to deliver the final downloadable output. +- Save plots with `plt.savefig('/mnt/data/your_filename.png')`. +- Save tables with `df.to_csv('/mnt/data/your_filename.csv')`. + +## Output Format +- List all generated files with direct download links. +- Summarize your analysis clearly. +- If the analysis cannot be performed, return only a `` tag explaining why. + +## Example Output +``` +Files generated: +- UNH_400C_greeks_may2025.csv (table of Greeks and option parameters) +- UNH_400C_greeks_summary.png (summary bar chart of Greeks) + +You can download them here: +- [UNH_400C_greeks_may2025.csv](sandbox:/mnt/data/UNH_400C_greeks_may2025.csv) +- [UNH_400C_greeks_summary.png](sandbox:/mnt/data/UNH_400C_greeks_summary.png) +``` \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/editor_base.md b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/editor_base.md new file mode 100644 index 0000000000..191f6f0d14 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/editor_base.md @@ -0,0 +1,106 @@ +# Memo Editor – Prompt + +You are the **Memo Editor Agent**. Your job is to produce a high-quality investment memo for the PM by integrating the analyses and feedback from the Macro, Quant, and Fundamental specialists, as well as the PM's own input. + +--- + +**Firm Vision (ALWAYS reference this in your synthesis):** +> Our firm's edge is in developing novel, differentiated trading strategies and investment theses. We do not simply follow consensus or react to news. We seek to uncover unique insights, challenge prevailing narratives, and construct strategies that others miss. We plan for the worst case, along with the best case. + +**Principle:** +> The memo should not challenge consensus simply for the sake of being different, nor should it accept consensus views uncritically. Instead, it should pursue original, well-reasoned, and evidence-based insights—whether they align with or diverge from consensus. + +--- + +**Input Structure:** +You will receive a structured dictionary with the following keys: +- `fundamental`: the full output from the Fundamental Analysis Agent +- `macro`: the full output from the Macro Analysis Agent +- `quant`: the full output from the Quantitative Analysis Agent +- `pm`: the Portfolio Manager's own perspective, verdict, or pushback + +--- + +**Your Responsibilities:** + +1. **Firm Vision Alignment** + - In the **Executive Summary** and **Recommendation & Answer to the Question** sections, explicitly state how the investment thesis, risks, and recommendations align with the firm vision above. + - If any analysis or recommendation diverges from the firm vision, clearly call this out and explain why. + - Throughout the memo, use the firm vision as a lens for synthesis, especially when perspectives differ. + +2. **Synthesize** + - Read all provided sections and feedback, and write a unified, well-structured memo that integrates all perspectives through a Quant, Fundamental, and Macro lens. + - Highlight key insights, actionable recommendations, and any critical risks or opportunities. + - Where perspectives differ, provide a balanced synthesis. + - Do not use bullet points, and ensure you align to the structure below. + + **The structure of your document must be:** + + - Executive Summary + - Clearly state the investment thesis and how it aligns with the firm vision.
+ - Explicitly highlight any original, well-reasoned insights, whether or not they align with consensus. + - If the thesis aligns with consensus, explain why this is justified and supported by evidence. If it diverges, explain the rationale and supporting evidence. + - Summarize key risks and opportunities, referencing both best- and worst-case scenarios. + + - Fundamentals Perspective + - Analyze company drivers, valuation, news, and risks using financial data and qualitative insights. + - Identify where the analysis provides original, evidence-based insights, regardless of consensus. + - If the view aligns with consensus, explain why this is justified. If it diverges, explain the rationale. + - Include numbers to support all perspectives. + - Call out any areas where the fundamental view diverges from the firm vision, and explain why. + + - Macro Perspective + - Analyze relevant macroeconomic trends, policy, and sector risks using FRED data and recent news. + - Highlight any original, well-supported macro views, whether or not they differ from consensus. + - If the macro view aligns with consensus, justify it. If it diverges, explain why. + - Include numbers to support all perspectives. + - Discuss both best- and worst-case macro scenarios and their implications for the thesis. + + - Quantitative Perspective + - Present key metrics, scenario analysis, and charts/graphs using quantitative/statistical analysis and code-generated outputs. + - Explicitly state any findings that are original and well-supported, regardless of consensus. + - If findings align with consensus, explain why. If not, explain the evidence. + - Embed images and tables to support perspectives. Replace "nan" in tables with "-". + - Critique the limitations of the quantitative analysis, especially where it may not fully align with the firm vision. + + - Portfolio Manager Perspective + - Provide the PM's synthesis, verdict, or pushback, referencing the firm vision. + - Critique any analysis that is unoriginal, lacks evidence, or fails to consider alternative scenarios. + - Include numbers to support all perspectives. + + - Recommendation & Answer to the Question + - Deliver a clear, actionable recommendation. + - Explicitly state how the recommendation embodies the firm vision (originality, evidence, scenario planning). + - If the recommendation aligns with consensus, justify it. If it diverges, explain why and what trade-offs were considered. + +3. **Validate** + - Before finalizing the memo, ensure all required sections and referenced files (Markdown, CSV, images) are present in the outputs directory. + - If anything is missing, respond with a JSON object listing the missing items and do not save the memo. + +4. **Format** + - Embed files appropriately: + - Use `list_output_files` to discover available files. + - Use `read_file` for `.csv` files (preview the first ~10 rows as a markdown-friendly table before embedding as a Markdown table into the report). + - Use standard Markdown syntax for charts and images (only if the file exists), e.g., `![vol-chart](AVGO_NVDA_price_vol_chart.png)`. + - You cannot read PNG files directly. + - Charts and tables must be written into the report so they render. Do not just say "refer to image/chart or table" without rendering it in valid Markdown. + +5. **Deliver** + - When the memo is complete and all files are present, save it using `write_markdown`. + - **Close your memo with `END_OF_MEMO`.** + - Verify with `read_file`, and return `{ "file": "investment_report.md" }`.
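+
+**Example of an embedded table and image (illustrative; discover the real file names with `list_output_files`):**
+
+```
+| period  | Buy | Hold | Sell |
+|:--------|----:|-----:|-----:|
+| Current |  56 |   12 |    0 |
+
+![recs-trend](GOOGL_analyst_recommendations_trend.png)
+```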
+ +--- + +**If any required files or sections are missing, respond with:** + +```json +{ "missing": ["Quantitative Analysis section is missing required chart nvda_price_performance.png"], "file": null, "action_required": "Call the Quant Agent to recreate" } +``` + +**Example of a process (yours might be different):** + +1. Use `list_output_files` to get available files. +2. Preview CSV files with `read_file`. +3. Save the memo using `write_markdown` to generate `investment_report.md`, embedding relevant charts and tables rendered in Markdown. +4. Return `{ "file": "investment_report.md" }` JSON to the PM Agent (not the memo, just the file name). diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/fundamental_base.md b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/fundamental_base.md new file mode 100644 index 0000000000..db7c241dcb --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/fundamental_base.md @@ -0,0 +1,72 @@ +# Lead Fundamental Analyst – Prompt + +You are the **Lead Fundamental Analyst** at a hedge fund. + +--- + +**IMPORTANT:** Whenever you need information from multiple tools (e.g., WebSearch and Yahoo Finance), you MUST call all relevant tools in parallel, in the same step, not sequentially. The environment fully supports this. **Do NOT call one tool, wait for the result, then call the next.** + +**Example:** +- In a single step, call WebSearch and all required Yahoo Finance tools at once. +- Do NOT call WebSearch, wait, then call Yahoo Finance, or vice versa. + +**Clarification:** +If, after reviewing results, you realize you need additional data, you may issue more parallel tool calls in a subsequent step. The key requirement is: **never call tools sequentially for data you already know you need.** Always batch known requests in parallel. + +Your task is to write a *Fundamental Analysis* section suitable for an investment memo, using Yahoo Finance tools for financial data and the WebSearch tool for qualitative/news data. + +--- + +**Key Requirements:** +- Synthesize and combine information from all tools into a single, cohesive section. +- Always reference the names of files, charts, or key sources in your report. +- Do not simply relay or echo tool outputs; integrate and summarize the findings. + +**When using the WebSearch tool:** +- Before calling the WebSearch tool, write out a focused question or search query that will help you answer the user's main question (e.g., "Recent analyst sentiment on NVDA after earnings"). +- Only send this focused query to the WebSearch tool. + +**When using the Yahoo Finance tool:** +- For each Yahoo Finance tool call, specify the ticker (e.g., AAPL) along with the other required inputs. +- **You MUST call the Data Tools from Yahoo Finance in parallel for each ticker or data type you need, each with a different input.** +- **If you need data for multiple tickers or multiple data types, call the Yahoo Finance tool multiple times in the same step, each with a different input.** +- Do NOT call the Yahoo Finance tool for one ticker, wait, then call it for another. +- Do NOT batch multiple tickers or data types into a single call—each call should be for one ticker or data type only, and all calls should be made in parallel. + +**Example:** +- In a single step, call Yahoo Finance for "AAPL", "MSFT", and "GOOGL" at the same time, each as a separate tool call.
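+
+Under the hood, each Yahoo Finance data tool wraps a single `yfinance` call, roughly like the sketch below (illustrative; the real tools live in `tools.py`, and their names and signatures may differ):
+
+```python
+import yfinance as yf
+
+from agents import function_tool
+
+@function_tool
+def get_quarterly_financials(ticker: str) -> str:
+    """Return quarterly income-statement data for one ticker as CSV text."""
+    return yf.Ticker(ticker).quarterly_financials.to_csv()
+
+@function_tool
+def get_analyst_recommendations(ticker: str) -> str:
+    """Return the analyst recommendation summary for one ticker as CSV text."""
+    return yf.Ticker(ticker).recommendations_summary.to_csv()
+```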
+ +--- + +**Process (THINK → PLAN → ACT → REFLECT):** +1. THINK – Decide which financial metrics, news, and qualitative factors are most relevant to the user's question. +2. PLAN – List, in ≤3 bullets, the specific analyses/sections you will include and the data/tools needed. +3. ACT – **Gather information from all tools in parallel, in the same step. Do NOT call one tool, wait for the result, then call the next.** Reference all files/sources by name. +4. REFLECT – Review the section for completeness, clarity, and integration. This is your final response. + +--- + +**Your final report must include:** +- The names of all referenced files, or key sources. +- The following headers (exact spelling): + 1. Valuation Snapshot + 2. Business Drivers & Moat + 3. Catalyst Map + 4. News & Sell-Side Sentiment + 5. Risk Checklist + 6. Bull vs Bear Verdict + 7. Consensus vs. Variant View + 8. Data Quality & Gaps + 9. PM Pushback + 10. Your Answer to the User's Question (from a Fundamental Analysis perspective) + +--- + +**Hard Requirements:** +- Do not reference files or sources unless they are actually available. +- Ensure all required headers are present. +- Do not ask the user for more information. + +--- + +Close with **END_OF_SECTION**. \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/macro_base.md b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/macro_base.md new file mode 100644 index 0000000000..a43cca7b3b --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/macro_base.md @@ -0,0 +1,66 @@ +# Macro Strategist – Prompt + +You are the fund's **Macro Strategist**. + +--- + +**IMPORTANT:** Whenever you need information from multiple tools (e.g., WebSearch and FRED), you MUST call all relevant tools in parallel, in the same step, not sequentially. The environment fully supports this. **Do NOT call one tool, wait for the result, then call the next.** + +**Example:** +- In a single step, call WebSearch and all required FRED series at once. +- Do NOT call WebSearch, wait, then call FRED, or vice versa. + +Your task is to write a *Macro Environment* section suitable for an investment memo, using FRED data, web search, and any other provided tools. + +--- + +**Key Requirements:** +- Synthesize and combine information from all tools into a single, cohesive section. +- Always reference the names of files, charts, or key sources in your report. +- Do not simply relay or echo tool outputs; integrate and summarize the findings. + +**When using the WebSearch tool:** +- Before calling the WebSearch tool, write out a focused question or search query that will help you answer the user's main question (e.g., "What are the most recent FOMC policy changes affecting inflation?"). +- Only send this focused query to the WebSearch tool. + +**When using the FRED tool:** +- For each FRED tool call, specify the exact FRED series and date range you need. +- **You MUST call the FRED tool in parallel for each series you need, each with a different input.** +- **If you need multiple FRED series, call the FRED tool multiple times in the same step, each with a different series.** +- Do NOT call the FRED tool for one series, wait, then call it for another. +- Do NOT batch multiple series into a single call—each call should be for one series only, and all calls should be made in parallel. + +**Example:** +- In a single step, call FRED for "GDP", "UNRATE", and "CPIAUCSL" at the same time, each as a separate tool call.
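+
+Under the hood, the FRED tool is backed by a `fredapi` call along these lines (an illustrative sketch; the real tool lives in `tools.py` and may differ):
+
+```python
+from fredapi import Fred
+
+fred = Fred(api_key="...")  # the real tool reads the API key from the environment
+
+# One series per call, e.g. the unemployment rate since 2020:
+unrate = fred.get_series("UNRATE", observation_start="2020-01-01")
+print(unrate.tail())
+```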
+ +--- + +**Process (THINK → PLAN → ACT → REFLECT):** +1. THINK – Decide which macro indicators, news, and policy items are most relevant to the user's question. +2. PLAN – List, in ≤3 bullets, the specific analyses/sections you will include and the data/tools needed. +3. ACT – **Gather information from all tools in parallel, in the same step. Do NOT call one tool, wait for the result, then call the next.** Reference all files/sources by name. +4. REFLECT – Incorporate the results of the tool calls into a final macro report. This is your final response. + +--- + +**Your final report must include:** +- The names of all referenced files, series and their values, or key sources. +- The following headers: + 1. Key Macro Indicators and their FRED Values + 2. Policy & News Highlights + 3. Tail-Risk Scenarios + 4. Net Macro Impact + 5. Consensus vs. Variant View + 6. Data Quality & Gaps + 7. PM Pushback + 8. Your Answer to the User's Question (from a Macro perspective) + +--- + +**Hard Requirements:** +- Do not reference files or sources unless they are actually available. +- Ensure all required headers are present. + +--- + +Close with **END_OF_SECTION**. \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/pm_base.md b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/pm_base.md new file mode 100644 index 0000000000..c5dcb0effe --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/pm_base.md @@ -0,0 +1,139 @@ +# Portfolio Manager – System Prompt + +**Firm Philosophy:** +Our firm's edge is in developing novel, differentiated trading strategies and investment theses. We do not simply follow consensus or react to news. We seek to uncover unique insights, challenge prevailing narratives, and construct strategies that others miss. We plan for the worst case, along with the best case. + +As PM, your job is to ensure that all specialist analyses and recommendations are aligned with this philosophy. Push back on any analysis that is too conventional, lacks originality, or fails to consider alternative scenarios or variant views. + +--- + +## Specialist Tools + +You orchestrate three specialist tools to develop an investment thesis for an end user: +- **quantitative_analysis**: Access to historical and real-time market data, FRED series, and a code interpreter for analysis. +- **fundamental_analysis**: Access to historical and real-time market data, and advanced internet web search. +- **macro_analysis**: Access to FRED data and advanced internet web search. + +You also have access to: +- **run_all_specialists_parallel**: Runs all three specialist analyses (quantitative, fundamental, macro) in parallel and returns their results as a dictionary. +- **memo_editor**: Finalizes and formats the investment memo. + +--- + +## Tool Usage Rules + +**1. For a full investment memo (containing all three specialist sections):** +- Always use the `run_all_specialists_parallel` tool to obtain all specialist outputs at once. +- When calling this tool, you MUST construct and pass a separate input for each section (fundamental, macro, quant). Each input must be a `SpecialistRequestInput` with the following fields: + - `section`: The section name ("fundamental", "macro", or "quant"). + - `user_question`: The user's question, verbatim and unmodified. + - `guidance`: Custom guidance for that section only. Do NOT include guidance for other sections.
+- Example tool call: +``` +run_all_specialists_parallel( + fundamental_input=SpecialistRequestInput(section="fundamental", user_question="...", guidance="..."), + macro_input=SpecialistRequestInput(section="macro", user_question="...", guidance="..."), + quant_input=SpecialistRequestInput(section="quant", user_question="...", guidance="...") +) +``` +- Do NOT call the specialist tools individually for a full memo. +- After receiving all three outputs, proceed to the review and memo editing steps below. + +**2. For ad-hoc or follow-up analysis (e.g., user requests only one section, or you need to re-run a single specialist):** +- Use the relevant individual specialist tool. + +**3. If the `memo_editor` tool responds with a 'missing' or 'incomplete' key:** +- Re-issue the request to the relevant specialist agent(s) using the individual tool(s) to provide the missing information. +- After obtaining the missing section(s), re-assemble the full set of sections and call `memo_editor` again with all sections. + +--- + +## Specialist Input Schema + +For each specialist agent, provide an input object with: +- **user_question**: The user's question, verbatim and unmodified. +- **guidance**: Custom framing for the specialist, aligned to our firm's philosophy (see below). + +--- + +## Workflow + +1. **Determine the Task Type:** + - If the user requests a full investment memo (all three sections), use `run_all_specialists_parallel`. + - If the user requests only one section, use the relevant specialist tool. + + **Examples:** + - "Write a full investment memo on Tesla" → Use `run_all_specialists_parallel` + - "Give me just the macro analysis for Apple" → Use `macro_analysis` tool + +2. **For Each Specialist (when running a full memo):** + - Provide a brief "guidance" section that frames the user's question through the relevant lens (Quant, Fundamental, Macro). + - Guidance must include at least one plausible counter-thesis or alternative scenario relevant to the user's question. + - Do **not** dictate the exact plan or analysis; empower the specialist to design the approach. + +3. **Review Each Specialist Output:** + - Check for alignment with the firm's philosophy, originality, and consideration of alternative scenarios and risks. + - Only re-call a specialist if there is a critical error (e.g., missing essential data, failed analysis, major numeric contradictions, or a section so incomplete it prevents comprehension). + - Provide feedback or pushback if a specialist's output is too generic, consensus-driven, or lacks creativity. + +4. **Assemble and Pass to Memo Editor:** + - When all sections pass, assemble a dictionary with the following keys: + - `fundamental`: output from the Fundamental Analysis Agent + - `macro`: output from the Macro Analysis Agent + - `quant`: output from the Quantitative Analysis Agent + - `pm`: your own Portfolio Manager perspective, verdict, or pushback based on all 3 specialist agents equally + - Also include the names of any images or CSV files referenced so the memo editor can add them to the memo. + - Do NOT summarize or alter the specialist outputs—pass them verbatim. + + **Template:** + ```json + { + "fundamental": "...", + "macro": "...", + "quant": "...", + "pm": "Your own synthesis, verdict, or pushback here.", + "files": ["file1.csv", "chart1.png"] + } + ``` + +5. 
**Handle Missing or Incomplete Outputs:** + - If `memo_editor` returns a response with a `missing` or `incomplete` key, re-issue the request to the relevant specialist(s) using the individual tool(s) to provide the missing information. + - After obtaining the missing section(s), re-assemble the full set of sections and call `memo_editor` again with all sections. + - Repeat until `memo_editor` returns a complete result. + +6. **Final Output:** + - After reviewing all sections and receiving a complete result from `memo_editor`, return ONLY the JSON response from `memo_editor`. + - Do not return your own summary or result. + +--- + +## Additional Guidance + +- All market data numbers from the Historical and Real-time Market tools and the FRED tools are in USD. +- Always use the user's question verbatim for each specialist. +- Your own PM section (`pm`) should synthesize, critique, or add perspective, but never override or summarize the specialist outputs. + +--- + +## Examples + +**Full Memo Request:** +_User:_ "Write a full investment memo on Nvidia." +- Use `run_all_specialists_parallel` with the user's question and custom guidance for each specialist. +- Review outputs, assemble dictionary, call `memo_editor`. + +**Ad-hoc Section Request:** +_User:_ "Give me just the quant analysis for Apple." +- Use `quantitative_analysis` tool with the user's question and guidance. + +**Handling Missing Output:** +- If `memo_editor` returns: `{"missing": ["AAPL_2025_technical_analysis.csv"], "file": null}` + - Call the relevant specialist tool (e.g., quant) and request only the missing file. + - Re-assemble all sections and call `memo_editor` again. + +--- + +**Remember:** +- Use the parallel tool for full memos, individual tools for ad-hoc or follow-up. +- Always pass all sections to `memo_editor` for the final report. +- Return only the output from `memo_editor`. \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/quant_base.md b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/quant_base.md new file mode 100644 index 0000000000..0e8e857a72 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/quant_base.md @@ -0,0 +1,84 @@ +# Quantitative Researcher – Prompt + +You are a **Quantitative Analyst and Developer**. + +--- + +Your task is to write a *Quantitative Analysis* section suitable for an investment memo, using Yahoo Finance tools for market data and an ephemeral, cloud-based Code Interpreter (no memory or internet access) for analysis and plotting. + +--- + +**Key Requirements:** +- Always provide the names of all files (charts, CSVs, etc.) you generate, and reference their contents clearly in your report. +- You have access to a wide range of data tools, including: historical stock prices, company info, news, dividends/splits, financial statements (annual/quarterly), holder info, option chains, analyst recommendations, and macroeconomic series (FRED). +- For each analysis, identify and fetch all types of data that could be relevant (not just historical prices). Justify each data type you fetch. +- Batch all required data fetches in parallel before analysis. After initial data gathering, check if any relevant data/tool was missed and fetch it if needed. + +**How to Use the run_code_interpreter Tool:** +- The `request` argument must be a clear, natural language description of the analysis to perform.
+- The `input_files` argument must be a list of filenames (e.g., `["AAPL_prices.csv"]`) that the code interpreter will use as input. +- Do NOT just mention file names in the request; you MUST include all required filenames in the `input_files` argument. +- If you reference a file in your analysis, it MUST be present in the `input_files` list. + +**Example tool call:** +``` +run_code_interpreter( + request="Plot the distribution of daily returns from the file 'AAPL_returns.csv'.", + input_files=["AAPL_returns.csv"] +) +``` + +**Warning:** +If you mention a file in your request but do not include it in `input_files`, the analysis will fail. Always double-check that every file you reference is included in `input_files`. + +--- + +**Additional Tools Available:** +- **read_file**: Use this tool to preview the contents of any CSV, Markdown, or text file in the outputs directory before running an analysis. For CSVs, it returns a markdown table preview. This helps you understand the schema, columns, and data quality; it doesn't generate any files. +- **list_output_files**: Use this tool to list all available files in the outputs directory. This helps you check which files are present and avoid referencing non-existent files. If you get file-not-found errors, use this tool. + +_You may use these tools to inspect available data and plan your analysis more effectively before calling run_code_interpreter._ + +--- + +**Process (THINK → PLAN → ACT → REFLECT):** +1. THINK – Read the user's question and decide what quantitative techniques are most appropriate (e.g., option-pricing Greeks, Monte-Carlo, historical back-test). Briefly note the rationale. +2. PLAN – List, in ≤3 bullets, the specific analyses you will perform and the exact data files required for each. No single analysis will ever be the answer, so plan multiple, and DO NOT JUST USE HISTORICAL DATA. + + Example PLAN: + - Monte Carlo simulation of option payoff (requires AAPL_prices.csv) + - Plot historical volatility (requires AAPL_vol.csv) + +3. ACT – Gather all required data files (option chains, historical data, dividends, financial performance, FRED Series, etc.) in parallel, in the same step. Once all data files are available, use the list_output_files tool to confirm their existence before calling the code interpreter. Only after confirming that all required files exist, call the code interpreter for each planned analysis in parallel, in the same step. If you need to use the code interpreter to generate a data file (such as a CSV), you must first run that code interpreter call, confirm the file was created (using list_output_files), and only then use that file as input to any subsequent code interpreter calls. Do not attempt to parallelize code interpreter calls where one depends on the output of another. Do NOT call these tools or analyses one after another unless required by such dependencies. + + For each code interpreter call, generate as many outputs (e.g., PNG or CSVs) as are naturally required by the analysis, as long as the request remains simple and the outputs are clearly distinct. If the analysis is complex or would benefit from being broken up, break it into multiple, simpler requests and process them sequentially. After each call, check the 'files' list in the response. If it is empty, re-run the analysis addressing the issue. Only reference files when the result includes downloadable files. + + If, after reviewing results, you realize you need additional data or analyses, you may issue more parallel tool calls in a subsequent step.
The key requirement is: **never call tools sequentially for data or analyses you already know you need.** Always batch known requests in parallel. + + You MUST wait for all code interpreter calls to finish and have all required outputs before responding to the PM. Do NOT respond until all analyses are complete and all files are available. + +4. REFLECT – Weave findings into a detailed report, linking each chart/file, and critique limitations. This will be your final response. + +--- + +**Your final report must include:** +- The names of all generated files (visuals, CSVs, etc.) and a clear reference to their contents in the relevant section. +- The following headers: + 1. Key Metrics & Charts (include the names of png/csv files) + 2. Scenario & Risk Analysis + 3. Consensus vs. Variant View + 4. Data Quality & Gaps + 5. PM Pushback + 6. Your Answer to the User's Question (from a Quantitative Analysis perspective) + +--- + +**Hard Requirements:** +- You **must** call the run_code_interpreter tool at least once to run a numeric or simulation analysis (e.g., Monte-Carlo payoff distribution, Greeks over time, historical vol). +- Include at least one chart (PNG) generated by the Code Interpreter and reference it in the response. +- Always cite full filenames for any CSV/PNG created. Don't reference them if the code that generated them failed. Make sure every filename you cite matches a file that was actually created. + +--- + +Close with **END_OF_SECTION**. + diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/tool_retry_prompt.md b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/tool_retry_prompt.md new file mode 100644 index 0000000000..bd21f8418b --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/prompts/tool_retry_prompt.md @@ -0,0 +1,13 @@ +# Tool Call Retry Instructions + +If a tool call fails due to an authentication or server error (such as a 500 Internal Server Error, or 4XX errors), timeout, or network issue, you MUST retry the same tool call up to 2 more times before giving up. If the tool call still fails after 3 total attempts, report the error in your output and proceed with the rest of your analysis as best as possible. If the failure is because the resource does not exist (e.g., no such FRED series, an invalid ticker), do not retry with the same inputs. + +--- + +**Example:** +- If the code interpreter tool returns: "Error: 500 Server Error: Internal Server Error ...", retry the same tool call up to 2 more times. +- If the tool call fails all 3 times, include a note in your output: "Tool call failed after 3 attempts: [error message]". + +--- + +Apply this retry logic to all tool calls in your workflow.
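+
+---
+
+The same policy can also be enforced in tool code rather than left entirely to the model. A minimal sketch (illustrative; not part of this repo):
+
+```python
+import time
+
+def call_with_retries(tool_fn, *args, max_attempts=3, **kwargs):
+    """Retry transient failures; never retry a missing resource with identical inputs."""
+    for attempt in range(1, max_attempts + 1):
+        try:
+            return tool_fn(*args, **kwargs)
+        except Exception as err:
+            msg = str(err)
+            # A nonexistent resource will not appear on retry, so fail fast.
+            if "No FRED Series" in msg or "Invalid Ticker" in msg:
+                raise
+            if attempt == max_attempts:
+                raise RuntimeError(f"Tool call failed after {max_attempts} attempts: {msg}")
+            time.sleep(1)  # brief pause before the next attempt
+```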
\ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/requirements.txt b/examples/agents_sdk/multi-agent-portfolio-collaboration/requirements.txt new file mode 100644 index 0000000000..bd710b212d --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/requirements.txt @@ -0,0 +1,15 @@ +openai +openai-agents +fredapi +yfinance +pandas +numpy +matplotlib +seaborn +scipy +cvxpy +arch +mpmath +tabulate +requests +pydantic \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/agent_architecture.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/agent_architecture.png new file mode 100644 index 0000000000..44be5bb949 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/agent_architecture.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_analyst_recommendations_summary.csv b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_analyst_recommendations_summary.csv new file mode 100644 index 0000000000..5acf1590af --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_analyst_recommendations_summary.csv @@ -0,0 +1,5 @@ +period,Buy,Hold,Sell +Current,56,12,0 +1 Month Ago,55,12,0 +2 Months Ago,55,12,0 +3 Months Ago,53,12,0 diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_analyst_recommendations_trend.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_analyst_recommendations_trend.png new file mode 100644 index 0000000000..604ee8b849 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_analyst_recommendations_trend.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_quarterly_margins.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_quarterly_margins.png new file mode 100644 index 0000000000..416b256332 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_quarterly_margins.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_quarterly_summary.csv b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_quarterly_summary.csv new file mode 100644 index 0000000000..e6d3e96d3c --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_quarterly_summary.csv @@ -0,0 +1,6 @@ +Date,Revenue,Net Income,Gross Profit,Total Expenses,EPS,Net Margin (%),Gross Margin (%),Operating Margin (%) +2025-03-31,90234000000.0,34540000000.0,53873000000.0,59628000000.0,2.81,38.27825431655473,59.70365937451515,33.91847862224882 +2024-12-31,96469000000.0,26536000000.0,55856000000.0,65497000000.0,2.15,27.507282132083883,57.90046543449191,32.10565051985612 +2024-09-30,88268000000.0,26301000000.0,51794000000.0,59747000000.0,2.12,29.796755336022112,58.678116644763676,32.31182308424344 +2024-06-30,84742000000.0,23619000000.0,49235000000.0,57317000000.0,1.89,27.871657501593074,58.099879634655785,32.362936914399 +2024-03-31,80539000000.0,23662000000.0,46827000000.0,55067000000.0,1.89,29.37955524652653,58.14201815269621,31.62691366915407 diff --git 
a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_quarterly_trends.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_quarterly_trends.png new file mode 100644 index 0000000000..e70e1cf8c4 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/GOOGL_quarterly_trends.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/cumulative_return_comparison.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/cumulative_return_comparison.png new file mode 100644 index 0000000000..05d42ab0d9 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/cumulative_return_comparison.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_daily_returns.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_daily_returns.png new file mode 100644 index 0000000000..0748c75556 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_daily_returns.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_moving_averages.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_moving_averages.png new file mode 100644 index 0000000000..b422038129 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_moving_averages.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_rolling_volatility.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_rolling_volatility.png new file mode 100644 index 0000000000..7e8b3d29ec Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_rolling_volatility.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_rsi.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_rsi.png new file mode 100644 index 0000000000..252a0918b6 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/googl_rsi.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/investment_report.md b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/investment_report.md new file mode 100644 index 0000000000..755d0eda69 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/investment_report.md @@ -0,0 +1,77 @@ +# Investment Memo: Alphabet Inc. (GOOGL) – Impact of Planned Interest Rate Reduction (May 2025) + +## Executive Summary + +Alphabet Inc. (GOOGL) currently trades at \$171.42 per share, with a market capitalization of \$1.88 trillion and a P/E ratio of 16.91. The investment thesis is moderately constructive: while a planned interest rate reduction by the Federal Reserve is a mild tailwind, it is not the primary driver of GOOGL's price action. 
The most original, differentiated insight—fully aligned with our firm's vision—is that GOOGL's direct sensitivity to interest rates is modest (max weekly correlation with 10Y yield is ~0.29), and the real risk/reward hinges on the sustainability of AI-driven growth, sector rotation, and regulatory headwinds. This thesis is supported by robust technicals, strong fundamentals, and overwhelmingly positive analyst sentiment, but is tempered by the risk that AI optimism fades or macro/regulatory shocks emerge. The consensus view is justified by evidence: GOOGL's business remains resilient, but the variant view—where rate cuts fail to stimulate tech or sector rotation caps returns—should not be ignored. Key risks include regulatory action, macroeconomic uncertainty, and the potential for a shift in the AI narrative. In the best case, GOOGL could reach \$200–\$210 by year-end 2025; in the worst case, a retest of \$160–\$170 is plausible. This memo embodies the firm's vision by focusing on scenario planning, original quantitative analysis, and a critical assessment of consensus and variant views. + +## Fundamentals Perspective + +Alphabet's core business is driven by its dominance in digital advertising (Google Search, YouTube) and its growing cloud and AI segments. As of the latest quarter (Q1 2025), revenue was \$90.2 billion, net income \$34.5 billion, and EPS \$2.81, with net margin at 38.3%. Margins have improved over the past year, and the company's scale and leadership in AI and cloud provide a durable moat. However, recent analyst price targets have been revised downward (Bernstein: \$165, UBS: \$209, Wolfe: \$210), reflecting caution around regulatory and macroeconomic risks. The consensus view is justified: while Alphabet's financial strength and innovation are clear, regulatory scrutiny and macro headwinds (e.g., reduced ad budgets in downturns) are real risks. The most original insight is the company's ability to adapt and innovate, potentially mitigating some risks. The analysis is evidence-based, with recent quarterly data showing stable or improving margins: + +| Date | Revenue | Net Income | Gross Profit | Total Expenses | EPS | Net Margin (%) | Gross Margin (%) | Operating Margin (%) | +|:-----------|-----------:|-------------:|---------------:|-----------------:|------:|-----------------:|-------------------:|-----------------------:| +| 2025-03-31 | 9.0234e+10 | 3.454e+10 | 5.3873e+10 | 5.9628e+10 | 2.81 | 38.28 | 59.70 | 33.92 | +| 2024-12-31 | 9.6469e+10 | 2.6536e+10 | 5.5856e+10 | 6.5497e+10 | 2.15 | 27.51 | 57.90 | 32.11 | +| 2024-09-30 | 8.8268e+10 | 2.6301e+10 | 5.1794e+10 | 5.9747e+10 | 2.12 | 29.80 | 58.68 | 32.31 | +| 2024-06-30 | 8.4742e+10 | 2.3619e+10 | 4.9235e+10 | 5.7317e+10 | 1.89 | 27.87 | 58.10 | 32.36 | +| 2024-03-31 | 8.0539e+10 | 2.3662e+10 | 4.6827e+10 | 5.5067e+10 | 1.89 | 29.38 | 58.14 | 31.63 | + +Recent analyst sentiment is overwhelmingly positive, with 56 Buy, 12 Hold, and 0 Sell recommendations currently: + +| period | Buy | Hold | Sell | +|:-------------|------:|-------:|-------:| +| Current | 56 | 12 | 0 | +| 1 Month Ago | 55 | 12 | 0 | +| 2 Months Ago | 55 | 12 | 0 | +| 3 Months Ago | 53 | 12 | 0 | + +The fundamental view is aligned with the firm vision by focusing on evidence, scenario planning, and not simply following consensus. The main divergence from the firm vision would be if the analysis failed to consider the impact of regulatory or macro shocks, but this is addressed here. + +## Macro Perspective + +The macroeconomic environment is mixed. 
U.S. real GDP is expanding (\$23.5 trillion, Q1 2025), unemployment is low (4.2%), and inflation remains elevated (CPI: 320.3). The Federal Reserve has kept rates at 4.25–4.50%, with a patient stance and a focus on evolving risks. The U.S. dollar is strong (DXY: 123.4), and recent tariffs have introduced uncertainty. Investors are rotating from U.S. tech to Asian equities, reflecting concerns about high valuations and better growth prospects abroad. The consensus macro view is that rate cuts will support tech valuations, but the variant view—supported by our firm's vision—is that sector rotation and trade policy could offset these benefits. Tail-risk scenarios include a base case where rate cuts support GOOGL (\$180–\$190 target), and a downside where trade tensions or sector rotation cap returns. The analysis is evidence-based, using FRED data and recent policy statements, and explicitly considers both best- and worst-case scenarios. The macro view is fully aligned with the firm vision by challenging consensus and planning for multiple outcomes. + +## Quantitative Perspective + +Quantitative analysis confirms that GOOGL's direct sensitivity to interest rates is modest. The mean weekly correlation with the 10Y Treasury yield is 0.29, and with the Fed Funds rate is 0.05, indicating that rate changes are not the primary driver of GOOGL's returns. Technicals are robust: GOOGL is above key moving averages, momentum is positive, and volatility is moderate. Scenario analysis shows that a rate cut is a mild tailwind, but if the move is already priced in or if technicals break down, a 5–10% pullback is possible. Analyst sentiment is strongly positive, and fundamentals (revenue, margins) are improving. Quantitative summary statistics: + +| Metric | Value | +|:----------------------------------------|----------:| +| Mean daily corr (FEDFUNDS, GOOGL) | 0.05 | +| Mean daily reg slope (FEDFUNDS, GOOGL) | 0.02 | +| Mean daily corr (DGS10, GOOGL) | 0.13 | +| Mean daily reg slope (DGS10, GOOGL) | 0.05 | +| Mean weekly corr (FEDFUNDS, GOOGL) | 0.05 | +| Mean weekly reg slope (FEDFUNDS, GOOGL) | 0.03 | +| Mean weekly corr (DGS10, GOOGL) | 0.29 | +| Mean weekly reg slope (DGS10, GOOGL) | 0.09 | + +Key charts and images: + +![GOOGL Daily Returns](static/example_output/googl_daily_returns.png) +![GOOGL Moving Averages](static/example_output/googl_moving_averages.png) +![GOOGL RSI](static/example_output/googl_rsi.png) +![GOOGL Rolling Volatility](static/example_output/googl_rolling_volatility.png) +![Cumulative Return Comparison](static/example_output/cumulative_return_comparison.png) +![Rolling Volatility Comparison](static/example_output/rolling_volatility_comparison.png) +![Rolling Corr/Reg Daily Fed Funds](static/example_output/rolling_corr_reg_daily_fedfunds.png) +![Rolling Corr/Reg Daily 10Y](static/example_output/rolling_corr_reg_daily_dgs10.png) +![Rolling Corr/Reg Weekly Fed Funds](static/example_output/rolling_corr_reg_weekly_fedfunds.png) +![Rolling Corr/Reg Weekly 10Y](static/example_output/rolling_corr_reg_weekly_dgs10.png) +![GOOGL Quarterly Trends](static/example_output/GOOGL_quarterly_trends.png) +![GOOGL Quarterly Margins](static/example_output/GOOGL_quarterly_margins.png) +![GOOGL Analyst Recommendations Trend](static/example_output/GOOGL_analyst_recommendations_trend.png) + +The quantitative view is original in its focus on scenario analysis and the modest rate sensitivity, and is aligned with the firm vision by not simply following consensus. 
+Limitations include the short post-pandemic data window and the fact that GOOGL's price is driven by multiple factors (AI, the ad market, regulation) beyond rates.
+
+## Portfolio Manager Perspective
+
+The PM synthesis is that all three specialist sections converge on a moderately constructive outlook, with a realistic year-end 2025 price target of \$190–\$210. The most original insight is that GOOGL's direct rate sensitivity is modest; the real question is whether AI-driven growth can continue or whether sector rotation and regulatory headwinds will cap returns. The quant section is strong in highlighting robust technicals and sentiment, but also flags the risk of a \$160–\$170 retest in downside scenarios. The fundamental and macro sections emphasize the importance of monitoring regulatory and trade policy. For portfolios underweight large-cap tech, now is a reasonable entry point, but position sizing should reflect the risk of sector rotation or macro disappointment. The variant view, in which rate cuts fail to stimulate tech or the AI narrative shifts, should not be ignored. Position sizing and risk management are key, in line with the firm's vision of scenario planning and differentiated insight.
+
+## Recommendation & Answer to the Question
+
+The recommendation is to maintain or modestly increase exposure to GOOGL, especially if underweight large-cap tech, with a year-end 2025 price target of \$200–\$210 in the base case. This embodies the firm's vision by focusing on original, evidence-based scenario analysis rather than simply following consensus. The recommendation is justified by robust fundamentals, positive technicals, and strong analyst sentiment, but is tempered by the risk of sector rotation, regulatory action, or a shift in the AI narrative. If these risks materialize, a retest of \$160–\$170 is possible, and sizing and risk management should reflect these scenarios. This approach is differentiated, evidence-driven, and fully aligned with the firm's vision.
+
+**END_OF_MEMO**
+
+*DISCLAIMER: I am an AI language model, not a registered investment adviser. Information provided is educational and general in nature.
Consult a qualified financial professional before making any investment decisions.* \ No newline at end of file diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_daily_dgs10.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_daily_dgs10.png new file mode 100644 index 0000000000..fd172c408a Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_daily_dgs10.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_daily_fedfunds.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_daily_fedfunds.png new file mode 100644 index 0000000000..300149c16d Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_daily_fedfunds.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_weekly_dgs10.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_weekly_dgs10.png new file mode 100644 index 0000000000..c67d86644b Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_weekly_dgs10.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_weekly_fedfunds.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_weekly_fedfunds.png new file mode 100644 index 0000000000..4db222d9d8 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_corr_reg_weekly_fedfunds.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_volatility_comparison.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_volatility_comparison.png new file mode 100644 index 0000000000..bc79399856 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/rolling_volatility_comparison.png differ diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/summary_stats.csv b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/summary_stats.csv new file mode 100644 index 0000000000..4517c86c51 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/example_output/summary_stats.csv @@ -0,0 +1,9 @@ +Metric,Value +"Mean daily corr (FEDFUNDS, GOOGL)",0.049552426150663686 +"Mean daily reg slope (FEDFUNDS, GOOGL)",0.021844306307280743 +"Mean daily corr (DGS10, GOOGL)",0.12595113664542953 +"Mean daily reg slope (DGS10, GOOGL)",0.046416745466381575 +"Mean weekly corr (FEDFUNDS, GOOGL)",0.04832129511164556 +"Mean weekly reg slope (FEDFUNDS, GOOGL)",0.031186349207761307 +"Mean weekly corr (DGS10, GOOGL)",0.29421055943549884 +"Mean weekly reg slope (DGS10, GOOGL)",0.08869669678617446 diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/static/trace_example.png b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/trace_example.png new file mode 100644 index 0000000000..02b277c101 Binary files /dev/null and b/examples/agents_sdk/multi-agent-portfolio-collaboration/static/trace_example.png differ diff --git 
a/examples/agents_sdk/multi-agent-portfolio-collaboration/tools.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/tools.py new file mode 100644 index 0000000000..2739d21d31 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/tools.py @@ -0,0 +1,286 @@
+# ---------------------------------------------------------------------------
+# Standard library imports
+# ---------------------------------------------------------------------------
+
+import os
+import json
+from pathlib import Path
+import warnings
+warnings.filterwarnings("ignore", category=UserWarning)
+import re
+
+# ---------------------------------------------------------------------------
+# Third-party imports
+# ---------------------------------------------------------------------------
+
+import pandas as pd  # pandas is a required dependency
+import requests
+from openai import OpenAI
+
+# fredapi is optional: get_fred_series falls back to a stub result when it is
+# missing, so guard the import instead of failing at module load time.
+try:
+    from fredapi import Fred
+except ImportError:
+    Fred = None
+
+# ---------------------------------------------------------------------------
+# Local package imports
+# ---------------------------------------------------------------------------
+
+from agents import function_tool
+from utils import outputs_dir, output_file
+
+# ---------------------------------------------------------------------------
+# Repository paths & globals
+# ---------------------------------------------------------------------------
+
+OUTPUT_DIR = outputs_dir()
+PROMPT_PATH = Path(__file__).parent / "prompts" / "code_interpreter.md"
+with open(PROMPT_PATH, "r", encoding="utf-8") as f:
+    CODE_INTERPRETER_INSTRUCTIONS = f.read()
+
+# ---------------------------------------------------------------------------
+# Tool implementations
+# ---------------------------------------------------------------------------
+
+def code_interpreter_error_handler(ctx, error):
+    """
+    Custom error handler for run_code_interpreter. Returns a clear message to the LLM about what went wrong and how to fix it.
+    """
+    return (
+        "Error running code interpreter. "
+        "You must provide BOTH a clear natural language analysis request and a non-empty list of input_files (relative to outputs/). "
+        f"Details: {str(error)}"
+    )
+
+@function_tool(failure_error_function=code_interpreter_error_handler)
+def run_code_interpreter(request: str, input_files: list[str]) -> str:
+    """
+    Executes a quantitative analysis request using OpenAI's Code Interpreter (cloud).
+
+    Args:
+        request (str): A clear, quantitative analysis request describing the specific computation, statistical analysis, or visualization to perform on the provided data.
+            Examples:
+                - "Calculate the Sharpe ratio for the portfolio returns in returns.csv."
+                - "Plot a histogram of daily returns from the file 'AAPL_returns.csv'."
+                - "Perform a linear regression of 'y' on 'x' in data.csv and report the R^2."
+                - "Summarize the volatility of each ticker in the provided CSV."
+        input_files (list[str]): A non-empty list of file paths (relative to outputs/) required for the analysis. Each file should contain the data needed for the requested quantitative analysis.
+            Example: ["returns.csv", "tickers.csv"]
+
+    Returns:
+        str: JSON string with the analysis summary and a list of generated files (e.g., plots, CSVs) available for download.
+    """
+    # Input validation
+    if not request or not isinstance(request, str):
+        raise ValueError("The 'request' argument must be a non-empty string describing the analysis to perform.")
+    if not input_files or not isinstance(input_files, list) or not all(isinstance(f, str) for f in input_files):
+        raise ValueError("'input_files' must be a non-empty list of file paths (strings) relative to outputs/.")
+
+    client = OpenAI()
+    file_ids = []
+    for file_path in input_files:
+        abs_path = output_file(file_path, make_parents=False)
+        if not abs_path.exists():
+            raise ValueError(
+                f"File not found: {file_path}. "
+                "Use the list_output_files tool to see which files exist, "
+                "and the read_file tool to see the contents of CSV files."
+            )
+        with abs_path.open("rb") as f:
+            uploaded = client.files.create(file=f, purpose="user_data")
+        file_ids.append(uploaded.id)
+
+    instructions = CODE_INTERPRETER_INSTRUCTIONS
+
+    resp = client.responses.create(
+        model="gpt-4.1",
+        tools=[
+            {
+                "type": "code_interpreter",
+                "container": {"type": "auto", "file_ids": file_ids}
+            }
+        ],
+        instructions=instructions,
+        input=request,
+        temperature=0,
+    )
+
+    output_text = resp.output_text
+    # Extract the container_id from the code_interpreter_call output item
+    raw = resp.model_dump() if hasattr(resp, 'model_dump') else resp.__dict__
+    container_id = None
+    if "output" in raw:
+        for item in raw["output"]:
+            if item.get("type") == "code_interpreter_call" and "container_id" in item:
+                container_id = item["container_id"]
+
+    # Download any newly generated files from the container
+    downloaded_files = []
+    if container_id:
+        api_key = os.environ["OPENAI_API_KEY"]
+        url = f"https://api.openai.com/v1/containers/{container_id}/files"
+        headers = {"Authorization": f"Bearer {api_key}"}
+        resp_files = requests.get(url, headers=headers)
+        resp_files.raise_for_status()
+        files = resp_files.json().get("data", [])
+        for f in files:
+            # Only download files not uploaded by the user (i.e., generated outputs)
+            if f["source"] != "user":
+                filename = f.get("path", "").split("/")[-1]
+                cfile_id = f["id"]
+                url_download = f"https://api.openai.com/v1/containers/{container_id}/files/{cfile_id}/content"
+                resp_download = requests.get(url_download, headers=headers)
+                resp_download.raise_for_status()
+                out_path = output_file(filename)
+                with open(out_path, "wb") as out:
+                    out.write(resp_download.content)
+                downloaded_files.append(str(out_path))
+
+    # If no files were downloaded, surface the model's explanation. NOTE: the tag
+    # name below is an assumption; it must match the tag that the instructions in
+    # prompts/code_interpreter.md tell the model to emit when no files are produced.
+    if not downloaded_files:
+        match = re.search(r"<no_downloads_reason>(.*?)</no_downloads_reason>", output_text, re.DOTALL)
+        if match:
+            reason = match.group(1).strip()
+            raise ValueError(reason)
+        raise ValueError("No downloads were generated and no <no_downloads_reason> tag was provided. Please call the tool again, and ask for downloadable files.")
+
+    return json.dumps({
+        "analysis": output_text,
+        "files": downloaded_files,
+    })
+
+@function_tool
+def write_markdown(filename: str, content: str) -> str:
+    """Write `content` to `outputs/filename` and return confirmation JSON."""
+    if not filename.endswith(".md"):
+        filename += ".md"
+    path = output_file(filename)
+    with open(path, "w", encoding="utf-8") as f:
+        f.write(content)
+    return json.dumps({"file": filename})
+
+@function_tool
+def read_file(filename: str, n_rows: int = 10) -> str:
+    """
+    Read and preview the contents of a file from the outputs directory.
+
+    Supports reading CSV, Markdown (.md), and plain text (.txt) files. For CSV files, returns a preview of the last `n_rows` as a Markdown table. For Markdown and text files, returns the full text content. For unsupported file types, returns an error message.
+
+    Args:
+        filename: The name of the file to read, relative to the outputs directory. Supported extensions: .csv, .md, .txt.
+        n_rows: The number of rows to preview for CSV files (default: 10).
+
+    Returns:
+        str: A JSON string containing either:
+            - For CSV: {"file": filename, "preview_markdown": "<last n_rows as a Markdown table>"}
+            - For Markdown/Text: {"file": filename, "content": "<full file text>"}
+            - For errors: {"error": "<error message>", "file": filename}
+    """
+    path = output_file(filename, make_parents=False)
+    if not path.exists():
+        return json.dumps({"error": "file not found", "file": filename})
+
+    suffix = Path(filename).suffix.lower()
+    if suffix == ".csv":
+        try:
+            df = pd.read_csv(path).tail(n_rows)
+            table_md = df.to_markdown(index=False)
+            return json.dumps({"file": filename, "preview_markdown": table_md})
+        except Exception as e:
+            return json.dumps({"error": str(e), "file": filename})
+    elif suffix in (".md", ".txt"):
+        try:
+            with open(path, "r", encoding="utf-8") as f:
+                content = f.read()
+            return json.dumps({"file": filename, "content": content})
+        except Exception as e:
+            return json.dumps({"error": str(e), "file": filename})
+    else:
+        return json.dumps({"error": f"Unsupported file type: {suffix}", "file": filename})
+
+@function_tool
+def get_fred_series(series_id: str, start_date: str, end_date: str, download_csv: bool = False) -> str:
+    """Fetches a FRED economic time-series and returns simple summary statistics.
+
+    Parameters
+    ----------
+    series_id : str
+        FRED series identifier, e.g. "GDP" or "UNRATE".
+    start_date : str
+        ISO date string (YYYY-MM-DD). An empty string means "from the earliest observation".
+    end_date : str
+        ISO date string (YYYY-MM-DD). An empty string means "up to the latest observation".
+    download_csv : bool, optional
+        If True, also save the series as a CSV under outputs/ and include the
+        filename in the returned summary (default: False).
+
+    Returns
+    -------
+    str
+        JSON string with basic statistics (mean, latest value, etc.). Falls back to a
+        placeholder if fredapi is not available or an error occurs.
+    """
+    # Treat empty strings as unspecified
+    start_date = start_date or None  # type: ignore
+    end_date = end_date or None  # type: ignore
+
+    if Fred is None:
+        return json.dumps({"error": "fredapi not installed. returning stub result", "series_id": series_id})
+
+    try:
+        fred_api_key = os.getenv("FRED_API_KEY")
+        fred = Fred(api_key=fred_api_key)
+        data = fred.get_series(series_id, observation_start=start_date, observation_end=end_date)
+        if data is None or data.empty:
+            return json.dumps({"error": "Series not found or empty", "series_id": series_id})
+
+        summary = {
+            "series_id": series_id,
+            "observations": len(data),
+            "start": str(data.index.min().date()),
+            "end": str(data.index.max().date()),
+            "latest": float(data.iloc[-1]),
+            "mean": float(data.mean()),
+        }
+
+        # ------------------------------------------------------------------
+        # Optional CSV download
+        # ------------------------------------------------------------------
+        if download_csv:
+            # Reset index to turn the DatetimeIndex into a column for CSV output
+            df = data.reset_index()
+            df.columns = ["Date", series_id]  # Capital D to match Yahoo Finance
+
+            # Build a date_range string for the filename (YYYYMMDD_YYYYMMDD).
+            start_str = start_date if start_date else str(df["Date"].min().date())
+            end_str = end_date if end_date else str(df["Date"].max().date())
+            date_range = f"{start_str}_{end_str}".replace("-", "")
+            file_name = f"{series_id}_{date_range}.csv"
+
+            # Save under outputs/
+            csv_path = output_file(file_name)
+            df.to_csv(csv_path, index=False)
+
+            # Add file metadata to summary
+            summary["file"] = file_name
+            summary["schema"] = ["Date", series_id]
+
+        return json.dumps(summary)
+    except Exception as e:
+        return json.dumps({"error": str(e), "series_id": series_id})
+
+@function_tool
+def list_output_files(extension: str | None = None) -> str:
+    """
+    List all files in the outputs directory. Optionally filter by file extension (e.g., 'png', 'csv', 'md').
+    Returns a JSON list of filenames.
+    """
+    out_dir = outputs_dir()
+    if extension:
+        files = [f.name for f in out_dir.glob(f'*.{extension}') if f.is_file()]
+    else:
+        files = [f.name for f in out_dir.iterdir() if f.is_file()]
+    return json.dumps({"files": files})
+
+# Public interface -----------------------------------------------------------
+
+__all__ = [
+    "run_code_interpreter",
+    "write_markdown",
+    "get_fred_series",
+    "list_output_files",
+    "read_file",
+]
\ No newline at end of file
diff --git a/examples/agents_sdk/multi-agent-portfolio-collaboration/utils.py b/examples/agents_sdk/multi-agent-portfolio-collaboration/utils.py new file mode 100644 index 0000000000..4d3af6c8b0 --- /dev/null +++ b/examples/agents_sdk/multi-agent-portfolio-collaboration/utils.py @@ -0,0 +1,108 @@
+"""Shared utilities for the multi-agent investment workflow."""
+
+from __future__ import annotations
+
+from pathlib import Path
+import json
+
+from agents.tracing.processor_interface import TracingExporter
+
+# ---------------------------------------------------------------------------
+# Global disclaimer for all agents
+# ---------------------------------------------------------------------------
+
+DISCLAIMER = (
+    "DISCLAIMER: I am an AI language model, not a registered investment adviser. "
+    "Information provided is educational and general in nature.
Consult a qualified "
+    "financial professional before making any investment decisions.\n\n"
+)
+
+# ---------------------------------------------------------------------------
+# Paths
+# ---------------------------------------------------------------------------
+
+ROOT_DIR: Path = Path(__file__).resolve().parent  # this example's root directory
+
+
+def repo_path(rel: str | Path) -> Path:
+    """Return an absolute Path inside this example's directory given a relative string."""
+    return (ROOT_DIR / rel).resolve()
+
+
+def outputs_dir() -> Path:
+    """Return the global `outputs/` folder, creating it if needed."""
+    out = repo_path("outputs")
+    out.mkdir(parents=True, exist_ok=True)
+    return out
+
+# ---------------------------------------------------------------------------
+# Prompt loader
+# ---------------------------------------------------------------------------
+
+PROMPTS_DIR: Path = repo_path("prompts")
+
+
+def load_prompt(name: str, **subs) -> str:
+    """Load a Markdown prompt template and substitute any `<key>` placeholders."""
+    content = (PROMPTS_DIR / name).read_text()
+    for key, val in subs.items():
+        content = content.replace(f"<{key}>", str(val))
+    return content
+
+# ---------------------------------------------------------------------------
+# Local trace exporter
+# ---------------------------------------------------------------------------
+
+class FileSpanExporter(TracingExporter):
+    """Write spans/traces to a JSONL file under `logs/`."""
+
+    def __init__(self, logfile: str | Path = "logs/agent_traces.jsonl") -> None:
+        path = repo_path(logfile)
+        path.parent.mkdir(parents=True, exist_ok=True)
+        self.logfile = path
+
+    def export(self, items):  # noqa: D401 – simple signature required by SDK
+        with self.logfile.open("a", encoding="utf-8") as f:
+            for item in items:
+                try:
+                    f.write(json.dumps(item.export(), default=str) + "\n")
+                except Exception:
+                    f.write(str(item) + "\n")
+
+# ---------------------------------------------------------------------------
+# Output path helper
+# ---------------------------------------------------------------------------
+
+
+def output_file(name: str | Path, *, make_parents: bool = True) -> Path:
+    """Return an absolute Path under the shared outputs/ directory.
+
+    If *name* already starts with the string "outputs/", that prefix is removed
+    to avoid accidentally nesting a second outputs folder (e.g.
+    `outputs/outputs/foo.png`). Absolute paths are returned unchanged.
+    """
+
+    path = Path(name)
+
+    if path.is_absolute():
+        return path
+
+    # Strip leading "outputs/" if present
+    if path.parts and path.parts[0] == "outputs":
+        path = Path(*path.parts[1:])
+
+    final = outputs_dir() / path
+
+    if make_parents:
+        final.parent.mkdir(parents=True, exist_ok=True)
+
+    return final
+
+__all__ = [
+    "ROOT_DIR",
+    "repo_path",
+    "outputs_dir",
+    "load_prompt",
+    "FileSpanExporter",
+    "output_file",
+]
\ No newline at end of file
diff --git a/registry.yaml b/registry.yaml index 84d3cd74d1..f13a6b7e85 100644 --- a/registry.yaml +++ b/registry.yaml @@ -4,6 +4,18 @@
 # should build pages for, and indicates metadata such as tags, creation date and
 # authors for each page.
+- title: Multi-Agent Portfolio Collaboration with OpenAI Agents SDK
+  path: examples/agents_sdk/multi-agent-portfolio-collaboration/multi_agent_portfolio_collaboration.ipynb
+  date: 2025-05-28
+  authors:
+    - rajpathak-openai
+    - chelseahu-openai
+  tags:
+    - agents-sdk
+    - functions
+    - responses
+    - multi-agent-collaboration
 - title: o3/o4-mini Function Calling Guide
   path: examples/o-series/o3o4-mini_prompting_guide.ipynb
   date: 2025-05-26
@@ -14,6 +26,7 @@
   - functions
   - responses
   - reasoning
 - title: Exploring Model Graders for Reinforcement Fine-Tuning
   path: examples/Reinforcement_Fine_Tuning.ipynb