diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..9d7bc2aaa --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,21 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "npm" + directory: "/" + schedule: + interval: "daily" + open-pull-requests-limit: 100 + commit-message: + prefix: ci + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + open-pull-requests-limit: 100 + commit-message: + prefix: ci diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bc916a4ac..f9fde4a62 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,7 @@ jobs: - name: Setup Node.js uses: actions/setup-node@v4 with: - node-version: '20' + node-version: '22' - name: Install dependencies run: npm install diff --git a/.github/workflows/deploy-github-pages.yml b/.github/workflows/deploy-github-pages.yml index 7b291d0f2..dd4f2185e 100644 --- a/.github/workflows/deploy-github-pages.yml +++ b/.github/workflows/deploy-github-pages.yml @@ -19,7 +19,7 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: '20' + node-version: '22' - name: Install dependencies run: npm install diff --git a/.github/workflows/docs-preview.yml b/.github/workflows/docs-preview.yml index 000c261fe..7c906ae64 100644 --- a/.github/workflows/docs-preview.yml +++ b/.github/workflows/docs-preview.yml @@ -61,7 +61,7 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: '20' + node-version: '22' - name: Install dependencies run: npm install diff --git a/.node-version b/.node-version index 
0317576e3..dc0bb0f43 100644 --- a/.node-version +++ b/.node-version @@ -1 +1 @@ -v20.19.5 +v22.12.0 diff --git a/docs/examples/python/structured_output.py b/docs/examples/python/structured_output.py index c90662122..fd8f5379d 100644 --- a/docs/examples/python/structured_output.py +++ b/docs/examples/python/structured_output.py @@ -5,13 +5,12 @@ This example demonstrates how to use structured output with Strands Agents to get type-safe, validated responses using Pydantic models. """ -import asyncio -import tempfile - from typing import List, Optional + from pydantic import BaseModel, Field from strands import Agent + def basic_example(): """Basic example extracting structured information from text.""" print("\n--- Basic Example ---") @@ -22,80 +21,14 @@ class PersonInfo(BaseModel): occupation: str agent = Agent() - result = agent.structured_output( - PersonInfo, - "John Smith is a 30-year-old software engineer" + result = agent( + "John Smith is a 30-year-old software engineer", + structured_output_model=PersonInfo, ) - print(f"Name: {result.name}") # "John Smith" - print(f"Age: {result.age}") # 30 - print(f"Job: {result.occupation}") # "software engineer" - - -def multimodal_example(): - """Basic example extracting structured information from a document.""" - print("\n--- Multi-Modal Example ---") - - class PersonInfo(BaseModel): - name: str - age: int - occupation: str - - with tempfile.NamedTemporaryFile(delete=False) as person_file: - person_file.write(b"John Smith is a 30-year old software engineer") - person_file.flush() - - with open(person_file.name, "rb") as fp: - document_bytes = fp.read() - - agent = Agent() - result = agent.structured_output( - PersonInfo, - [ - {"text": "Please process this application."}, - { - "document": { - "format": "txt", - "name": "application", - "source": { - "bytes": document_bytes, - }, - }, - }, - ] - ) - - print(f"Name: {result.name}") # "John Smith" - print(f"Age: {result.age}") # 30 - print(f"Job: {result.occupation}") # 
"software engineer" - - -def conversation_history_example(): - """Example using conversation history with structured output.""" - print("\n--- Conversation History Example ---") - - agent = Agent() - - # Build up conversation context - print("Building conversation context...") - agent("What do you know about Paris, France?") - agent("Tell me about the weather there in spring.") - - # Extract structured information without additional prompt - class CityInfo(BaseModel): - city: str - country: str - population: Optional[int] = None - climate: str - - # Uses existing conversation context with a prompt - print("Extracting structured information from conversation context...") - result = agent.structured_output(CityInfo, "Extract structured information about Paris") - - print(f"City: {result.city}") - print(f"Country: {result.country}") - print(f"Population: {result.population}") - print(f"Climate: {result.climate}") + print(f"Name: {result.structured_output.name}") # "John Smith" + print(f"Age: {result.structured_output.age}") # 30 + print(f"Job: {result.structured_output.occupation}") # "software engineer" def complex_nested_model_example(): @@ -121,47 +54,24 @@ class Person(BaseModel): skills: List[str] = Field(default_factory=list, description="Professional skills") agent = Agent() - result = agent.structured_output( - Person, - "Extract info: Jane Doe, a systems admin, 28, lives at 123 Main St, New York, USA. 
Email: jane@example.com" - ) - - print(f"Name: {result.name}") # "Jane Doe" - print(f"Age: {result.age}") # 28 - print(f"Street: {result.address.street}") # "123 Main St" - print(f"City: {result.address.city}") # "New York" - print(f"Country: {result.address.country}") # "USA" - print(f"Email: {result.contacts[0].email}") # "jane@example.com" - print(f"Skills: {result.skills}") # ["systems admin"] - - -async def async_example(): - """Basic example extracting structured information from text asynchronously.""" - print("\n--- Async Example ---") - - class PersonInfo(BaseModel): - name: str - age: int - occupation: str - - agent = Agent() - result = await agent.structured_output_async( - PersonInfo, - "John Smith is a 30-year-old software engineer" + result = agent( + "Extract info: Jane Doe, a systems admin, 28, lives at 123 Main St, New York, USA. Email: jane@example.com", + structured_output_model=Person, ) - print(f"Name: {result.name}") # "John Smith" - print(f"Age: {result.age}") # 30 - print(f"Job: {result.occupation}") # "software engineer" + print(f"Name: {result.structured_output.name}") # "Jane Doe" + print(f"Age: {result.structured_output.age}") # 28 + print(f"Street: {result.structured_output.address.street}") # "123 Main St" + print(f"City: {result.structured_output.address.city}") # "New York" + print(f"Country: {result.structured_output.address.country}") # "USA" + print(f"Email: {result.structured_output.contacts[0].email}") # "jane@example.com" + print(f"Skills: {result.structured_output.skills}") # ["systems admin"] if __name__ == "__main__": print("Structured Output Examples\n") basic_example() - multimodal_example() - conversation_history_example() complex_nested_model_example() - asyncio.run(async_example()) print("\nExamples completed.") diff --git a/docs/examples/typescript/structured_output/README.md b/docs/examples/typescript/structured_output/README.md new file mode 100644 index 000000000..9008f89a1 --- /dev/null +++ 
b/docs/examples/typescript/structured_output/README.md @@ -0,0 +1,27 @@ +# Structured Output Example + +Demonstrates how to use structured output with Strands Agents to get type-safe, validated responses using Zod schemas. + +## Prerequisites + +- Node.js 20+ +- AWS credentials configured for Amazon Bedrock + +## Setup + +```bash +npm install +``` + +## Run + +```bash +npm start +``` + +## What It Covers + +- Basic structured output with Zod schemas +- Complex nested schemas + +See the [Structured Output documentation](https://strandsagents.com/docs/examples/structured-output/) for more details. diff --git a/docs/examples/typescript/structured_output/package.json b/docs/examples/typescript/structured_output/package.json new file mode 100644 index 000000000..5ea29374d --- /dev/null +++ b/docs/examples/typescript/structured_output/package.json @@ -0,0 +1,17 @@ +{ + "name": "structured-output-example", + "version": "1.0.0", + "description": "Structured output example using Strands Agents TypeScript SDK with Zod schemas", + "type": "module", + "scripts": { + "start": "npx tsx structured_output.ts" + }, + "dependencies": { + "@strands-agents/sdk": "latest", + "zod": "^4.1.12" + }, + "devDependencies": { + "tsx": "^4.21.0", + "typescript": "^5.3.3" + } +} diff --git a/docs/examples/typescript/structured_output/structured_output.ts b/docs/examples/typescript/structured_output/structured_output.ts new file mode 100644 index 000000000..b41dcef7b --- /dev/null +++ b/docs/examples/typescript/structured_output/structured_output.ts @@ -0,0 +1,76 @@ +/** + * Structured Output Example + * + * This example demonstrates how to use structured output with Strands Agents to + * get type-safe, validated responses using Zod schemas. 
+ */ +import { Agent } from '@strands-agents/sdk' +import { z } from 'zod' + +async function basicExample(): Promise<void> { + console.log('\n--- Basic Example ---') + + const PersonInfo = z.object({ + name: z.string(), + age: z.number(), + occupation: z.string(), + }) + + const agent = new Agent() + const result = await agent.invoke('John Smith is a 30-year-old software engineer', { + structuredOutputSchema: PersonInfo, + }) + + console.log(`Name: ${result.structuredOutput.name}`) // "John Smith" + console.log(`Age: ${result.structuredOutput.age}`) // 30 + console.log(`Job: ${result.structuredOutput.occupation}`) // "software engineer" +} + +async function complexNestedSchemaExample(): Promise<void> { + console.log('\n--- Complex Nested Schema Example ---') + + const Address = z.object({ + street: z.string(), + city: z.string(), + country: z.string(), + postalCode: z.string().optional(), + }) + + const Contact = z.object({ + email: z.string().optional(), + phone: z.string().optional(), + }) + + const Person = z.object({ + name: z.string().describe('Full name of the person'), + age: z.number().describe('Age in years'), + address: Address.describe('Home address'), + contacts: z.array(Contact).describe('Contact methods'), + skills: z.array(z.string()).describe('Professional skills'), + }) + + const agent = new Agent() + const result = await agent.invoke( + 'Extract info: Jane Doe, a systems admin, 28, lives at 123 Main St, New York, USA. 
Email: jane@example.com', + { structuredOutputSchema: Person }, + ) + + console.log(`Name: ${result.structuredOutput.name}`) // "Jane Doe" + console.log(`Age: ${result.structuredOutput.age}`) // 28 + console.log(`Street: ${result.structuredOutput.address.street}`) // "123 Main St" + console.log(`City: ${result.structuredOutput.address.city}`) // "New York" + console.log(`Country: ${result.structuredOutput.address.country}`) // "USA" + console.log(`Email: ${result.structuredOutput.contacts[0].email}`) // "jane@example.com" + console.log(`Skills: ${result.structuredOutput.skills}`) // ["systems admin"] +} + +async function main(): Promise<void> { + console.log('Structured Output Examples\n') + + await basicExample() + await complexNestedSchemaExample() + + console.log('\nExamples completed.') +} + +main() diff --git a/mkdocs.yml b/mkdocs.yml index e0d697adc..cccca9fc3 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -114,7 +114,7 @@ nav: - Amazon Bedrock: user-guide/concepts/model-providers/amazon-bedrock.md - Amazon Nova: user-guide/concepts/model-providers/amazon-nova.md - Anthropic: user-guide/concepts/model-providers/anthropic.md - - Gemini: user-guide/concepts/model-providers/gemini.md + - Google: user-guide/concepts/model-providers/google.md - LiteLLM: user-guide/concepts/model-providers/litellm.md - llama.cpp: user-guide/concepts/model-providers/llamacpp.md - LlamaAPI: user-guide/concepts/model-providers/llamaapi.md diff --git a/package.json b/package.json index 66e23ad3e..c241d86ef 100644 --- a/package.json +++ b/package.json @@ -48,15 +48,16 @@ "turndown": "^7.2.2", "turndown-plugin-gfm": "^1.0.2", "typedoc": "^0.28.14", - "typescript": "^5.9.3" + "typescript": "^5.9.3", + "vite": "^8.0.0" }, "devDependencies": { - "@astrojs/starlight": "^0.37.3", + "@astrojs/starlight": "^0.38.1", + "@types/js-yaml": "^4.0.9", "@types/mdast": "^4.0.4", "@types/node": "^24.10.1", "acorn": "^8.14.0", - "astro": "^5.6.1", + "astro": "^6.0.5", "js-yaml": "^4.1.1", "pino": "^9.7.0", 
"pino-pretty": "^13.0.0", diff --git a/src/config/navigation.yml b/src/config/navigation.yml index 1a1738214..8f79143ad 100644 --- a/src/config/navigation.yml +++ b/src/config/navigation.yml @@ -66,6 +66,7 @@ sidebar: - docs/user-guide/concepts/tools/mcp-tools - docs/user-guide/concepts/tools/executors - docs/user-guide/concepts/tools/community-tools-package + - docs/user-guide/concepts/tools/vended-tools - label: Plugins items: - docs/user-guide/concepts/plugins @@ -77,7 +78,7 @@ sidebar: - docs/user-guide/concepts/model-providers/amazon-bedrock - docs/user-guide/concepts/model-providers/amazon-nova - docs/user-guide/concepts/model-providers/anthropic - - docs/user-guide/concepts/model-providers/gemini + - docs/user-guide/concepts/model-providers/google - docs/user-guide/concepts/model-providers/litellm - docs/user-guide/concepts/model-providers/llamacpp - docs/user-guide/concepts/model-providers/llamaapi @@ -189,7 +190,7 @@ sidebar: - docs/examples/python/file_operations - docs/examples/python/agents_workflows - docs/examples/python/knowledge_base_agent - - docs/examples/python/structured_output + - docs/examples/structured_output - docs/examples/python/multi_agent_example/multi_agent_example - docs/examples/python/graph_loops_example - docs/examples/python/meta_tooling @@ -217,6 +218,7 @@ sidebar: - docs/community/model-providers/sglang - docs/community/model-providers/vllm - docs/community/model-providers/mlx + - docs/community/model-providers/ovhcloud-ai-endpoints - docs/community/model-providers/xai - label: Session Managers items: diff --git a/src/content/docs/community/get-featured.mdx b/src/content/docs/community/get-featured.mdx index 37c579b68..44161695d 100644 --- a/src/content/docs/community/get-featured.mdx +++ b/src/content/docs/community/get-featured.mdx @@ -18,6 +18,10 @@ We feature **reusable packages** that extend Strands Agents capabilities: We're not looking for example agents or one-off projects — the focus is on packages published to PyPI 
that others can `pip install` or `npm install` and use in their own agents. See [Community Packages](./community-packages.md) for guidance on creating and publishing your package. +:::tip[Starting from scratch?] +The [extension template](https://github.com/strands-agents/extension-template-python) gives you a ready-made project structure with testing, linting, and PyPI publishing already set up. +::: + ## Quick Steps 1. **Create a PR** to [strands-agents/docs](https://github.com/strands-agents/docs) diff --git a/src/content/docs/community/model-providers/ovhcloud-ai-endpoints.mdx b/src/content/docs/community/model-providers/ovhcloud-ai-endpoints.mdx new file mode 100644 index 000000000..565d107d7 --- /dev/null +++ b/src/content/docs/community/model-providers/ovhcloud-ai-endpoints.mdx @@ -0,0 +1,105 @@ +--- +title: OVHcloud AI Endpoints +community: true +description: OVHcloud AI Endpoints +integrationType: model-provider +languages: Python +--- + +[OVHcloud](https://www.ovhcloud.com) is a leading European cloud provider operating over 450,000 servers within 40 data centers across 4 continents. [OVHcloud AI Endpoints](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/) offers access to various models with sovereignty, data privacy and GDPR compliance. + +OVHcloud AI Endpoints provides OpenAI-compatible API access to a wide range of language models. This allows easy integration with the Strands Agents SDK using the OpenAI compatibility layer. 
+ +## Installation + +The Strands Agents SDK provides access to OVHcloud AI Endpoints models through the OpenAI compatibility layer, configured as an optional dependency: + +```bash +pip install 'strands-agents[openai]' +``` + +## Usage + +After installing the `openai` package, you can import and initialize the OpenAI-compatible provider for OVHcloud AI Endpoints: + +```python +from strands import Agent +from strands.models.openai import OpenAIModel + +model = OpenAIModel( + client_args={ + "api_key": "", # Optional: empty string or omit for free tier with rate limit + "base_url": "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1", + }, + model_id="Meta-Llama-3_3-70B-Instruct", # See catalog for available models + params={ + "max_tokens": 5000, + "temperature": 0.1 + } +) + +agent = Agent(model=model) +agent("What is 2+2?") +``` + +### Using with an API Key + +If you have an API key, you can use it to access higher rate limits and additional features: + +```python +model = OpenAIModel( + client_args={ + "api_key": "", # Your OVHcloud AI Endpoints API key + "base_url": "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1", + }, + model_id="Meta-Llama-3_3-70B-Instruct", + params={ + "max_tokens": 5000, + "temperature": 0.1 + } +) +``` + +## Configuration + +### Client Configuration + +The `client_args` configure the underlying OpenAI-compatible client: + +- `api_key`: Your OVHcloud AI Endpoints API key (optional). + - **Free tier**: Use an empty string `""` or omit the parameter entirely to access the API with rate limits. + - **With API key**: Generate one via [OVHcloud Manager](https://ovh.com/manager) > **Public Cloud** > **AI & Machine Learning** > **AI Endpoints** > **API keys**. 
+- `base_url`: `https://oai.endpoints.kepler.ai.cloud.ovh.net/v1` + +### Model Configuration + +| Parameter | Description | Example | Options | +| ---------- | ------------------------- | ------------------------------------------ | ------------------------------------------------------------------ | +| `model_id` | Model name | `Meta-Llama-3_3-70B-Instruct` | See [OVHcloud AI Endpoints Catalog](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog/) | +| `params` | Model-specific parameters | `{"max_tokens": 5000, "temperature": 0.7}` | Standard OpenAI-compatible parameters | + +## Troubleshooting + +### `ModuleNotFoundError: No module named 'openai'` + +You must install the `openai` dependency to use this provider: + +```bash +pip install 'strands-agents[openai]' +``` + +### Unexpected model behavior? + +Ensure you're using a model ID from the [OVHcloud AI Endpoints Catalog](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog/), and your `base_url` is set to `https://oai.endpoints.kepler.ai.cloud.ovh.net/v1`. 
+ +### Rate limit errors + +If you encounter rate limit errors, consider: +- Using an API key for higher rate limits +- Reviewing the [OVHcloud AI Endpoints documentation](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/) for rate limit details + +## References + +- [OVHcloud AI Endpoints](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/) +- [OVHcloud AI Endpoints Catalog](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog/) +- [OVHcloud Manager](https://ovh.com/manager) diff --git a/src/content/docs/community/tools/strands-google.mdx b/src/content/docs/community/tools/strands-google.mdx new file mode 100644 index 000000000..82fb03bc3 --- /dev/null +++ b/src/content/docs/community/tools/strands-google.mdx @@ -0,0 +1,76 @@ +--- +project: + pypi: https://pypi.org/project/strands-google/ + github: https://github.com/cagataycali/strands-google + maintainer: cagataycali +service: + name: google + link: https://console.cloud.google.com/ +title: strands-google +community: true +description: Google API integration +integrationType: tool +languages: Python +sidebar: + label: "google" +--- + + +[strands-google](https://github.com/cagataycali/strands-google) is a universal Google API integration tool that provides access to 200+ Google APIs (Gmail, Drive, Calendar, YouTube, Sheets, Docs, and more) directly from your Strands agent. 
+ +## Installation + +```bash +pip install strands-google +``` + +## Usage + +```python +from strands import Agent +from strands_google import use_google, gmail_send, gmail_reply + +agent = Agent(tools=[use_google, gmail_send, gmail_reply]) + +# Send an email +agent("Send an email to friend@example.com saying hello") + +# Search Gmail +agent("Find all unread emails from last week") + +# List Google Drive files +agent("Show me my recent Drive files") + +# List calendar events +agent("What meetings do I have today?") +``` + +## Key Features + +- **Universal Google API Access**: Gmail, Drive, Calendar, YouTube, Sheets, Docs, and 200+ more via the Discovery API +- **Flexible Authentication**: OAuth 2.0, Service Accounts, and API Keys +- **Gmail Helpers**: Easy email sending and replying with automatic encoding +- **Dynamic Scopes**: Configure OAuth scopes on-the-fly +- **Safety Prompts**: Mutative operations require confirmation by default + +## Configuration + +```bash +GOOGLE_OAUTH_CREDENTIALS=~/gmail_token.json # OAuth token path +GOOGLE_APPLICATION_CREDENTIALS=~/service-key.json # Service account path +GOOGLE_API_KEY=your_api_key # API key for public APIs +GOOGLE_API_SCOPES=gmail.readonly,drive.file # Default OAuth scopes +``` + +Run the authentication helper to set up OAuth: + +```bash +python -m strands_google.google_auth +``` + +## Resources + +- [PyPI Package](https://pypi.org/project/strands-google/) +- [GitHub Repository](https://github.com/cagataycali/strands-google) +- [Google Cloud Console](https://console.cloud.google.com/) +- [Google API Client Library](https://github.com/googleapis/google-api-python-client) diff --git a/src/content/docs/community/tools/strands-perplexity.mdx b/src/content/docs/community/tools/strands-perplexity.mdx new file mode 100644 index 000000000..c2d6e45f2 --- /dev/null +++ b/src/content/docs/community/tools/strands-perplexity.mdx @@ -0,0 +1,67 @@ +--- +project: + pypi: https://pypi.org/project/strands-perplexity/ + github: 
https://github.com/mkmeral/strands-perplexity + maintainer: mkmeral +service: + name: perplexity + link: https://docs.perplexity.ai/ +title: strands-perplexity +community: true +description: Perplexity web search +integrationType: tool +languages: Python +sidebar: + label: "perplexity" +--- + + +[strands-perplexity](https://github.com/mkmeral/strands-perplexity) is a real-time web search tool powered by the [Perplexity Search API](https://docs.perplexity.ai/guides/search-quickstart) with citation support, regional filtering, and multi-query capabilities. + +## Installation + +```bash +pip install strands-perplexity +``` + +## Usage + +```python +from strands import Agent +from strands_perplexity import perplexity_search + +agent = Agent(tools=[perplexity_search]) + +# Search the web +agent("What are the latest developments in AI?") + +# Research a topic +agent("Find recent papers on quantum computing breakthroughs") + +# Get current information +agent("What happened in tech news today?") +``` + +## Key Features + +- **Real-time Web Search**: Access ranked web search results from Perplexity's continuously refreshed index +- **Citations Included**: Every result includes URLs for proper attribution +- **Regional Search**: Filter results by country using ISO country codes +- **Language Filtering**: Filter results by language using ISO 639-1 codes +- **Domain Filtering**: Include or exclude specific domains from results +- **Multi-query Support**: Execute up to 5 related queries in a single request + +## Configuration + +```bash +PERPLEXITY_API_KEY=your_api_key # Required +``` + +Get your API key at: [Perplexity API Settings](https://perplexity.ai/account/api) + +## Resources + +- [PyPI Package](https://pypi.org/project/strands-perplexity/) +- [GitHub Repository](https://github.com/mkmeral/strands-perplexity) +- [Perplexity Search API Docs](https://docs.perplexity.ai/guides/search-quickstart) +- [Perplexity Search Best 
Practices](https://docs.perplexity.ai/guides/search-best-practices) diff --git a/src/content/docs/contribute/contributing/documentation.mdx b/src/content/docs/contribute/contributing/documentation.mdx index eecbaed6a..e073eed6d 100644 --- a/src/content/docs/contribute/contributing/documentation.mdx +++ b/src/content/docs/contribute/contributing/documentation.mdx @@ -89,6 +89,9 @@ Code examples are critical—they show developers exactly what to do. Always tes - Add comments — explain non-obvious parts - Use realistic names — avoid foo/bar, use descriptive names + + + ```python # Good: Start simple from strands import Agent @@ -100,9 +103,17 @@ from strands import Agent from strands.models import BedrockModel agent = Agent( - model=BedrockModel(model_id="anthropic.claude-3-sonnet"), + model=BedrockModel(model_id="us.anthropic.claude-sonnet-4-20250514"), system_prompt="You are a helpful assistant." ) agent("What's the weather like?") ``` + + + +```typescript +--8<-- "contribute/contributing/documentation.ts:basic_agent" +``` + + diff --git a/src/content/docs/contribute/contributing/documentation.ts b/src/content/docs/contribute/contributing/documentation.ts new file mode 100644 index 000000000..ce2dd47a3 --- /dev/null +++ b/src/content/docs/contribute/contributing/documentation.ts @@ -0,0 +1,14 @@ +// --8<-- [start:basic_agent] +import { Agent, BedrockModel } from '@strands-agents/sdk' + +// Good: Start simple +const agent = new Agent() +await agent.invoke('Hello, world!') + +// Then show configuration +const configuredAgent = new Agent({ + model: new BedrockModel({ modelId: 'us.anthropic.claude-sonnet-4-20250514' }), + systemPrompt: 'You are a helpful assistant.', +}) +await configuredAgent.invoke("What's the weather like?") +// --8<-- [end:basic_agent] diff --git a/src/content/docs/contribute/contributing/extensions.mdx b/src/content/docs/contribute/contributing/extensions.mdx index 8cc99d6c1..03acaab6c 100644 --- 
a/src/content/docs/contribute/contributing/extensions.mdx +++ b/src/content/docs/contribute/contributing/extensions.mdx @@ -6,7 +6,7 @@ sidebar: You've built a tool that calls your company's internal API. Or a model provider for a regional LLM service. Or a session manager that persists to Redis. It works great for your project—now you want to share it with others. -This guide walks you through packaging and publishing your Strands components so other developers can install them with `pip install`. +This guide walks you through packaging and publishing your Strands components so other developers can install and use them in their own agents. ## Why publish @@ -24,12 +24,27 @@ Strands has several extension points. Each serves a different purpose in the age |-----------|---------|------------| | **Tools** | Add capabilities to agents—call APIs, access databases, interact with services | [Custom tools](../../user-guide/concepts/tools/custom-tools.md) | | **Model providers** | Integrate LLM APIs beyond the built-in providers | [Custom model providers](../../user-guide/concepts/model-providers/custom_model_provider.md) | -| **Hook providers** | Extend or modify agent behavior during lifecycle events such as invocations, tool calls, and model calls | [Hooks](../../user-guide/concepts/agents/hooks.md) | +| **Plugins** | Extend or modify agent behavior during lifecycle events such as invocations, tool calls, and model calls | [Plugins](../../user-guide/concepts/plugins/index.md) | | **Session managers** | Persist conversations to external storage for resumption or sharing | [Session management](../../user-guide/concepts/agents/session-management.md) | | **Conversation managers** | Control how message history grows—trim old messages or summarize context | [Conversation management](../../user-guide/concepts/agents/conversation-management.md) | Tools are the most common extension type. They let agents interact with specific services like Slack, databases, or internal APIs. 
+## Start from the template + +:::note[Python only] +The extension template is currently Python-only. +::: + +The fastest way to create a publishable extension is the [extension template](https://github.com/strands-agents/extension-template-python). It gives you a ready-made project structure with skeleton implementations, testing setup, and a GitHub Actions workflow for publishing to PyPI. + +1. Click "Use this template" on GitHub to create your repository +2. Run `python setup_template.py` to customize the project — pick a package name, select which components you need (tool, model provider, plugin, session manager, conversation manager), and fill in your author info +3. Install dependencies with `pip install -e ".[dev]"` +4. Implement your component logic in the generated files +5. Run `hatch run prepare` to validate everything (format, lint, typecheck, test) + +The template follows Strands naming conventions automatically — your package becomes `strands-{name}` on PyPI and `strands_{name}` as a Python module. ## Get discovered diff --git a/src/content/docs/contribute/index.mdx b/src/content/docs/contribute/index.mdx index 8af0abae2..a9bc3c355 100644 --- a/src/content/docs/contribute/index.mdx +++ b/src/content/docs/contribute/index.mdx @@ -22,12 +22,14 @@ You can share your tools, model providers, hooks, and session managers with the | I want to... 
| What it involves | Guide | |--------------|------------------|-------| -| Publish an extension | Package your component and publish to PyPI so others can use it | [Publishing Extensions](./contributing/extensions.md) | +| Publish an extension | Package your component and publish it so others can use it | [Publishing Extensions](./contributing/extensions.md) | ## Community resources - [Community Catalog](../community/community-packages.md) — Discover community-built extensions -- [GitHub Discussions](https://github.com/strands-agents/sdk-python/discussions) — Ask questions, share ideas +- GitHub Discussions — Ask questions, share ideas + - [Python](https://github.com/strands-agents/sdk-python/discussions) + - [TypeScript](https://github.com/strands-agents/sdk-typescript/discussions) - [Roadmap](https://github.com/orgs/strands-agents/projects/8/views/1) — See what we're working on - [Development Tenets](https://github.com/strands-agents/docs/blob/main/team/TENETS.md) — Principles that guide SDK design - [Decision Records](https://github.com/strands-agents/docs/blob/main/team/DECISIONS.md) — Past design decisions with rationale diff --git a/src/content/docs/examples/README.mdx b/src/content/docs/examples/README.mdx index f1fd34bfc..552073a7c 100644 --- a/src/content/docs/examples/README.mdx +++ b/src/content/docs/examples/README.mdx @@ -4,87 +4,49 @@ sidebar: label: "Overview" --- -The examples directory provides a collection of sample implementations to help you get started with building intelligent agents using Strands Agents. This directory contains two main subdirectories: `/examples/python` for Python-based agent examples and `/examples/cdk` for Cloud Development Kit integration examples. - -## Purpose - -These examples demonstrate how to leverage Strands Agents to build intelligent agents for various use cases. From simple file operations to complex multi-agent systems, each example illustrates key concepts, patterns, and best practices in agent development. 
- -By exploring these reference implementations, you'll gain practical insights into Strands Agents' capabilities and learn how to apply them to your own projects. The examples emphasize real-world applications that you can adapt and extend for your specific needs. - -## Prerequisites - -- Python 3.10 or higher -- Strands Agents SDK -- AWS credentials configured with access to a Bedrock model provider using the Claude 4 model (modifiable as needed) -- For specific examples, additional requirements may be needed (see individual example READMEs) - -For more information, see the [Getting Started](../user-guide/quickstart.md) guide. +A collection of sample implementations to help you get started with Strands Agents. From simple agents to complex multi-agent systems, each example illustrates key concepts and patterns you can adapt for your own projects. ## Getting Started -1. Clone the repository containing these examples -2. Install the required dependencies: - - [strands-agents](https://github.com/strands-agents/sdk-python) - - [strands-agents-tools](https://github.com/strands-agents/tools) -3. Navigate to the examples directory: - ```bash - cd /path/to/examples/ - ``` -4. Browse the available examples in the `/examples/python` and `/examples/cdk` directories -5. Each example includes its own README or documentation file with specific instructions -6. Follow the documentation to run the example and understand its implementation - -## Directory Structure - -### Python Examples - -The `/examples/python` directory contains various Python-based examples demonstrating different agent capabilities. Each example includes detailed documentation explaining its purpose, implementation details, and instructions for running it. - -These examples cover a diverse range of agent capabilities and patterns, showcasing the flexibility and power of Strands Agents. The directory is regularly updated with new examples as additional features and use cases are developed. 
- -Available Python examples: - -- [Agents Workflows](python/agents_workflows.md) - Example of a sequential agent workflow pattern -- [CLI Reference Agent](python/cli-reference-agent.md) - Example of Command-line reference agent implementation -- [File Operations](python/file_operations.md) - Example of agent with file manipulation capabilities -- [MCP Calculator](python/mcp_calculator.md) - Example of agent with Model Context Protocol capabilities -- [Meta Tooling](python/meta_tooling.md) - Example of agent with Meta tooling capabilities -- [Multi-Agent Example](python/multi_agent_example/multi_agent_example.md) - Example of a multi-agent system -- [Weather Forecaster](python/weather_forecaster.md) - Example of a weather forecasting agent with http_request capabilities - -### CDK Examples - -The `/examples/cdk` directory contains examples for using the AWS Cloud Development Kit (CDK) with agents. The CDK is an open-source software development framework for defining cloud infrastructure as code and provisioning it through AWS CloudFormation. These examples demonstrate how to deploy agent-based applications to AWS using infrastructure as code principles. - -Each CDK example includes its own documentation with instructions for setup and deployment. 
- -Available CDK examples: - -- [Deploy to EC2](https://github.com/strands-agents/docs/blob/main/docs/examples/cdk/deploy_to_ec2/README.md) - Guide for deploying agents to Amazon EC2 instances -- [Deploy to Fargate](https://github.com/strands-agents/docs/blob/main/docs/examples/cdk/deploy_to_fargate/README.md) - Guide for deploying agents to AWS Fargate -- [Deploy to App Runner](https://github.com/strands-agents/docs/blob/main/docs/examples/cdk/deploy_to_apprunner/README.md) - Guide for deploying agents to AWS App Runner -- [Deploy to Lambda](https://github.com/strands-agents/docs/blob/main/docs/examples/cdk/deploy_to_lambda/README.md) - Guide for deploying agents to AWS Lambda - -### TypeScript Examples - -The `/examples/typescript` directory contains TypeScript-based examples demonstrating agent deployment and integration patterns. These examples showcase how to build and Deploy Typescript agents. - -Available TypeScript examples: - -- [Deploy to Bedrock AgentCore](https://github.com/strands-agents/docs/blob/main/docs/examples/typescript/deploy_to_bedrock_agentcore/README.md) - Complete example for deploying TypeScript agents to Amazon Bedrock AgentCore Runtime. - -### Amazon EKS Example - -The `/examples/deploy_to_eks` directory contains examples for using Amazon EKS with agents. -The [Deploy to Amazon EKS](https://github.com/strands-agents/docs/blob/main/docs/examples/deploy_to_eks/README.md) includes its own documentation with instruction for setup and deployment. - -## Example Structure - -Each example typically follows this structure: - -- Python implementation file(s) (`.py`) -- Documentation file (`.md`) explaining the example's purpose, architecture, and usage -- Any additional resources needed for the example - -To run any specific example, refer to its associated documentation for detailed instructions and requirements. +1. 
Set up the SDK for your language: + - [Python quickstart](../user-guide/quickstart/python.md) (Python 3.10+, pip) + - [TypeScript quickstart](../user-guide/quickstart/typescript.md) (Node.js 20+, npm) +2. Configure AWS credentials for Amazon Bedrock (covered in both quickstart guides above), or set up an [alternative model provider](../user-guide/concepts/model-providers/index.md) +3. Clone the examples: + ```bash + git clone https://github.com/strands-agents/docs.git + cd docs/docs/examples + ``` +4. Browse the examples below and follow the instructions in each one + +## Agent Examples + +| Example | Description | Python | TypeScript | +|---------|-------------|:------:|:----------:| +| [Structured Output](structured_output.md) | Type-safe, validated responses | ✅ | ✅ | +| [Agents Workflows](python/agents_workflows.md) | Sequential agent workflow pattern | ✅ | | +| [CLI Reference Agent](python/cli-reference-agent.md) | Command-line reference agent | ✅ | | +| [File Operations](python/file_operations.md) | File manipulation capabilities | ✅ | | +| [Graph Loops](python/graph_loops_example.md) | Graph orchestration with loops | ✅ | | +| [Knowledge Base Agent](python/knowledge_base_agent.md) | Knowledge base retrieval | ✅ | | +| [MCP Calculator](python/mcp_calculator.md) | Model Context Protocol capabilities | ✅ | | +| [Memory Agent](python/memory_agent.md) | Persistent memory | ✅ | | +| [Meta Tooling](python/meta_tooling.md) | Meta tooling capabilities | ✅ | | +| [Multi-Agent Example](python/multi_agent_example/multi_agent_example.md) | Multi-agent system | ✅ | | +| [Multimodal](python/multimodal.md) | Multimodal capabilities | ✅ | | +| [Weather Forecaster](python/weather_forecaster.md) | Weather forecasting agent | ✅ | | + +## Deployment Examples + +Also see [Operating Agents in Production](../user-guide/deploy/operating-agents-in-production.md) for best practices on security, monitoring, and scaling. 
+ +| Guide | Description | Python | TypeScript | +|-------|-------------|:------:|:----------:| +| [Bedrock AgentCore](../user-guide/deploy/deploy_to_bedrock_agentcore/index.md) | Serverless agent runtime | ✅ | ✅ | +| [Docker](../user-guide/deploy/deploy_to_docker/index.md) | Containerized deployment | ✅ | ✅ | +| [AWS Lambda](../user-guide/deploy/deploy_to_aws_lambda.md) | Serverless compute | ✅ | | +| [AWS Fargate](../user-guide/deploy/deploy_to_aws_fargate.md) | Serverless containers | ✅ | | +| [AWS App Runner](../user-guide/deploy/deploy_to_aws_apprunner.md) | Managed web applications | ✅ | | +| [Amazon EC2](../user-guide/deploy/deploy_to_amazon_ec2.md) | Virtual machines | ✅ | | +| [Amazon EKS](../user-guide/deploy/deploy_to_amazon_eks.md) | Managed Kubernetes | ✅ | | +| [Kubernetes](../user-guide/deploy/deploy_to_kubernetes.md) | Self-managed Kubernetes | ✅ | | diff --git a/src/content/docs/examples/python/structured_output.mdx b/src/content/docs/examples/python/structured_output.mdx deleted file mode 100644 index bfca65f55..000000000 --- a/src/content/docs/examples/python/structured_output.mdx +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: Structured Output Example -sidebar: - label: "Structured Output" ---- - -This example demonstrates how to use Strands' structured output feature to get type-safe, validated responses from language models using [Pydantic](https://docs.pydantic.dev/latest/concepts/models/) models. Instead of raw text that you need to parse manually, you define the exact structure you want and receive a validated Python object. - -## What You'll Learn - -- How to define Pydantic models for structured output -- Extracting structured information from text -- Using conversation history with structured output -- Working with complex nested models - -## Code Example - -The example covers four key use cases: - -1. Basic structured output -2. Using existing conversation context -3. 
Working with complex nested models - -```python -#!/usr/bin/env python3 -""" -Structured Output Example - -This example demonstrates how to use structured output with Strands Agents to -get type-safe, validated responses using Pydantic models. -""" -import asyncio -import tempfile -from typing import List, Optional -from pydantic import BaseModel, Field -from strands import Agent - -def basic_example(): - """Basic example extracting structured information from text.""" - print("\n--- Basic Example ---") - - class PersonInfo(BaseModel): - name: str - age: int - occupation: str - - agent = Agent() - result = agent.structured_output( - PersonInfo, - "John Smith is a 30-year-old software engineer" - ) - - print(f"Name: {result.name}") # "John Smith" - print(f"Age: {result.age}") # 30 - print(f"Job: {result.occupation}") # "software engineer" - - -def multimodal_example(): - """Basic example extracting structured information from a document.""" - print("\n--- Multi-Modal Example ---") - - class PersonInfo(BaseModel): - name: str - age: int - occupation: str - - with tempfile.NamedTemporaryFile(delete=False) as person_file: - person_file.write(b"John Smith is a 30-year old software engineer") - person_file.flush() - - with open(person_file.name, "rb") as fp: - document_bytes = fp.read() - - agent = Agent() - result = agent.structured_output( - PersonInfo, - [ - {"text": "Please process this application."}, - { - "document": { - "format": "txt", - "name": "application", - "source": { - "bytes": document_bytes, - }, - }, - }, - ] - ) - - print(f"Name: {result.name}") # "John Smith" - print(f"Age: {result.age}") # 30 - print(f"Job: {result.occupation}") # "software engineer" - - -def conversation_history_example(): - """Example using conversation history with structured output.""" - print("\n--- Conversation History Example ---") - - agent = Agent() - - # Build up conversation context - print("Building conversation context...") - agent("What do you know about Paris, 
France?") - agent("Tell me about the weather there in spring.") - - # Extract structured information with a prompt - class CityInfo(BaseModel): - city: str - country: str - population: Optional[int] = None - climate: str - - # Uses existing conversation context with a prompt - print("Extracting structured information from conversation context...") - result = agent.structured_output(CityInfo, "Extract structured information about Paris") - - print(f"City: {result.city}") - print(f"Country: {result.country}") - print(f"Population: {result.population}") - print(f"Climate: {result.climate}") - - -def complex_nested_model_example(): - """Example handling complex nested data structures.""" - print("\n--- Complex Nested Model Example ---") - - class Address(BaseModel): - street: str - city: str - country: str - postal_code: Optional[str] = None - - class Contact(BaseModel): - email: Optional[str] = None - phone: Optional[str] = None - - class Person(BaseModel): - """Complete person information.""" - name: str = Field(description="Full name of the person") - age: int = Field(description="Age in years") - address: Address = Field(description="Home address") - contacts: List[Contact] = Field(default_factory=list, description="Contact methods") - skills: List[str] = Field(default_factory=list, description="Professional skills") - - agent = Agent() - result = agent.structured_output( - Person, - "Extract info: Jane Doe, a systems admin, 28, lives at 123 Main St, New York, USA. 
Email: jane@example.com" - ) - - print(f"Name: {result.name}") # "Jane Doe" - print(f"Age: {result.age}") # 28 - print(f"Street: {result.address.street}") # "123 Main St" - print(f"City: {result.address.city}") # "New York" - print(f"Country: {result.address.country}") # "USA" - print(f"Email: {result.contacts[0].email}") # "jane@example.com" - print(f"Skills: {result.skills}") # ["systems admin"] - - -async def async_example(): - """Basic example extracting structured information from text asynchronously.""" - print("\n--- Async Example ---") - - class PersonInfo(BaseModel): - name: str - age: int - occupation: str - - agent = Agent() - result = await agent.structured_output_async( - PersonInfo, - "John Smith is a 30-year-old software engineer" - ) - - print(f"Name: {result.name}") # "John Smith" - print(f"Age: {result.age}") # 30 - print(f"Job: {result.occupation}") # "software engineer" - - -if __name__ == "__main__": - print("Structured Output Examples\n") - - basic_example() - multimodal_example() - conversation_history_example() - complex_nested_model_example() - asyncio.run(async_example()) - - print("\nExamples completed.") -``` - -## How It Works - -1. **Define a Schema**: Create a Pydantic model that defines the structure you want -2. **Call structured_output()**: Pass your model and optionally a prompt to the agent - - If running async, call `structured_output_async()` instead. -3. **Get Validated Results**: Receive a properly typed Python object matching your schema - -The `structured_output()` method ensures that the language model generates a response that conforms to your specified schema. It handles converting your Pydantic model into a format the model understands and validates the response. 
- -## Key Benefits - -- Type-safe responses with proper Python types -- Automatic validation against your schema -- IDE type hinting from LLM-generated responses -- Clear documentation of expected output -- Error prevention for malformed responses - -## Learn More - -For more details on structured output, see the [Structured Output documentation](../../user-guide/concepts/agents/structured-output.md). diff --git a/src/content/docs/examples/structured_output.mdx b/src/content/docs/examples/structured_output.mdx new file mode 100644 index 000000000..ba73970db --- /dev/null +++ b/src/content/docs/examples/structured_output.mdx @@ -0,0 +1,114 @@ +--- +title: Structured Output Example +sidebar: + label: "Structured Output" +redirectFrom: + - docs/examples/python/structured_output +--- + +Structured output lets you get type-safe, validated responses from language models. Instead of raw text that you need to parse manually, you define the exact structure you want and receive a validated object. + +Each language uses its own schema library for defining output structures. See the tabs below for language-specific examples. + +## Basic Structured Output + +Define a schema and pass it to the agent. The agent returns a validated object matching your schema. 
+ + + + +```python +from pydantic import BaseModel +from strands import Agent + +class PersonInfo(BaseModel): + name: str + age: int + occupation: str + +agent = Agent() +result = agent( + "John Smith is a 30-year-old software engineer", + structured_output_model=PersonInfo +) + +print(f"Name: {result.structured_output.name}") # "John Smith" +print(f"Age: {result.structured_output.age}") # 30 +print(f"Job: {result.structured_output.occupation}") # "software engineer" +``` + + + +```typescript +import { Agent } from '@strands-agents/sdk' +import { z } from 'zod' + +--8<-- "examples/structured_output.ts:basic_example" +``` + + + +## Complex Nested Schemas + +Schemas can be nested to represent complex data structures: + + + + +```python +from typing import List, Optional +from pydantic import BaseModel, Field +from strands import Agent + +class Address(BaseModel): + street: str + city: str + country: str + postal_code: Optional[str] = None + +class Contact(BaseModel): + email: Optional[str] = None + phone: Optional[str] = None + +class Person(BaseModel): + name: str = Field(description="Full name of the person") + age: int = Field(description="Age in years") + address: Address = Field(description="Home address") + contacts: List[Contact] = Field(default_factory=list, description="Contact methods") + skills: List[str] = Field(default_factory=list, description="Professional skills") + +agent = Agent() +result = agent( + "Extract info: Jane Doe, a systems admin, 28, lives at 123 Main St, New York, USA. 
Email: jane@example.com", + structured_output_model=Person +) + +print(f"Name: {result.structured_output.name}") +print(f"Age: {result.structured_output.age}") +print(f"Street: {result.structured_output.address.street}") +print(f"City: {result.structured_output.address.city}") +print(f"Email: {result.structured_output.contacts[0].email}") +``` + + + +```typescript +import { Agent } from '@strands-agents/sdk' +import { z } from 'zod' + +--8<-- "examples/structured_output.ts:nested_models" +``` + + + +## How It Works + +1. Define a schema using your language's schema library +2. Pass the schema to the agent when invoking it +3. Access the validated output from the result + +The agent converts your schema into a tool specification that guides the language model to produce correctly formatted responses, then validates the output automatically. + +## Learn More + +For more details, see the [Structured Output documentation](../user-guide/concepts/agents/structured-output.md). diff --git a/src/content/docs/examples/structured_output.ts b/src/content/docs/examples/structured_output.ts new file mode 100644 index 000000000..d29728ed5 --- /dev/null +++ b/src/content/docs/examples/structured_output.ts @@ -0,0 +1,61 @@ +import { Agent } from '@strands-agents/sdk' +import { z } from 'zod' + +// --8<-- [start:basic_example] +const PersonInfo = z.object({ + name: z.string().describe('Name of the person'), + age: z.number().describe('Age of the person'), + occupation: z.string().describe('Occupation of the person'), +}) + +type PersonInfo = z.infer + +const basicAgent = new Agent() +const basicResult = await basicAgent.invoke('John Smith is a 30-year-old software engineer', { + structuredOutputSchema: PersonInfo, +}) + +const person = basicResult.structuredOutput as PersonInfo +console.log(`Name: ${person.name}`) // "John Smith" +console.log(`Age: ${person.age}`) // 30 +console.log(`Job: ${person.occupation}`) // "software engineer" +// --8<-- [end:basic_example] + +async function 
nestedExample() { + // --8<-- [start:nested_models] + const Address = z.object({ + street: z.string(), + city: z.string(), + country: z.string(), + postalCode: z.string().optional(), + }) + + const Contact = z.object({ + email: z.string().optional(), + phone: z.string().optional(), + }) + + const Person = z.object({ + name: z.string().describe('Full name of the person'), + age: z.number().describe('Age in years'), + address: Address.describe('Home address'), + contacts: z.array(Contact).describe('Contact methods'), + skills: z.array(z.string()).describe('Professional skills'), + }) + + type Person = z.infer + + const agent = new Agent() + const result = await agent.invoke( + 'Extract info: Jane Doe, a systems admin, 28, lives at 123 Main St, New York, USA. Email: jane@example.com', + { structuredOutputSchema: Person }, + ) + + const person = result.structuredOutput as Person + console.log(`Name: ${person.name}`) // "Jane Doe" + console.log(`Age: ${person.age}`) // 28 + console.log(`Street: ${person.address.street}`) // "123 Main St" + console.log(`City: ${person.address.city}`) // "New York" + console.log(`Email: ${person.contacts[0].email}`) // "jane@example.com" + // --8<-- [end:nested_models] +} diff --git a/src/content/docs/readme.ts b/src/content/docs/readme.ts deleted file mode 100644 index e54f5775c..000000000 --- a/src/content/docs/readme.ts +++ /dev/null @@ -1,11 +0,0 @@ -// --8<-- [start:basicAgent] -// Create a basic agent -import { Agent } from '@strands-agents/sdk' - -// Create an agent with default settings -const agent = new Agent(); - -// Ask the agent a question -const response = await agent.invoke("Tell me about agentic AI"); -console.log(response.lastMessage); -// --8<-- [end:basicAgent] diff --git a/src/content/docs/user-guide/build-with-ai.mdx b/src/content/docs/user-guide/build-with-ai.mdx index a88877e9a..3f2a85230 100644 --- a/src/content/docs/user-guide/build-with-ai.mdx +++ b/src/content/docs/user-guide/build-with-ai.mdx @@ -24,6 +24,9 @@ 
Choose your AI coding tool below and follow the setup instructions. You can use the Strands Agents MCP server as a tool within your own Strands agents: + + + ```python from mcp import stdio_client, StdioServerParameters from strands import Agent @@ -40,6 +43,16 @@ agent = Agent(tools=[mcp_client]) agent("How do I create a custom tool in Strands Agents?") ``` + + + +```typescript +--8<-- "user-guide/build-with-ai.ts:mcp_strands" +``` + + + + See the [MCP tools documentation](/docs/user-guide/concepts/tools/mcp-tools/) for more details on using MCP tools with Strands agents. @@ -73,26 +86,6 @@ claude mcp add strands uvx strands-agents-mcp-server See the [Claude Code MCP documentation](https://docs.anthropic.com/en/docs/claude-code/tutorials#configure-mcp-servers) for more details. - - - -Add the following to `~/.aws/amazonq/mcp.json`: - -```json -{ - "mcpServers": { - "strands-agents": { - "command": "uvx", - "args": ["strands-agents-mcp-server"], - "disabled": false, - "autoApprove": ["search_docs", "fetch_doc"] - } - } -} -``` - -See the [Q Developer CLI MCP documentation](https://docs.aws.amazon.com/amazonq/latest/qdeveloper-ug/command-line-mcp-configuration.html) for more details. - @@ -183,6 +176,6 @@ The llms-full.txt file contains the entire documentation and can be large. For m ## Tips for AI-assisted Strands development - **Use the MCP server over llms.txt when possible** — it retrieves only the relevant sections, saving tokens and improving accuracy. -- **Start from examples** — point your AI tool at the [examples](../examples/README.mdx) for common patterns like [multi-agent systems](../examples/python/multi_agent_example/multi_agent_example.mdx), [structured output](../examples/python/structured_output.mdx), and [tool use](../examples/python/mcp_calculator.mdx). 
+- **Start from examples** — point your AI tool at the [examples](../examples/README.mdx) for common patterns like [multi-agent systems](../examples/python/multi_agent_example/multi_agent_example.mdx), [structured output](../examples/structured_output.mdx), and [tool use](../examples/python/mcp_calculator.mdx). - **Review AI-generated code** — always verify that generated code follows the patterns in the official documentation, especially for model provider configuration and tool definitions. - **Use project rules** — many AI coding tools support project-level instructions (e.g., `.cursorrules`, `CLAUDE.md`). Add Strands-specific conventions to keep AI output consistent across your project. diff --git a/src/content/docs/user-guide/build-with-ai.ts b/src/content/docs/user-guide/build-with-ai.ts new file mode 100644 index 000000000..c323e0346 --- /dev/null +++ b/src/content/docs/user-guide/build-with-ai.ts @@ -0,0 +1,18 @@ +import { Agent, McpClient } from '@strands-agents/sdk' +import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js' + +async function mcpExample() { + // --8<-- [start:mcp_strands] + const mcpClient = new McpClient({ + transport: new StdioClientTransport({ + command: 'uvx', + args: ['strands-agents-mcp-server'], + }), + }) + + const agent = new Agent({ tools: [mcpClient] }) + await agent.invoke('How do I create a custom tool in Strands Agents?') + + await mcpClient.disconnect() + // --8<-- [end:mcp_strands] +} diff --git a/src/content/docs/user-guide/concepts/agents/hooks.mdx b/src/content/docs/user-guide/concepts/agents/hooks.mdx index c8d087a7c..064ab1eca 100644 --- a/src/content/docs/user-guide/concepts/agents/hooks.mdx +++ b/src/content/docs/user-guide/concepts/agents/hooks.mdx @@ -349,6 +349,7 @@ Most event properties are read-only to prevent unintended modifications. However - [`AfterToolCallEvent`](@api/python/strands.hooks.events#AfterToolCallEvent) - `result` - Modify the tool result. 
See [Result Modification](#result-modification). - `retry` - Request a retry of the tool invocation. See [Tool Call Retry](#tool-call-retry). + - `exception` *(read-only)* - The original exception if the tool raised one, otherwise `None`. See [Exception Handling](#exception-handling). - [`AfterInvocationEvent`](@api/python/strands.hooks.events#AfterInvocationEvent) - `resume` - Trigger a follow-up agent invocation with new input. See [Invocation resume](#invocation-resume). @@ -356,6 +357,12 @@ Most event properties are read-only to prevent unintended modifications. However +- `BeforeToolsEvent` + - `cancel` - Cancel all tool calls in a batch with a message. See [Limit Tool Counts](#limit-tool-counts). + +- `BeforeToolCallEvent` + - `cancel` - Cancel tool execution with a message. See [Limit Tool Counts](#limit-tool-counts). + - `AfterModelCallEvent` - `retry` - Request a retry of the model invocation. @@ -795,8 +802,8 @@ class LimitToolCounts(HookProvider): -```ts -// This feature is not yet available in TypeScript SDK +```typescript +--8<-- "user-guide/concepts/agents/hooks.ts:limit_tool_counts_class" ``` @@ -819,8 +826,8 @@ agent("Sleep once") -```ts -// This feature is not yet available in TypeScript SDK +```typescript +--8<-- "user-guide/concepts/agents/hooks.ts:limit_tool_counts_usage" ``` @@ -897,6 +904,49 @@ result = agent("What is the capital of France?") +### Exception Handling + +When a tool raises an exception, the agent converts it to an error result and returns it to the model, allowing the model to adjust its approach and retry. This works well for expected errors like validation failures, but for unexpected errors—assertion failures, configuration errors, or bugs—you may want to fail immediately rather than let the model retry futilely. 
The `exception` property on `AfterToolCallEvent` provides access to the original exception, enabling hooks to inspect error types and selectively propagate those that shouldn't be retried: + + + + +```python +class PropagateUnexpectedExceptions(HookProvider): + """Re-raise unexpected exceptions instead of returning them to the model.""" + + def __init__(self, allowed_exceptions: tuple[type[Exception], ...] = (ValueError,)): + self.allowed_exceptions = allowed_exceptions + + def register_hooks(self, registry: HookRegistry) -> None: + registry.add_callback(AfterToolCallEvent, self._check_exception) + + def _check_exception(self, event: AfterToolCallEvent) -> None: + if event.exception is None: + return # Tool succeeded + if isinstance(event.exception, self.allowed_exceptions): + return # Let model retry these + raise event.exception # Propagate unexpected errors +``` + +```python +# Usage +agent = Agent( + model=model, + tools=[my_tool], + hooks=[PropagateUnexpectedExceptions(allowed_exceptions=(ValueError, ValidationError))], +) +``` + + + + +```ts +// This feature is not yet available in TypeScript SDK +``` + + + ### Tool Call Retry Useful for implementing custom retry logic for tool invocations. The `AfterToolCallEvent.retry` field allows hooks to request that a tool be re-executed—for example, to handle transient errors, timeouts, or flaky external services. When `retry` is set to `True`, the tool executor discards the current result and invokes the tool again with the same `tool_use_id`. 
diff --git a/src/content/docs/user-guide/concepts/agents/hooks.ts b/src/content/docs/user-guide/concepts/agents/hooks.ts index 221290a44..f2ff52901 100644 --- a/src/content/docs/user-guide/concepts/agents/hooks.ts +++ b/src/content/docs/user-guide/concepts/agents/hooks.ts @@ -82,7 +82,7 @@ async function toolInterceptionExample() { name = 'tool-interceptor' initAgent(agent: LocalAgent): void { - agent.addHook(BeforeToolCallEvent, (ev) => this.interceptTool(ev)) + agent.addHook(BeforeToolCallEvent, (event) => this.interceptTool(event)) } private interceptTool(event: BeforeToolCallEvent): void { @@ -224,6 +224,60 @@ async function fixedToolArgumentsExample() { // --8<-- [end:fixed_tool_arguments_usage] } +async function limitToolCountsExample() { + // --8<-- [start:limit_tool_counts_class] + class LimitToolCounts implements Plugin { + private maxToolCounts: Record + private toolCounts: Record = {} + + /** + * Initialize with maximum allowed invocations per tool. + * + * @param maxToolCounts - A dictionary mapping tool names to their maximum + * allowed invocation counts per agent invocation. + */ + constructor(maxToolCounts: Record) { + this.maxToolCounts = maxToolCounts + } + + name = 'limit-tool-counts' + + initAgent(agent: LocalAgent): void { + agent.addHook(BeforeInvocationEvent, () => this.resetCounts()) + agent.addHook(BeforeToolCallEvent, (event) => this.interceptTool(event)) + } + + private resetCounts(): void { + this.toolCounts = {} + } + + private interceptTool(event: BeforeToolCallEvent): void { + const toolName = event.toolUse.name + const maxToolCount = this.maxToolCounts[toolName] + const toolCount = (this.toolCounts[toolName] ?? 0) + 1 + this.toolCounts[toolName] = toolCount + + if (maxToolCount !== undefined && toolCount > maxToolCount) { + event.cancel = + `Tool '${toolName}' has been invoked too many times and is now being throttled. 
` + + `DO NOT CALL THIS TOOL ANYMORE` + } + } + } + // --8<-- [end:limit_tool_counts_class] + + // --8<-- [start:limit_tool_counts_usage] + const limitPlugin = new LimitToolCounts({ sleep: 3 }) + + const agent = new Agent({ tools: [sleep], plugins: [limitPlugin] }) + + // This call will only have 3 successful sleeps + await agent.invoke('Sleep 5 times for 10ms each or until you can\'t anymore') + // This will sleep successfully again because the count resets every invocation + await agent.invoke('Sleep once') + // --8<-- [end:limit_tool_counts_usage] +} + // ===================== // Multi-Agent Hook Examples // ===================== @@ -342,6 +396,7 @@ async function layeredHooksExample() { } // Suppress unused function warnings +void limitToolCountsExample void orchestratorCallbackExample void conditionalNodeExecutionExample void orchestratorAgnosticDesignExample diff --git a/src/content/docs/user-guide/concepts/agents/structured-output.mdx b/src/content/docs/user-guide/concepts/agents/structured-output.mdx index 32c4bb1c7..31d0fa7d6 100644 --- a/src/content/docs/user-guide/concepts/agents/structured-output.mdx +++ b/src/content/docs/user-guide/concepts/agents/structured-output.mdx @@ -101,7 +101,7 @@ In Python, Strands accepts the `structured_output_model` parameter in agent invo ### Error Handling -When structured output validation fails, Strands throws a custom `StructuredOutputException` that can be caught and handled appropriately: +When structured output validation fails, Strands throws a custom exception that can be caught and handled appropriately: diff --git a/src/content/docs/user-guide/concepts/agents/structured-output.ts b/src/content/docs/user-guide/concepts/agents/structured-output.ts index 8874b1054..6be907f89 100644 --- a/src/content/docs/user-guide/concepts/agents/structured-output.ts +++ b/src/content/docs/user-guide/concepts/agents/structured-output.ts @@ -1,4 +1,4 @@ -import { Agent, StructuredOutputException, tool } from '@strands-agents/sdk' 
+import { Agent, StructuredOutputError, tool } from '@strands-agents/sdk' import { z } from 'zod' // --8<-- [start:basic_usage] @@ -39,7 +39,7 @@ async function errorHandling() { try { const result = await agent.invoke('some prompt') } catch (error) { - if (error instanceof StructuredOutputException) { + if (error instanceof StructuredOutputError) { console.log(`Structured output failed: ${error.message}`) } } diff --git a/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock.mdx b/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock.mdx index f6863c843..52d28bdd0 100644 --- a/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock.mdx +++ b/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock.mdx @@ -511,6 +511,10 @@ When a guardrail is triggered: - Output redaction (disabled by default): If a guardrail policy is triggered, the output is redacted - Custom redaction messages can be specified for both input and output redactions +:::note[Latest Message Evaluation] +When `guardLatestUserMessage: true`, only the most recent user message is sent to guardrails for evaluation instead of the entire conversation. This can improve performance and reduce costs in multi-turn conversations where earlier messages have already been validated. 
+::: + diff --git a/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock.ts b/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock.ts index b10257894..a0a4e0241 100644 --- a/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock.ts +++ b/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock.ts @@ -409,6 +409,7 @@ async function guardrailsExample() { output: false, // Default: false outputMessage: '[Assistant output redacted.]', // Custom redaction message }, + guardLatestUserMessage: true, // Only evaluate the latest user message (default: false) }, }) diff --git a/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock_imports.ts b/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock_imports.ts index d1553399d..fa6916e7c 100644 --- a/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock_imports.ts +++ b/src/content/docs/user-guide/concepts/model-providers/amazon-bedrock_imports.ts @@ -10,5 +10,5 @@ import { z } from 'zod' // --8<-- [end:tool_update_config_imports] // --8<-- [start:custom_credentials_imports] -import { BedrockModel } from '@strands-agents/sdk/bedrock' +import { BedrockModel } from '@strands-agents/sdk/models/bedrock' // --8<-- [end:custom_credentials_imports] diff --git a/src/content/docs/user-guide/concepts/model-providers/gemini.mdx b/src/content/docs/user-guide/concepts/model-providers/google.mdx similarity index 96% rename from src/content/docs/user-guide/concepts/model-providers/gemini.mdx rename to src/content/docs/user-guide/concepts/model-providers/google.mdx index be188c56b..bb024a3d7 100644 --- a/src/content/docs/user-guide/concepts/model-providers/gemini.mdx +++ b/src/content/docs/user-guide/concepts/model-providers/google.mdx @@ -1,9 +1,9 @@ --- -title: Gemini +title: Google integrationType: model-provider --- -[Google Gemini](https://ai.google.dev/api) is Google's family of multimodal large language models designed for advanced reasoning, 
code generation, and creative tasks. The Strands Agents SDK implements a Gemini provider, allowing you to run agents against the Gemini models available through Google's AI API. +[Google Gemini](https://ai.google.dev/api) is Google's family of multimodal large language models designed for advanced reasoning, code generation, and creative tasks. The Strands Agents SDK implements a Google/Gemini provider, allowing you to run agents against the Gemini models available through Google's AI API. ## Installation @@ -62,9 +62,9 @@ print(response) ```typescript import { Agent } from '@strands-agents/sdk' -import { GeminiModel } from '@strands-agents/sdk/gemini' +import { GoogleModel } from '@strands-agents/sdk/models/google' -const model = new GeminiModel({ +const model = new GoogleModel({ apiKey: '', modelId: 'gemini-2.5-flash', params: { @@ -333,11 +333,11 @@ print(response) ```typescript import { GoogleGenAI } from '@google/genai' import { Agent } from '@strands-agents/sdk' -import { GeminiModel } from '@strands-agents/sdk/gemini' +import { GoogleModel } from '@strands-agents/sdk/models/google' const client = new GoogleGenAI({ apiKey: '' }) -const model = new GeminiModel({ +const model = new GoogleModel({ client, modelId: 'gemini-2.5-flash', params: { @@ -396,9 +396,9 @@ response = agent([ ```typescript import { Agent, ImageBlock, TextBlock } from '@strands-agents/sdk' -import { GeminiModel } from '@strands-agents/sdk/gemini' +import { GoogleModel } from '@strands-agents/sdk/models/google' -const model = new GeminiModel({ +const model = new GoogleModel({ apiKey: '', modelId: 'gemini-2.5-flash', }) diff --git a/src/content/docs/user-guide/concepts/model-providers/index.mdx b/src/content/docs/user-guide/concepts/model-providers/index.mdx index 671945f95..c3b078fdd 100644 --- a/src/content/docs/user-guide/concepts/model-providers/index.mdx +++ b/src/content/docs/user-guide/concepts/model-providers/index.mdx @@ -123,6 +123,6 @@ response = agent("What can you help me with?") 
- **[Amazon Bedrock](amazon-bedrock.md)** - Default provider with wide model selection, enterprise features, and full Python/TypeScript support - **[OpenAI](openai.md)** - GPT models with streaming support -- **[Gemini](gemini.md)** - Google's Gemini models with tool calling support +- **[Google](google.md)** - Google's Gemini models with tool calling support - **[Custom Providers](custom_model_provider.md)** - Build your own model integration - **[Anthropic](anthropic.md)** - Direct Claude API access (Python only) diff --git a/src/content/docs/user-guide/concepts/model-providers/index.ts b/src/content/docs/user-guide/concepts/model-providers/index.ts index 86de7a2c4..d58cbc979 100644 --- a/src/content/docs/user-guide/concepts/model-providers/index.ts +++ b/src/content/docs/user-guide/concepts/model-providers/index.ts @@ -20,8 +20,9 @@ async function basicUsage() { // Alternatively, use OpenAI by just switching model provider const openaiModel = new OpenAIModel({ + api: 'chat', apiKey: process.env.OPENAI_API_KEY, - modelId: 'gpt-4o', + modelId: 'gpt-5.4', }) agent = new Agent({ model: openaiModel }) response = await agent.invoke('What can you help me with?') diff --git a/src/content/docs/user-guide/concepts/model-providers/index_imports.ts b/src/content/docs/user-guide/concepts/model-providers/index_imports.ts index 37178a69a..7d3dbf2fb 100644 --- a/src/content/docs/user-guide/concepts/model-providers/index_imports.ts +++ b/src/content/docs/user-guide/concepts/model-providers/index_imports.ts @@ -2,6 +2,6 @@ // --8<-- [start:basic_usage_imports] import { Agent } from '@strands-agents/sdk' -import { BedrockModel } from '@strands-agents/sdk/bedrock' -import { OpenAIModel } from '@strands-agents/sdk/openai' +import { BedrockModel } from '@strands-agents/sdk/models/bedrock' +import { OpenAIModel } from '@strands-agents/sdk/models/openai' // --8<-- [end:basic_usage_imports] diff --git a/src/content/docs/user-guide/concepts/model-providers/openai.mdx 
b/src/content/docs/user-guide/concepts/model-providers/openai.mdx index bffb62df6..443a8bde2 100644 --- a/src/content/docs/user-guide/concepts/model-providers/openai.mdx +++ b/src/content/docs/user-guide/concepts/model-providers/openai.mdx @@ -117,7 +117,7 @@ The model configuration sets parameters for inference: | Parameter | Description | Example | Options | |------------|-------------|---------|---------| -| `modelId` | ID of a model to use | `gpt-4o` | [reference](https://platform.openai.com/docs/models) +| `modelId` | ID of a model to use | `gpt-5.4` | [reference](https://platform.openai.com/docs/models) | `maxTokens` | Maximum tokens to generate | `1000` | [reference](https://platform.openai.com/docs/api-reference/chat/create) | `temperature` | Controls randomness (0-2) | `0.7` | [reference](https://platform.openai.com/docs/api-reference/chat/create) | `topP` | Nucleus sampling (0-1) | `0.9` | [reference](https://platform.openai.com/docs/api-reference/chat/create) diff --git a/src/content/docs/user-guide/concepts/model-providers/openai.ts b/src/content/docs/user-guide/concepts/model-providers/openai.ts index 1746ef52f..5bef1604c 100644 --- a/src/content/docs/user-guide/concepts/model-providers/openai.ts +++ b/src/content/docs/user-guide/concepts/model-providers/openai.ts @@ -6,14 +6,15 @@ // Imports are in openai_imports.ts import { Agent } from '@strands-agents/sdk' -import { OpenAIModel } from '@strands-agents/sdk/openai' +import { OpenAIModel } from '@strands-agents/sdk/models/openai' // Basic usage async function basicUsage() { // --8<-- [start:basic_usage] const model = new OpenAIModel({ + api: 'chat', apiKey: process.env.OPENAI_API_KEY || '', - modelId: 'gpt-4o', + modelId: 'gpt-5.4', maxTokens: 1000, temperature: 0.7, }) @@ -28,11 +29,12 @@ async function basicUsage() { async function customServer() { // --8<-- [start:custom_server] const model = new OpenAIModel({ + api: 'chat', apiKey: '', clientConfig: { baseURL: '', }, - modelId: 'gpt-4o', + 
modelId: 'gpt-5.4', }) const agent = new Agent({ model }) @@ -44,8 +46,9 @@ async function customServer() { async function customConfig() { // --8<-- [start:custom_config] const model = new OpenAIModel({ + api: 'chat', apiKey: process.env.OPENAI_API_KEY || '', - modelId: 'gpt-4o', + modelId: 'gpt-5.4', maxTokens: 1000, temperature: 0.7, topP: 0.9, @@ -63,8 +66,9 @@ async function customConfig() { async function updateConfig() { // --8<-- [start:update_config] const model = new OpenAIModel({ + api: 'chat', apiKey: process.env.OPENAI_API_KEY || '', - modelId: 'gpt-4o', + modelId: 'gpt-5.4', temperature: 0.7, }) diff --git a/src/content/docs/user-guide/concepts/model-providers/openai_imports.ts b/src/content/docs/user-guide/concepts/model-providers/openai_imports.ts index 5ae042847..a49e6e839 100644 --- a/src/content/docs/user-guide/concepts/model-providers/openai_imports.ts +++ b/src/content/docs/user-guide/concepts/model-providers/openai_imports.ts @@ -2,5 +2,5 @@ // --8<-- [start:basic_usage_imports] import { Agent } from '@strands-agents/sdk' -import { OpenAIModel } from '@strands-agents/sdk/openai' +import { OpenAIModel } from '@strands-agents/sdk/models/openai' // --8<-- [end:basic_usage_imports] diff --git a/src/content/docs/user-guide/concepts/multi-agent/agent-to-agent.mdx b/src/content/docs/user-guide/concepts/multi-agent/agent-to-agent.mdx index 22a10f17b..f986d8401 100644 --- a/src/content/docs/user-guide/concepts/multi-agent/agent-to-agent.mdx +++ b/src/content/docs/user-guide/concepts/multi-agent/agent-to-agent.mdx @@ -309,7 +309,7 @@ a2a_server.serve() ```typescript -import { A2AExpressServer } from '@strands-agents/sdk/a2a' +import { A2AExpressServer } from '@strands-agents/sdk/a2a/express' --8<-- "user-guide/concepts/multi-agent/agent-to-agent.ts:basic_server" ``` @@ -474,7 +474,7 @@ The TypeScript `A2AExpressServer` supports a custom `taskStore` for persisting t ```typescript import { Agent } from '@strands-agents/sdk' -import { A2AExpressServer } 
from '@strands-agents/sdk/a2a' +import { A2AExpressServer } from '@strands-agents/sdk/a2a/express' const agent = new Agent({ systemPrompt: 'You are a helpful agent.' }) @@ -528,7 +528,7 @@ Use the `httpUrl` option to set the public URL for the agent card. For custom pa ```typescript import { Agent } from '@strands-agents/sdk' -import { A2AExpressServer } from '@strands-agents/sdk/a2a' +import { A2AExpressServer } from '@strands-agents/sdk/a2a/express' const agent = new Agent({ systemPrompt: 'A calculator agent.' }) diff --git a/src/content/docs/user-guide/concepts/multi-agent/agent-to-agent.ts b/src/content/docs/user-guide/concepts/multi-agent/agent-to-agent.ts index 50f82e239..689aa64e3 100644 --- a/src/content/docs/user-guide/concepts/multi-agent/agent-to-agent.ts +++ b/src/content/docs/user-guide/concepts/multi-agent/agent-to-agent.ts @@ -2,7 +2,8 @@ // NOTE: Type-checking is disabled because the examples reference remote services not available at build time. import { Agent, tool } from '@strands-agents/sdk' -import { A2AAgent, A2AExpressServer } from '@strands-agents/sdk/a2a' +import { A2AAgent } from '@strands-agents/sdk/a2a' +import { A2AExpressServer } from '@strands-agents/sdk/a2a/express' import { z } from 'zod' async function basicUsageExample() { diff --git a/src/content/docs/user-guide/concepts/multi-agent/agents-as-tools.mdx b/src/content/docs/user-guide/concepts/multi-agent/agents-as-tools.mdx index 74a3637c9..095921081 100644 --- a/src/content/docs/user-guide/concepts/multi-agent/agents-as-tools.mdx +++ b/src/content/docs/user-guide/concepts/multi-agent/agents-as-tools.mdx @@ -33,7 +33,7 @@ When implementing the "Agents as Tools" pattern with Strands Agents SDK: ## Implementing Agents as Tools with Strands Agents SDK -Strands Agents SDK provides a powerful framework for implementing the "Agents as Tools" pattern. Specialized agents are wrapped as callable tool functions that can be used by an orchestrator agent. 
+Strands Agents SDK provides two ways to implement the "Agents as Tools" pattern: the built-in `.as_tool()` method for quick setup, and the `@tool` decorator for full control over how the agent is invoked. ```mermaid flowchart TD @@ -41,15 +41,97 @@ flowchart TD Orchestrator --> RA["Research Assistant"] Orchestrator --> PA["Product Recommendation Assistant"] Orchestrator --> TA["Trip Planning Assistant"] - + RA --> Orchestrator PA --> Orchestrator TA --> Orchestrator ``` -### Creating Specialized Tool Agents +### Using `.as_tool()` -First, define specialized agents as tool functions: +The simplest way to use an agent as a tool is with the built-in `.as_tool()` method. This wraps the agent so it accepts an `input` string parameter and returns the agent's text response. + + + + +```python +from strands import Agent +from strands_tools import retrieve, http_request + +# Create specialized agents +research_agent = Agent( + system_prompt="""You are a specialized research assistant. Focus only on providing + factual, well-sourced information in response to research questions. + Always cite your sources when possible.""", + tools=[retrieve, http_request], +) + +product_agent = Agent( + system_prompt="""You are a specialized product recommendation assistant. + Provide personalized product suggestions based on user preferences.""", + tools=[retrieve, http_request], +) + +travel_agent = Agent( + system_prompt="""You are a specialized travel planning assistant. 
+ Create detailed travel itineraries based on user preferences.""", + tools=[retrieve, http_request], +) + +# Create the orchestrator with agents as tools +orchestrator = Agent( + system_prompt="""You are an assistant that routes queries to specialized agents: + - For research questions and factual information → Use the research_agent tool + - For product recommendations and shopping advice → Use the product_agent tool + - For travel planning and itineraries → Use the travel_agent tool + - For simple questions not requiring specialized knowledge → Answer directly + + Always select the most appropriate tool based on the user's query.""", + tools=[ + research_agent.as_tool(), + product_agent.as_tool(), + travel_agent.as_tool(), + ], +) +``` + +You can customize the tool name and description: + +```python +orchestrator = Agent( + system_prompt="You are an assistant that routes queries to specialized agents.", + tools=[ + research_agent.as_tool( + name="research_assistant", + description="Process and respond to research-related queries requiring factual information.", + ), + ], +) +``` + +#### Context Management + +By default, `.as_tool()` resets the agent's conversation context between invocations, ensuring every call starts from a clean baseline. To preserve the agent's conversation history across invocations, pass `preserve_context=True`: + +```python +# Agent will remember prior interactions within the same orchestrator session +orchestrator = Agent( + system_prompt="You are an assistant that routes queries to specialized agents.", + tools=[research_agent.as_tool(preserve_context=True)], +) +``` + + + +```typescript +// .as_tool() not yet supported in TypeScript. 
+``` + + + +### Using the `@tool` Decorator + +For more control over how the agent is invoked — such as custom pre/post-processing, error handling, or passing multiple parameters — you can use the `@tool` decorator to wrap an agent manually: @@ -58,7 +140,6 @@ First, define specialized agents as tool functions: from strands import Agent, tool from strands_tools import retrieve, http_request -# Define a specialized system prompt RESEARCH_ASSISTANT_PROMPT = """ You are a specialized research assistant. Focus only on providing factual, well-sourced information in response to research questions. @@ -69,21 +150,19 @@ Always cite your sources when possible. def research_assistant(query: str) -> str: """ Process and respond to research-related queries. - + Args: query: A research question requiring factual information - + Returns: A detailed research answer with citations """ try: - # Strands Agents SDK makes it easy to create a specialized agent research_agent = Agent( system_prompt=RESEARCH_ASSISTANT_PROMPT, - tools=[retrieve, http_request] # Research-specific tools + tools=[retrieve, http_request] ) - - # Call the agent and return its response + response = research_agent(query) return str(response) except Exception as e: @@ -108,10 +187,10 @@ You can create multiple specialized agents following the same pattern: def product_recommendation_assistant(query: str) -> str: """ Handle product recommendation queries by suggesting appropriate products. - + Args: query: A product inquiry with user preferences - + Returns: Personalized product recommendations with reasoning """ @@ -119,7 +198,7 @@ def product_recommendation_assistant(query: str) -> str: product_agent = Agent( system_prompt="""You are a specialized product recommendation assistant. 
Provide personalized product suggestions based on user preferences.""", - tools=[retrieve, http_request, dialog], # Tools for getting product data + tools=[retrieve, http_request, dialog], ) # Implementation with response handling # ... @@ -131,10 +210,10 @@ def product_recommendation_assistant(query: str) -> str: def trip_planning_assistant(query: str) -> str: """ Create travel itineraries and provide travel advice. - + Args: query: A travel planning request with destination and preferences - + Returns: A detailed travel itinerary or travel advice """ @@ -142,7 +221,7 @@ def trip_planning_assistant(query: str) -> str: travel_agent = Agent( system_prompt="""You are a specialized travel planning assistant. Create detailed travel itineraries based on user preferences.""", - tools=[retrieve, http_request], # Travel information tools + tools=[retrieve, http_request], ) # Implementation with response handling # ... @@ -159,9 +238,9 @@ def trip_planning_assistant(query: str) -> str: -### Creating the Orchestrator Agent +#### Creating the Orchestrator Agent -Next, create an orchestrator agent that has access to all specialized agents as tools: +Create an orchestrator agent that has access to all specialized agents as tools: @@ -170,7 +249,6 @@ Next, create an orchestrator agent that has access to all specialized agents as from strands import Agent from .specialized_agents import research_assistant, product_recommendation_assistant, trip_planning_assistant -# Define the orchestrator system prompt with clear tool selection guidance MAIN_SYSTEM_PROMPT = """ You are an assistant that routes queries to specialized agents: - For research questions and factual information → Use the research_assistant tool @@ -181,7 +259,6 @@ You are an assistant that routes queries to specialized agents: Always select the most appropriate tool based on the user's query. 
""" -# Strands Agents SDK allows easy integration of agent tools orchestrator = Agent( system_prompt=MAIN_SYSTEM_PROMPT, callback_handler=None, diff --git a/src/content/docs/user-guide/concepts/tools/index.mdx b/src/content/docs/user-guide/concepts/tools/index.mdx index 3c3b70df8..9f1c52989 100644 --- a/src/content/docs/user-guide/concepts/tools/index.mdx +++ b/src/content/docs/user-guide/concepts/tools/index.mdx @@ -536,3 +536,56 @@ def search_database(query: str, max_results: int = 10) -> list: ``` + +### 4. Agents as Tools + +Any agent can be converted into a tool using `.as_tool()` and passed to another agent's tools array. This enables hierarchical multi-agent systems where an orchestrator delegates to specialized agents. For more details, see [Agents as Tools](../multi-agent/agents-as-tools.md). + + + + +```python +from strands import Agent + +research_agent = Agent( + system_prompt="You are a specialized research assistant.", +) + +orchestrator = Agent( + system_prompt="You are an assistant that routes queries to specialized agents.", + tools=[research_agent.as_tool()], +) + +orchestrator("What are the latest advances in quantum computing?") +``` + +You can also specify a custom name and description for the tool: + +```python +orchestrator = Agent( + system_prompt="You are an assistant that routes queries to specialized agents.", + tools=[ + research_agent.as_tool( + name="research_assistant", + description="Process and respond to research-related queries requiring factual information.", + ), + ], +) +``` + +By default, the tool agent resets its conversation context between invocations, ensuring every call starts from a clean baseline. 
To preserve the agent's conversation history across invocations, pass `preserve_context=True` when creating the tool: + +```python +orchestrator = Agent( + system_prompt="You are an assistant that routes queries to specialized agents.", + tools=[research_agent.as_tool(preserve_context=True)], +) +``` + + + +```typescript +// .as_tool() not yet supported in TypeScript. +``` + + diff --git a/src/content/docs/user-guide/concepts/tools/vended-tools-imports.ts b/src/content/docs/user-guide/concepts/tools/vended-tools-imports.ts new file mode 100644 index 000000000..a1b4e06d9 --- /dev/null +++ b/src/content/docs/user-guide/concepts/tools/vended-tools-imports.ts @@ -0,0 +1,37 @@ +// @ts-nocheck +// This file contains import snippets used in documentation examples. +// Each snippet is a standalone import block for a specific tool. +// @ts-nocheck is used because imports are intentionally repeated across snippets +// for documentation clarity — each snippet shows the complete imports needed. + +// --8<-- [start:bash_import] +import { Agent } from '@strands-agents/sdk' +import { bash } from '@strands-agents/sdk/vended-tools/bash' +// --8<-- [end:bash_import] + +// --8<-- [start:file_editor_import] +import { Agent } from '@strands-agents/sdk' +import { fileEditor } from '@strands-agents/sdk/vended-tools/file-editor' +// --8<-- [end:file_editor_import] + +// --8<-- [start:http_request_import] +import { Agent } from '@strands-agents/sdk' +import { httpRequest } from '@strands-agents/sdk/vended-tools/http-request' +// --8<-- [end:http_request_import] + +// --8<-- [start:notebook_import] +import { Agent } from '@strands-agents/sdk' +import { notebook } from '@strands-agents/sdk/vended-tools/notebook' +// --8<-- [end:notebook_import] + +// --8<-- [start:notebook_persistence_import] +import { Agent, SessionManager, FileStorage } from '@strands-agents/sdk' +import { notebook } from '@strands-agents/sdk/vended-tools/notebook' +// --8<-- [end:notebook_persistence_import] + +// --8<-- 
[start:combined_import] +import { Agent } from '@strands-agents/sdk' +import { bash } from '@strands-agents/sdk/vended-tools/bash' +import { fileEditor } from '@strands-agents/sdk/vended-tools/file-editor' +import { notebook } from '@strands-agents/sdk/vended-tools/notebook' +// --8<-- [end:combined_import] diff --git a/src/content/docs/user-guide/concepts/tools/vended-tools.mdx b/src/content/docs/user-guide/concepts/tools/vended-tools.mdx new file mode 100644 index 000000000..293fd4ba2 --- /dev/null +++ b/src/content/docs/user-guide/concepts/tools/vended-tools.mdx @@ -0,0 +1,144 @@ +--- +title: Vended Tools +description: "Pre-built tools included in the TypeScript SDK for common agent tasks like file operations, shell commands, HTTP requests, and persistent notes." +sidebar: + label: "Vended Tools" +languages: [typescript] +--- + +Vended tools are pre-built tools included directly in the Strands SDK for common agent tasks like file operations, shell commands, HTTP requests, and persistent notes. + +They ship as part of the SDK package and are updated alongside it — see [Versioning & Maintenance](#versioning--maintenance) for details on how changes are communicated and what level of backwards compatibility they maintain. 
+ +## Quick Start + +Each tool is imported from its own subpath under `@strands-agents/sdk/vended-tools` — no additional packages required: + +```typescript +--8<-- "user-guide/concepts/tools/vended-tools.ts:basic_import" + +--8<-- "user-guide/concepts/tools/vended-tools.ts:agent_with_vended_tools" +``` + +## Available Tools + +| Tool | Description | Supported in | +|------|-------------|--------------| +| [File Editor](#file-editor) | View, create, and edit files | Node.js | +| [HTTP Request](#http-request) | Make HTTP requests to external APIs | Node.js 20+, browsers | +| [Notebook](#notebook) | Manage persistent text notebooks | Node.js, browsers | +| [Bash](#bash) | Execute shell commands with persistent sessions | Node.js (Unix/Linux/macOS) | + +### File Editor + +Gives your agent the ability to read and modify files on disk — useful for coding agents, config management, or any workflow where the agent needs to inspect output and make targeted edits. + +_Supported in: Node.js only._ + +:::caution[Security Warning] +This tool reads and writes files with the full permissions of the Node.js process. Only use with trusted input and consider running in a sandboxed environment for production. +::: + +**Example:** +```typescript +--8<-- "user-guide/concepts/tools/vended-tools-imports.ts:file_editor_import" + +--8<-- "user-guide/concepts/tools/vended-tools.ts:file_editor_example" +``` + +📖 [Full API Reference](https://github.com/strands-agents/sdk-typescript/blob/main/src/vended-tools/file-editor/README.md) + +--- + +### HTTP Request + +Lets your agent call external APIs and fetch web content. Supports all HTTP methods, custom headers, and request bodies. Default timeout is 30 seconds. 
+ +_Supported in: Node.js 20+, modern browsers._ + +**Example:** +```typescript +--8<-- "user-guide/concepts/tools/vended-tools-imports.ts:http_request_import" + +--8<-- "user-guide/concepts/tools/vended-tools.ts:http_request_example" +``` + +📖 [Full API Reference](https://github.com/strands-agents/sdk-typescript/blob/main/src/vended-tools/http-request/README.md) + +--- + +### Notebook + +A scratchpad the agent can read and write across invocations. The most effective use is giving the agent a notebook at the start of a task and instructing it to plan its work there — it can break the task into steps, check things off as it goes, and always have a clear picture of what's left. Notebook state is part of the agent's state, so it persists automatically with [Session Management](../agents/session-management.mdx). + +_Supported in: Node.js, browsers._ + +**Example - Task Management:** +```typescript +--8<-- "user-guide/concepts/tools/vended-tools-imports.ts:notebook_import" + +--8<-- "user-guide/concepts/tools/vended-tools.ts:notebook_example" +``` + +**Example - State Persistence:** +```typescript +--8<-- "user-guide/concepts/tools/vended-tools-imports.ts:notebook_persistence_import" + +--8<-- "user-guide/concepts/tools/vended-tools.ts:notebook_state_persistence" +``` + +📖 [Full API Reference](https://github.com/strands-agents/sdk-typescript/blob/main/src/vended-tools/notebook/README.md) + +--- + +### Bash + +Lets your agent run shell commands and act on the output. Shell state — variables, working directory, exported functions — persists across invocations within the same session, so the agent can build up context incrementally. Sessions can be restarted to clear state. + +_Supported in: Node.js on Unix/Linux/macOS. Not supported on Windows._ + +:::caution[Security Warning] +This tool executes arbitrary bash commands without sandboxing. Commands run with the full permissions of the Node.js process. 
Only use with trusted input and consider running in sandboxed environments (containers, VMs) for production. +::: + +**Example - File Operations:** +```typescript +--8<-- "user-guide/concepts/tools/vended-tools-imports.ts:bash_import" + +--8<-- "user-guide/concepts/tools/vended-tools.ts:bash_example" +``` + +**Example - Session Persistence:** +```typescript +--8<-- "user-guide/concepts/tools/vended-tools-imports.ts:bash_import" + +--8<-- "user-guide/concepts/tools/vended-tools.ts:bash_session" +``` + +📖 [Full API Reference](https://github.com/strands-agents/sdk-typescript/blob/main/src/vended-tools/bash/README.md) + +--- + +## Using Multiple Tools Together + +Combine vended tools to build powerful agent workflows: + +```typescript +--8<-- "user-guide/concepts/tools/vended-tools-imports.ts:combined_import" + +--8<-- "user-guide/concepts/tools/vended-tools.ts:combined_tools_example" +``` + +## Versioning & Maintenance + +Vended tools ship as part of the SDK and are updated alongside it. Report bugs and feature requests in the [TypeScript SDK GitHub repository](https://github.com/strands-agents/sdk-typescript/issues). + +Tool names are stable and will not change. In minor versions, a tool's description, spec, or parameters may be updated to improve effectiveness — these changes are noted in SDK release notes. Pin your SDK version and test after upgrades if your workflows depend on specific tool behavior. 
+ +## See also + +- [Custom Tools](custom-tools.mdx) — Build your own tools +- [Community Tools Package](community-tools-package.mdx) — Python tools package with 30+ tools +- [Session Management](../agents/session-management.mdx) — Persist agent state including notebooks +- [Interrupts](../interrupts.mdx) — Implement approval workflows for sensitive operations +- [Hooks](../agents/hooks.mdx) — Intercept and customize tool execution diff --git a/src/content/docs/user-guide/concepts/tools/vended-tools.ts b/src/content/docs/user-guide/concepts/tools/vended-tools.ts new file mode 100644 index 000000000..2a43a868f --- /dev/null +++ b/src/content/docs/user-guide/concepts/tools/vended-tools.ts @@ -0,0 +1,133 @@ +// --8<-- [start:basic_import] +import { Agent } from '@strands-agents/sdk' +import { bash } from '@strands-agents/sdk/vended-tools/bash' +import { fileEditor } from '@strands-agents/sdk/vended-tools/file-editor' +import { httpRequest } from '@strands-agents/sdk/vended-tools/http-request' +import { notebook } from '@strands-agents/sdk/vended-tools/notebook' +// --8<-- [end:basic_import] +import { SessionManager, FileStorage } from '@strands-agents/sdk' + +// Agent with vended tools example +async function agentWithVendedToolsExample() { + // --8<-- [start:agent_with_vended_tools] + const agent = new Agent({ + tools: [bash, fileEditor, httpRequest, notebook], + }) + // --8<-- [end:agent_with_vended_tools] +} + +// Bash tool example - file operations +async function bashFileOperationsExample() { + // --8<-- [start:bash_example] + const agent = new Agent({ + tools: [bash], + }) + + // List files and create a new file + await agent.invoke('List all files in the current directory') + await agent.invoke('Create a new file called notes.txt with "Hello World"') + // --8<-- [end:bash_example] +} + +// Bash tool example - session persistence +async function bashSessionPersistenceExample() { + // --8<-- [start:bash_session] + const agent = new Agent({ + tools: [bash], + }) + 
+ // Variables persist across invocations within the same session + await agent.invoke('Run: export MY_VAR="hello"') + await agent.invoke('Run: echo $MY_VAR') // Will show "hello" + + // Restart session to clear state + await agent.invoke('Restart the bash session') + await agent.invoke('Run: echo $MY_VAR') // Variable will be empty + // --8<-- [end:bash_session] +} + +// File editor example +async function fileEditorExample() { + // --8<-- [start:file_editor_example] + const agent = new Agent({ + tools: [fileEditor], + }) + + // Create, view, and edit files + await agent.invoke('Create a file /tmp/config.json with {"debug": false}') + await agent.invoke('Replace "debug": false with "debug": true in /tmp/config.json') + await agent.invoke('View lines 1-10 of /tmp/config.json') + // --8<-- [end:file_editor_example] +} + +// HTTP request example +async function httpRequestExample() { + // --8<-- [start:http_request_example] + const agent = new Agent({ + tools: [httpRequest], + }) + + // Make API requests + await agent.invoke('Get data from https://api.example.com/users') + await agent.invoke('Post {"name": "John"} to https://api.example.com/users') + // --8<-- [end:http_request_example] +} + +// Notebook example - task management +async function notebookTaskExample() { + // --8<-- [start:notebook_example] + const agent = new Agent({ + tools: [notebook], + systemPrompt: + 'Before starting any multi-step task, create a notebook with a checklist of steps. 
' + + 'Check off each step as you complete it.', + }) + + // The agent uses the notebook to plan and track its work + await agent.invoke('Write a project plan for building a personal budget tracker app') + // --8<-- [end:notebook_example] +} + +// Notebook state persistence example +async function notebookStatePersistenceExample() { + // --8<-- [start:notebook_state_persistence] + const session = new SessionManager({ + sessionId: 'my-session', + storage: { snapshot: new FileStorage('./sessions') }, + }) + + const agent = new Agent({ tools: [notebook], sessionManager: session }) + + // Notebooks are automatically persisted as part of the session + await agent.invoke('Create a notebook called "ideas" with "# Project Ideas"') + await agent.invoke('Add "- Build a web scraper" to the ideas notebook') + + // ... + + // Later, a new agent with the same session restores notebooks automatically + const restoredAgent = new Agent({ tools: [notebook], sessionManager: session }) + await restoredAgent.invoke('Read the ideas notebook') + // --8<-- [end:notebook_state_persistence] +} + +// Combined tools example - development workflow +async function combinedToolsExample() { + // --8<-- [start:combined_tools_example] + const agent = new Agent({ + tools: [bash, fileEditor, notebook], + systemPrompt: [ + 'You are a software development assistant.', + 'When given a feature to implement:', + '1. Use the notebook tool to create a plan with a checklist of steps', + '2. Work through each step, checking them off as you go', + '3. Use the bash tool to run tests and verify your changes', + ].join('\n'), + }) + + // Agent plans the work, implements it, and tracks progress + await agent.invoke( + 'Add input validation to the createUser function in src/users.ts. 
' + + 'It should reject empty names and invalid email formats.', + ) + // --8<-- [end:combined_tools_example] +} diff --git a/src/content/docs/user-guide/deploy/deploy_to_docker/imports.ts b/src/content/docs/user-guide/deploy/deploy_to_docker/imports.ts index 00e5ead37..4ddefd7e5 100644 --- a/src/content/docs/user-guide/deploy/deploy_to_docker/imports.ts +++ b/src/content/docs/user-guide/deploy/deploy_to_docker/imports.ts @@ -1,6 +1,6 @@ // --8<-- [start: imports] import { Agent } from '@strands-agents/sdk' import express, { type Request, type Response } from 'express' -import { OpenAIModel } from '@strands-agents/sdk/openai' +import { OpenAIModel } from '@strands-agents/sdk/models/openai' // --8<-- [end: imports] diff --git a/src/content/docs/user-guide/deploy/deploy_to_docker/index.ts b/src/content/docs/user-guide/deploy/deploy_to_docker/index.ts index bbd13ad5d..96b480e96 100644 --- a/src/content/docs/user-guide/deploy/deploy_to_docker/index.ts +++ b/src/content/docs/user-guide/deploy/deploy_to_docker/index.ts @@ -1,13 +1,13 @@ import { Agent } from '@strands-agents/sdk' import express, { type Request, type Response } from 'express' -import { OpenAIModel } from '@strands-agents/sdk/openai' +import { OpenAIModel } from '@strands-agents/sdk/models/openai' // --8<-- [start: agent] const PORT = Number(process.env.PORT) || 8080 // Note: Any supported model provider can be configured // Automatically uses process.env.OPENAI_API_KEY -const model = new OpenAIModel() +const model = new OpenAIModel({ api: 'chat' }) const agent = new Agent({ model }) diff --git a/src/content/docs/user-guide/deploy/deploy_to_docker/typescript.mdx b/src/content/docs/user-guide/deploy/deploy_to_docker/typescript.mdx index 5b4f676c8..0c155c913 100644 --- a/src/content/docs/user-guide/deploy/deploy_to_docker/typescript.mdx +++ b/src/content/docs/user-guide/deploy/deploy_to_docker/typescript.mdx @@ -73,13 +73,13 @@ npm pkg set scripts.build="tsc" scripts.start="node dist/index.js" scripts.dev=" 
cat > index.ts << 'EOF' import { Agent } from '@strands-agents/sdk' import express, { type Request, type Response } from 'express' -import { OpenAIModel } from '@strands-agents/sdk/openai' +import { OpenAIModel } from '@strands-agents/sdk/models/openai' const PORT = Number(process.env.PORT) || 8080 // Note: Any supported model provider can be configured // Automatically uses process.env.OPENAI_API_KEY -const model = new OpenAIModel() +const model = new OpenAIModel({ api: 'chat' }) const agent = new Agent({ model }) diff --git a/src/content/docs/user-guide/observability-evaluation/metrics.mdx b/src/content/docs/user-guide/observability-evaluation/metrics.mdx index 23f225d78..0519a1f43 100644 --- a/src/content/docs/user-guide/observability-evaluation/metrics.mdx +++ b/src/content/docs/user-guide/observability-evaluation/metrics.mdx @@ -293,6 +293,58 @@ This summary provides a complete picture of the agent's execution, including cyc +## Local Execution Traces + + + + +In addition to aggregate metrics, the Strands Agents SDK automatically collects **local execution traces** — lightweight, in-memory timing trees that capture the hierarchy and duration of operations within the agent loop. These traces are always collected regardless of OpenTelemetry configuration and are returned directly in the `AgentResult`. 
+ +Each trace represents a cycle in the agent loop, with child traces for model invocations and tool calls: + +```python +from strands import Agent +from strands_tools import calculator + +agent = Agent(tools=[calculator]) +result = agent("What is 15 * 8 + 42?") + +# Traces are included in the summary output +print(result.metrics.get_summary()) +``` + +Each trace contains: + +- **name**: Human-readable label (e.g., "Cycle 1", "stream_messages", "Tool: calculator") +- **duration**: Execution time in seconds +- **children**: Nested traces for operations within the cycle +- **metadata**: Associated data like `cycleId`, `toolUseId`, and `toolName` +- **message**: The model output message (for model invocation traces) + +Traces are included in the `get_summary()` output, giving you a complete hierarchical view of agent execution alongside aggregate metrics. + + + +In addition to aggregate metrics, the Strands Agents SDK automatically collects **local execution traces** — lightweight, in-memory timing trees that capture the hierarchy and duration of operations within the agent loop. These traces are always collected regardless of OpenTelemetry configuration and are returned directly in `AgentResult.traces`. + +Each trace is an `AgentTrace` instance representing a cycle in the agent loop, with child traces for model invocations and tool calls: + +```typescript +--8<-- "user-guide/observability-evaluation/metrics.ts:local_traces" +``` + +Each `AgentTrace` contains: + +- **name**: Human-readable label (e.g., "Cycle 1", "stream_messages", "Tool: calculator") +- **duration**: Execution time in milliseconds +- **children**: Nested `AgentTrace` instances for operations within the cycle +- **metadata**: Associated data like `cycleId`, `toolUseId`, and `toolName` +- **message**: The model output message (for model invocation traces) + +Traces are separate from `AgentMetrics` and are accessed via `result.traces`. 
Note that `AgentResult.toJSON()` excludes traces and metrics by default to keep API responses lean — access them directly via `result.traces` and `result.metrics`. + + + ## Best Practices 1. **Monitor Token Usage**: Keep track of token usage to ensure you stay within limits and optimize costs. Set up alerts for when token usage approaches predefined thresholds to avoid unexpected costs. diff --git a/src/content/docs/user-guide/observability-evaluation/metrics.ts b/src/content/docs/user-guide/observability-evaluation/metrics.ts index d112fa787..7b652f9bb 100644 --- a/src/content/docs/user-guide/observability-evaluation/metrics.ts +++ b/src/content/docs/user-guide/observability-evaluation/metrics.ts @@ -66,6 +66,20 @@ async function agentLoopMetricsExample() { // --8<-- [end:agent_loop_metrics] } +// Local traces example +async function localTracesExample() { + // --8<-- [start:local_traces] + const agent = new Agent({ + tools: [notebook], + }) + + const result = await agent.invoke('What is 15 * 8 + 42?') + + // Access traces directly from the result + console.log(JSON.stringify(result.traces)) + // --8<-- [end:local_traces] +} + // Metrics summary example async function metricsSummaryExample() { // --8<-- [start:metrics_summary] diff --git a/src/content/docs/user-guide/observability-evaluation/traces.mdx b/src/content/docs/user-guide/observability-evaluation/traces.mdx index d6cc7ce73..b1f92e9ab 100644 --- a/src/content/docs/user-guide/observability-evaluation/traces.mdx +++ b/src/content/docs/user-guide/observability-evaluation/traces.mdx @@ -385,6 +385,37 @@ agent = Agent( +### Custom Spans + +You can access the configured tracer to create your own custom spans alongside the auto-instrumented ones: + + + + +```python +from opentelemetry import trace + +# Get your configured tracer to optionally create your own custom spans +tracer = trace.get_tracer(__name__) +with tracer.start_as_current_span("my-custom-operation") as span: + span.set_attribute("custom.key", 
"value") + # ... do work ... +``` + + + +```typescript +--8<-- "user-guide/observability-evaluation/traces_imports.ts:custom_spans_imports" + +--8<-- "user-guide/observability-evaluation/traces.ts:custom_spans" +``` + + + +:::tip +`getTracer()` (TypeScript) and `trace.get_tracer()` (Python) use the global tracer provider. When you use `setupTracer()` or `StrandsTelemetry()` without a custom provider, it's automatically registered as global — so your custom spans will use the same provider as the agent's auto-instrumented spans. +::: + ### Configuring the exporters from source code diff --git a/src/content/docs/user-guide/observability-evaluation/traces.ts b/src/content/docs/user-guide/observability-evaluation/traces.ts index a70c9e5fc..f9d393a4b 100644 --- a/src/content/docs/user-guide/observability-evaluation/traces.ts +++ b/src/content/docs/user-guide/observability-evaluation/traces.ts @@ -1,5 +1,5 @@ import { Agent } from '@strands-agents/sdk' -import { setupTracer } from '@strands-agents/sdk/telemetry' +import { setupTracer, getTracer } from '@strands-agents/sdk/telemetry' import { NodeTracerProvider } from '@opentelemetry/sdk-trace-node' import { BatchSpanProcessor, SimpleSpanProcessor, ConsoleSpanExporter } from '@opentelemetry/sdk-trace-base' import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http' @@ -90,6 +90,20 @@ function configuringExporters() { // --8<-- [end:configuring_exporters] } +function customSpans() { + // --8<-- [start:custom_spans] + // Set up telemetry first (or register your own NodeTracerProvider) + setupTracer({ exporters: { otlp: true } }) + + // Get a tracer and create custom spans + const tracer = getTracer() + const span = tracer.startSpan('my-custom-operation') + span.setAttribute('custom.key', 'value') + // ... do work ... 
+ span.end() + // --8<-- [end:custom_spans] +} + async function endToEnd() { // --8<-- [start:end_to_end] // Set environment variables for OTLP endpoint diff --git a/src/content/docs/user-guide/observability-evaluation/traces_imports.ts b/src/content/docs/user-guide/observability-evaluation/traces_imports.ts index 20f4c02ac..f9a569eea 100644 --- a/src/content/docs/user-guide/observability-evaluation/traces_imports.ts +++ b/src/content/docs/user-guide/observability-evaluation/traces_imports.ts @@ -30,6 +30,10 @@ import { BatchSpanProcessor, SimpleSpanProcessor, ConsoleSpanExporter } from '@o import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http' // --8<-- [end:configuring_exporters_imports] +// --8<-- [start:custom_spans_imports] +import { setupTracer, getTracer } from '@strands-agents/sdk/telemetry' +// --8<-- [end:custom_spans_imports] + // --8<-- [start:end_to_end_imports] import { Agent } from '@strands-agents/sdk' import { setupTracer } from '@strands-agents/sdk/telemetry' diff --git a/src/content/docs/user-guide/quickstart.mdx b/src/content/docs/user-guide/quickstart.mdx deleted file mode 100644 index 86bd0d864..000000000 --- a/src/content/docs/user-guide/quickstart.mdx +++ /dev/null @@ -1,556 +0,0 @@ ---- -title: Quickstart ---- - -This quickstart guide shows you how to create your first basic Strands agent, add built-in and custom tools to your agent, use different model providers, emit debug logs, and run the agent locally. - -After completing this guide you can integrate your agent with a web server, implement concepts like multi-agent, evaluate and improve your agent, along with deploying to production and running at scale. - -## Install the SDK - -First, ensure that you have Python 3.10+ installed. - -We'll create a virtual environment to install the Strands Agents SDK and its dependencies in to. 
- -```bash -python -m venv .venv -``` - -And activate the virtual environment: - -* macOS / Linux: `source .venv/bin/activate` -* Windows (CMD): `.venv\Scripts\activate.bat` -* Windows (PowerShell): `.venv\Scripts\Activate.ps1` - -Next we'll install the `strands-agents` SDK package: - -```bash -pip install strands-agents -``` - -The Strands Agents SDK additionally offers the [`strands-agents-tools`](https://pypi.org/project/strands-agents-tools/) ([GitHub](https://github.com/strands-agents/tools)) and [`strands-agents-builder`](https://pypi.org/project/strands-agents-builder/) ([GitHub](https://github.com/strands-agents/agent-builder)) packages for development. The [`strands-agents-tools`](https://pypi.org/project/strands-agents-tools/) package is a community-driven project that provides a set of tools for your agents to use, bridging the gap between large language models and practical applications. The [`strands-agents-builder`](https://pypi.org/project/strands-agents-builder/) package provides an agent that helps you to build your own Strands agents and tools. - - -Let's install those development packages too: - -```bash -pip install strands-agents-tools strands-agents-builder -``` - -### Strands MCP Server (Optional) - -Strands also provides an MCP (Model Context Protocol) server that can assist you during development. This server gives AI coding assistants in your IDE access to Strands documentation, development prompts, and best practices. You can use it with MCP-compatible clients like Q Developer CLI, Cursor, Claude, Cline, and others to help you: - -- Develop custom tools and agents with guided prompts -- Debug and troubleshoot your Strands implementations -- Get quick answers about Strands concepts and patterns -- Design multi-agent systems with Graph or Swarm patterns - -To use the MCP server, you'll need [uv](https://github.com/astral-sh/uv) installed on your system. 
You can install it by following the [official installation instructions](https://github.com/astral-sh/uv#installation). - -Once uv is installed, configure the MCP server with your preferred client. For example, to use with Q Developer CLI, add to `~/.aws/amazonq/mcp.json`: - -```json -{ - "mcpServers": { - "strands-agents": { - "command": "uvx", - "args": ["strands-agents-mcp-server"] - } - } -} -``` - -See the [MCP server documentation](https://github.com/strands-agents/mcp-server) for setup instructions with other clients. - -## Configuring Credentials - -Strands supports many different model providers. By default, agents use the Amazon Bedrock model provider with the Claude 4 model. To change the default model, refer to [the Model Providers section](./quickstart/python.md#model-providers). - -To use the examples in this guide, you'll need to configure your environment with AWS credentials that have permissions to invoke the Claude 4 model. You can set up your credentials in several ways: - -1. **Environment variables**: Set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and optionally `AWS_SESSION_TOKEN` -2. **AWS credentials file**: Configure credentials using `aws configure` CLI command -3. **IAM roles**: If running on AWS services like EC2, ECS, or Lambda, use IAM roles -4. **Bedrock API keys**: Set the `AWS_BEARER_TOKEN_BEDROCK` environment variable - -Make sure your AWS credentials have the necessary permissions to access Amazon Bedrock and invoke the Claude 4 model. - -## Project Setup - -Now we'll create our Python project where our agent will reside. We'll use this directory structure: - -``` -my_agent/ -├── __init__.py -├── agent.py -└── requirements.txt -``` - -Create the directory: `mkdir my_agent` - -Now create `my_agent/requirements.txt` to include the `strands-agents` and `strands-agents-tools` packages as dependencies: - -``` -strands-agents>=1.0.0 -strands-agents-tools>=0.2.0 -``` - -Create the `my_agent/__init__.py` file: - -```python -from . 
import agent -``` - -And finally our `agent.py` file where the goodies are: - -```python -from strands import Agent, tool -from strands_tools import calculator, current_time - -# Define a custom tool as a Python function using the @tool decorator -@tool -def letter_counter(word: str, letter: str) -> int: - """ - Count occurrences of a specific letter in a word. - - Args: - word (str): The input word to search in - letter (str): The specific letter to count - - Returns: - int: The number of occurrences of the letter in the word - """ - if not isinstance(word, str) or not isinstance(letter, str): - return 0 - - if len(letter) != 1: - raise ValueError("The 'letter' parameter must be a single character") - - return word.lower().count(letter.lower()) - -# Create an agent with tools from the community-driven strands-tools package -# as well as our custom letter_counter tool -agent = Agent(tools=[calculator, current_time, letter_counter]) - -# Ask the agent a question that uses the available tools -message = """ -I have 4 requests: - -1. What is the time right now? -2. Calculate 3111696 / 74088 -3. Tell me how many letter R's are in the word "strawberry" 🍓 -""" -agent(message) -``` - -This basic quickstart agent can perform mathematical calculations, get the current time, run Python code, and count letters in words. The agent automatically determines when to use tools based on the input query and context. - -```mermaid -flowchart LR - A[Input & Context] --> Loop - - subgraph Loop[" "] - direction TB - B["Reasoning (LLM)"] --> C["Tool Selection"] - C --> D["Tool Execution"] - D --> B - end - - Loop --> E[Response] -``` - -More details can be found in the [Agent Loop](concepts/agents/agent-loop.md) documentation. - -## Running Agents - -Our agent is just Python, so we can run it using any mechanism for running Python! - -To test our agent we can simply run: -```bash -python -u my_agent/agent.py -``` - -And that's it! 
We now have a running agent with powerful tools and abilities in just a few lines of code 🥳. - -## Understanding What Agents Did - -After running an agent, you can understand what happened during execution through traces and metrics. Every agent invocation returns an [`AgentResult`](@api/python/strands.agent.agent_result#AgentResult) object with comprehensive observability data. - -Traces provide detailed insight into the agent's reasoning process. You can access in-memory traces and metrics directly from the [`AgentResult`](@api/python/strands.agent.agent_result#AgentResult), or export them using [OpenTelemetry](observability-evaluation/traces.md) to observability platforms. - -:::note[Example result.metrics.get_summary() output] - -```python -result = agent("What is the square root of 144?") -print(result.metrics.get_summary()) -``` -```python -{ - "accumulated_metrics": { - "latencyMs": 6253 - }, - "accumulated_usage": { - "inputTokens": 3921, - "outputTokens": 83, - "totalTokens": 4004 - }, - "average_cycle_time": 0.9406174421310425, - "tool_usage": { - "calculator": { - "execution_stats": { - "average_time": 0.008260965347290039, - "call_count": 1, - "error_count": 0, - "success_count": 1, - "success_rate": 1.0, - "total_time": 0.008260965347290039 - }, - "tool_info": { - "input_params": { - "expression": "sqrt(144)", - "mode": "evaluate" - }, - "name": "calculator", - "tool_use_id": "tooluse_jR3LAfuASrGil31Ix9V7qQ" - } - } - }, - "total_cycles": 2, - "total_duration": 1.881234884262085, - "traces": [ - { - "children": [ - { - "children": [], - "duration": 4.476144790649414, - "end_time": 1747227039.938964, - "id": "c7e86c24-c9d4-4a79-a3a2-f0eaf42b0d19", - "message": { - "content": [ - { - "text": "I'll calculate the square root of 144 for you." 
- }, - { - "toolUse": { - "input": { - "expression": "sqrt(144)", - "mode": "evaluate" - }, - "name": "calculator", - "toolUseId": "tooluse_jR3LAfuASrGil31Ix9V7qQ" - } - } - ], - "role": "assistant" - }, - "metadata": {}, - "name": "stream_messages", - "parent_id": "78595347-43b1-4652-b215-39da3c719ec1", - "raw_name": null, - "start_time": 1747227035.462819 - }, - { - "children": [], - "duration": 0.008296012878417969, - "end_time": 1747227039.948415, - "id": "4f64ce3d-a21c-4696-aa71-2dd446f71488", - "message": { - "content": [ - { - "toolResult": { - "content": [ - { - "text": "Result: 12" - } - ], - "status": "success", - "toolUseId": "tooluse_jR3LAfuASrGil31Ix9V7qQ" - } - } - ], - "role": "user" - }, - "metadata": { - "toolUseId": "tooluse_jR3LAfuASrGil31Ix9V7qQ", - "tool_name": "calculator" - }, - "name": "Tool: calculator", - "parent_id": "78595347-43b1-4652-b215-39da3c719ec1", - "raw_name": "calculator - tooluse_jR3LAfuASrGil31Ix9V7qQ", - "start_time": 1747227039.940119 - }, - { - "children": [], - "duration": 1.881267786026001, - "end_time": 1747227041.8299048, - "id": "0261b3a5-89f2-46b2-9b37-13cccb0d7d39", - "message": null, - "metadata": {}, - "name": "Recursive call", - "parent_id": "78595347-43b1-4652-b215-39da3c719ec1", - "raw_name": null, - "start_time": 1747227039.948637 - } - ], - "duration": null, - "end_time": null, - "id": "78595347-43b1-4652-b215-39da3c719ec1", - "message": null, - "metadata": {}, - "name": "Cycle 1", - "parent_id": null, - "raw_name": null, - "start_time": 1747227035.46276 - }, - { - "children": [ - { - "children": [], - "duration": 1.8811860084533691, - "end_time": 1747227041.829879, - "id": "1317cfcb-0e87-432e-8665-da5ddfe099cd", - "message": { - "content": [ - { - "text": "\n\nThe square root of 144 is 12." 
- } - ], - "role": "assistant" - }, - "metadata": {}, - "name": "stream_messages", - "parent_id": "f482cee9-946c-471a-9bd3-fae23650f317", - "raw_name": null, - "start_time": 1747227039.948693 - } - ], - "duration": 1.881234884262085, - "end_time": 1747227041.829896, - "id": "f482cee9-946c-471a-9bd3-fae23650f317", - "message": null, - "metadata": {}, - "name": "Cycle 2", - "parent_id": null, - "raw_name": null, - "start_time": 1747227039.948661 - } - ] -} -``` -::: - -This observability data helps you debug agent behavior, optimize performance, and understand the agent's reasoning process. For detailed information, see [Observability](observability-evaluation/observability.md), [Traces](observability-evaluation/traces.md), and [Metrics](observability-evaluation/metrics.md). - - -## Console Output - -Agents display their reasoning and responses in real-time to the console by default. You can disable this output by setting `callback_handler=None` when creating your agent: - -```python -agent = Agent( - tools=[calculator, current_time, letter_counter], - callback_handler=None, -) -``` - -Learn more in the [Callback Handlers](concepts/streaming/callback-handlers.md) documentation. - -## Debug Logs - -To enable debug logs in our agent, configure the `strands` logger: - -```python -import logging -from strands import Agent - -# Enables Strands debug log level -logging.getLogger("strands").setLevel(logging.DEBUG) - -# Sets the logging format and streams logs to stderr -logging.basicConfig( - format="%(levelname)s | %(name)s | %(message)s", - handlers=[logging.StreamHandler()] -) - -agent = Agent() - -agent("Hello!") -``` - -See the [Logs documentation](observability-evaluation/logs.md) for more information. - -## Model Providers - -### Identifying a configured model - -Strands defaults to the Bedrock model provider using Claude 4 Sonnet. 
The model your agent is using can be retrieved by accessing [`model.config`](@api/python/strands.models.model#Model.get_config): - -```python -from strands import Agent - -agent = Agent() - -print(agent.model.config) -# {'model_id': 'us.anthropic.claude-sonnet-4-20250514-v1:0'} -``` - -You can specify a different model in two ways: - -1. By passing a string model ID directly to the Agent constructor -2. By creating a model provider instance with specific configurations - -### Using a String Model ID - -The simplest way to specify a model is to pass the model ID string directly: - -```python -from strands import Agent - -# Create an agent with a specific model by passing the model ID string -agent = Agent(model="anthropic.claude-sonnet-4-20250514-v1:0") -``` - -### Amazon Bedrock (Default) - -For more control over model configuration, you can create a model provider instance: - -```python -import boto3 -from strands import Agent -from strands.models import BedrockModel - -# Create a BedrockModel -bedrock_model = BedrockModel( - model_id="anthropic.claude-sonnet-4-20250514-v1:0", - region_name="us-west-2", - temperature=0.3, -) - -agent = Agent(model=bedrock_model) -``` - -For the Amazon Bedrock model provider, see the [Boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html) to configure credentials for your environment. For development, AWS credentials are typically defined in `AWS_` prefixed environment variables or configured with the `aws configure` CLI command. - -You will also need to enable model access in Amazon Bedrock for the models that you choose to use with your agents, following the [AWS documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access-modify.html) to enable access. - -More details in the [Amazon Bedrock Model Provider](concepts/model-providers/amazon-bedrock.md) documentation. 
- -### Additional Model Providers - -Strands Agents supports several other model providers beyond Amazon Bedrock: - -- **[Anthropic](concepts/model-providers/anthropic.md)** - Direct API access to Claude models -- **[Amazon Nova](concepts/model-providers/amazon-nova.md)** - API access to Amazon Nova models -- **[LiteLLM](concepts/model-providers/litellm.md)** - Unified interface for OpenAI, Mistral, and other providers -- **[Llama API](concepts/model-providers/llamaapi.md)** - Access to Meta's Llama models -- **[Mistral](concepts/model-providers/mistral.md)** - Access to Mistral models -- **[Ollama](concepts/model-providers/ollama.md)** - Run models locally for privacy or offline use -- **[OpenAI](concepts/model-providers/openai.md)** - Access to OpenAI or OpenAI-compatible models -- **[Writer](concepts/model-providers/writer.md)** - Access to Palmyra models -- **[Cohere community](../community/model-providers/cohere.md)** - Use Cohere models through an OpenAI compatible interface -- **[CLOVA Studio community](../community/model-providers/clova-studio.md)** - Korean-optimized AI models from Naver Cloud Platform -- **[FireworksAI community](../community/model-providers/fireworksai.md)** - Use FireworksAI models through an OpenAI compatible interface -- **[Custom Providers](concepts/model-providers/custom_model_provider.md)** - Build your own provider for specialized needs - -## Capturing Streamed Data & Events - -Strands provides two main approaches to capture streaming events from an agent: async iterators and callback functions. - -### Async Iterators - -For asynchronous applications (like web servers or APIs), Strands provides an async iterator approach using [`stream_async()`](@api/python/strands.agent.agent#Agent.stream_async). This is particularly useful with async frameworks like FastAPI or Django Channels. 
- -```python -import asyncio -from strands import Agent -from strands_tools import calculator - -# Initialize our agent without a callback handler -agent = Agent( - tools=[calculator], - callback_handler=None # Disable default callback handler -) - -# Async function that iterates over streamed agent events -async def process_streaming_response(): - prompt = "What is 25 * 48 and explain the calculation" - - # Get an async iterator for the agent's response stream - agent_stream = agent.stream_async(prompt) - - # Process events as they arrive - async for event in agent_stream: - if "data" in event: - # Print text chunks as they're generated - print(event["data"], end="", flush=True) - elif "current_tool_use" in event and event["current_tool_use"].get("name"): - # Print tool usage information - print(f"\n[Tool use delta for: {event['current_tool_use']['name']}]") - -# Run the agent with the async event processing -asyncio.run(process_streaming_response()) -``` - -The async iterator yields the same event types as the callback handler callbacks, including text generation events, tool events, and lifecycle events. This approach is ideal for integrating Strands agents with async web frameworks. - -See the [Async Iterators](concepts/streaming/async-iterators.md) documentation for full details. - -> Note, Strands also offers an [`invoke_async()`](@api/python/strands.agent.agent#Agent.invoke_async) method for non-iterative async invocations. - -### Callback Handlers (Callbacks) - -We can create a custom callback function (named a [callback handler](concepts/streaming/callback-handlers.md)) that is invoked at various points throughout an agent's lifecycle. 
- -Here is an example that captures streamed data from the agent and logs it instead of printing: - -```python -import logging -from strands import Agent -from strands_tools import shell - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger() - -# Define a simple callback handler that logs instead of printing -tool_use_ids = [] -def callback_handler(**kwargs): - if "data" in kwargs: - # Log the streamed chunks - logger.info(f"{kwargs['delta']}") - elif "current_tool_use" in kwargs: - tool = kwargs["current_tool_use"] - if tool["toolUseId"] not in tool_use_ids: - # Log the tool use - logger.info(f"[Using tool: {tool.get('name')}]") - tool_use_ids.append(tool["toolUseId"]) - -# Create an agent with the callback handler -agent = Agent( - tools=[shell], - callback_handler=callback_handler -) - -# Ask the agent a question -result = agent("What operating system am I using?") - -# Print only the last response -print(f"\n{result}") -``` - -The callback handler is called in real-time as the agent thinks, uses tools, and responds. - -See the [Callback Handlers](concepts/streaming/callback-handlers.md) documentation for full details. - -## Next Steps - -Ready to learn more? 
Check out these resources: - -- [Examples](../examples/README.md) - Examples for many use cases, multi-agent systems, autonomous agents, and more -- [Community Supported Tools](concepts/tools/community-tools-package.md) - The `strands-agents-tools` package provides many powerful example tools for your agents to use during development -- [Strands Agent Builder](https://github.com/strands-agents/agent-builder) - Use the accompanying `strands-agents-builder` agent builder to harness the power of LLMs to generate your own tools and agents -- [Agent Loop](concepts/agents/agent-loop.md) - Learn how Strands agents work under the hood -- [State & Sessions](concepts/agents/state.md) - Understand how agents maintain context and state across a conversation or workflow -- [Multi-agent](concepts/multi-agent/agents-as-tools.md) - Orchestrate multiple agents together as one system, with each agent completing specialized tasks -- [Observability & Evaluation](observability-evaluation/observability.md) - Understand how agents make decisions and improve them with data -- [Operating Agents in Production](deploy/operating-agents-in-production.md) - Taking agents from development to production, operating them responsibly at scale diff --git a/src/content/docs/user-guide/quickstart/overview.mdx b/src/content/docs/user-guide/quickstart/overview.mdx index 4f812bb85..86d929bc9 100644 --- a/src/content/docs/user-guide/quickstart/overview.mdx +++ b/src/content/docs/user-guide/quickstart/overview.mdx @@ -5,6 +5,7 @@ sidebar: label: "Getting Started" redirectFrom: - docs + - docs/user-guide/quickstart --- import { LinkCard, CardGrid } from '@astrojs/starlight/components'; @@ -18,7 +19,7 @@ The Strands Agents SDK empowers developers to quickly build, manage, evaluate an href="../python/" /> @@ -27,7 +28,7 @@ The Strands Agents SDK empowers developers to quickly build, manage, evaluate an ## Language support -Strands Agents SDK is available in both Python and TypeScript. 
The Python SDK is mature and production-ready with comprehensive feature coverage. The TypeScript SDK is experimental and focuses on core agent functionality. +Strands Agents SDK is available in both Python and TypeScript. ### Feature availability @@ -41,6 +42,7 @@ The table below compares feature availability between the Python and TypeScript | **Model providers** | [Amazon Bedrock](../concepts/model-providers/amazon-bedrock/) | ✅ | ✅ | | | [OpenAI](../concepts/model-providers/openai/) | ✅ | ✅ | | | [Anthropic](../concepts/model-providers/anthropic/) | ✅ | ✅ | +| | [Google](../concepts/model-providers/google/) | ✅ | ✅ | | | [Ollama](../concepts/model-providers/ollama/) | ✅ | ❌ | | | [LiteLLM](../concepts/model-providers/litellm/) | ✅ | ❌ | | | [Custom providers](../concepts/model-providers/custom_model_provider/) | ✅ | ✅ | diff --git a/src/content/docs/user-guide/quickstart/python.mdx b/src/content/docs/user-guide/quickstart/python.mdx index bd90c398a..7ff10c399 100644 --- a/src/content/docs/user-guide/quickstart/python.mdx +++ b/src/content/docs/user-guide/quickstart/python.mdx @@ -42,7 +42,7 @@ pip install strands-agents-tools strands-agents-builder ### Strands MCP Server (Optional) -Strands also provides an MCP (Model Context Protocol) server that can assist you during development. This server gives AI coding assistants in your IDE access to Strands documentation, development prompts, and best practices. You can use it with MCP-compatible clients like Q Developer CLI, Cursor, Claude, Cline, and others to help you: +Strands also provides an MCP (Model Context Protocol) server that can assist you during development. This server gives AI coding assistants in your IDE access to Strands documentation, development prompts, and best practices. 
You can use it with MCP-compatible clients like Kiro, Cursor, Claude, Cline, and others to help you: - Develop custom tools and agents with guided prompts - Debug and troubleshoot your Strands implementations @@ -51,7 +51,7 @@ Strands also provides an MCP (Model Context Protocol) server that can assist you To use the MCP server, you'll need [uv](https://github.com/astral-sh/uv) installed on your system. You can install it by following the [official installation instructions](https://github.com/astral-sh/uv#installation). -Once uv is installed, configure the MCP server with your preferred client. For example, to use with Q Developer CLI, add to `~/.aws/amazonq/mcp.json`: +Once uv is installed, configure the MCP server with your preferred client. For example, to use with Kiro, add to `~/.kiro/settings/mcp.json`: ```json { diff --git a/src/content/docs/user-guide/quickstart/typescript.mdx b/src/content/docs/user-guide/quickstart/typescript.mdx index 0297f922c..82fc00c3f 100644 --- a/src/content/docs/user-guide/quickstart/typescript.mdx +++ b/src/content/docs/user-guide/quickstart/typescript.mdx @@ -5,10 +5,6 @@ sidebar: label: "TypeScript" --- -:::caution[Experimental SDK] -The TypeScript SDK is currently experimental. It does not yet support all features available in the Python SDK, and breaking changes are expected as development continues. Use with caution in production environments. -::: - This quickstart guide shows you how to create your first basic Strands agent with TypeScript, add built-in and custom tools to your agent, use different model providers, emit debug logs, and run the agent locally. After completing this guide you can integrate your agent with a web server or browser, evaluate and improve your agent, along with deploying to production and running at scale. @@ -121,7 +117,7 @@ And that's it! 
We now have a running agent with powerful tools and abilities in ## Understanding What Agents Did -After running an agent, you can understand what happened during execution by examining the agent's messages and through traces and metrics. Every agent invocation returns an `AgentResult` object that contains the data the agent used along with (comming soon) comprehensive observability data. +After running an agent, you can understand what happened during execution by examining the agent's messages, traces, and metrics. Every agent invocation returns an `AgentResult` object that contains the data the agent used along with comprehensive observability data including [local execution traces](../observability-evaluation/metrics.md#local-execution-traces) and [metrics](../observability-evaluation/metrics.md). ```typescript @@ -171,7 +167,7 @@ More details in the [Amazon Bedrock Model Provider](../concepts/model-providers/ Strands Agents supports several other model providers beyond Amazon Bedrock: - **[OpenAI](../concepts/model-providers/openai.md)** - Access to OpenAI or OpenAI-compatible models -- **[Gemini](../concepts/model-providers/gemini.md)** - Access to Google's Gemini models +- **[Google](../concepts/model-providers/google.md)** - Access to Google's Gemini models ## Capturing Streamed Data & Events diff --git a/src/content/docs/user-guide/versioning-and-support.mdx b/src/content/docs/user-guide/versioning-and-support.mdx index 4f72969de..b01ed62a8 100644 --- a/src/content/docs/user-guide/versioning-and-support.mdx +++ b/src/content/docs/user-guide/versioning-and-support.mdx @@ -186,5 +186,5 @@ The Strands SDK is an open-source project that welcomes community contributions. 
* [Python Issues](https://github.com/strands-agents/sdk-python/issues) * [TypeScript Issues](https://github.com/strands-agents/sdk-typescript/issues) * [Evals Issues](https://github.com/strands-agents/evals/issues) -* **Contribute Code**: Review the [Contributing Guide](https://github.com/strands-agents/sdk-python/blob/main/CONTRIBUTING.md) to get started +* **Contribute Code**: Review the [Contributing Guide](../contribute/contributing/core-sdk.md) to get started * **Share Feedback**: Your input on versioning and support policies helps shape the SDK's future diff --git a/src/util/redirect.ts b/src/util/redirect.ts index 66b1e9d27..c7e2ea131 100644 --- a/src/util/redirect.ts +++ b/src/util/redirect.ts @@ -16,6 +16,12 @@ type SlugRule = | { match: RegExp; to: (m: RegExpMatchArray) => string } const SLUG_RULES: SlugRule[] = [ + // gemini was renamed to google + { + match: exactly('docs/user-guide/concepts/model-providers/gemini'), + to: 'docs/user-guide/concepts/model-providers/google', + }, + // python-tools was renamed to custom-tools { match: exactly('docs/user-guide/concepts/tools/python-tools'), diff --git a/test/known-routes.json b/test/known-routes.json index 629537b36..9b59fc0e3 100644 --- a/test/known-routes.json +++ b/test/known-routes.json @@ -43,7 +43,8 @@ "/latest/documentation/docs/examples/python/multi_agent_example/", "/latest/documentation/docs/examples/python/multi_agent_example/multi_agent_example/", "/latest/documentation/docs/examples/python/multimodal/", - "/latest/documentation/docs/examples/python/structured_output/", + "/docs/examples/python/structured_output/", + "/latest/documentation/docs/examples/structured_output/", "/latest/documentation/docs/examples/python/weather_forecaster/", "/latest/documentation/docs/examples/typescript/deploy_to_bedrock_agentcore/", "/docs/llms/", @@ -80,7 +81,7 @@ "/latest/documentation/docs/user-guide/concepts/model-providers/cohere/", 
"/latest/documentation/docs/user-guide/concepts/model-providers/custom_model_provider/", "/latest/documentation/docs/user-guide/concepts/model-providers/fireworksai/", - "/latest/documentation/docs/user-guide/concepts/model-providers/gemini/", + "/latest/documentation/docs/user-guide/concepts/model-providers/google/", "/latest/documentation/docs/user-guide/concepts/model-providers/litellm/", "/latest/documentation/docs/user-guide/concepts/model-providers/llamaapi/", "/latest/documentation/docs/user-guide/concepts/model-providers/llamacpp/", @@ -156,5 +157,6 @@ "/docs/user-guide/concepts/model-providers/nebius-token-factory/", "/docs/user-guide/concepts/model-providers/fireworksai/", "/docs/user-guide/concepts/model-providers/xai/", + "/docs/user-guide/quickstart/", "/docs/" ]