diff --git a/src/oss/langchain/agents.mdx b/src/oss/langchain/agents.mdx
index bb516cbd9..ff8e3b036 100644
--- a/src/oss/langchain/agents.mdx
+++ b/src/oss/langchain/agents.mdx
@@ -513,8 +513,7 @@ result = agent.invoke(
 :::js
 ```typescript wrap
 import { z } from "zod";
-import { createAgent } from "langchain";
-import { dynamicSystemPromptMiddleware } from "langchain/middleware";
+import { createAgent, dynamicSystemPromptMiddleware } from "langchain";
 
 const contextSchema = z.object({
   userRole: z.enum(["expert", "beginner"]),
diff --git a/src/oss/langchain/mcp.mdx b/src/oss/langchain/mcp.mdx
index 15b8a92b2..7f5ee72d8 100644
--- a/src/oss/langchain/mcp.mdx
+++ b/src/oss/langchain/mcp.mdx
@@ -106,7 +106,6 @@ weather_response = await agent.ainvoke(
 
 ```ts Accessing multiple MCP servers {highlight={1,5,19,22}} icon="server"
 import { MultiServerMCPClient } from "@langchain/mcp-adapters";
-import { ChatAnthropic } from "@langchain/anthropic";
 import { createAgent } from "langchain";
 
 const client = new MultiServerMCPClient({
@@ -125,7 +124,7 @@ const client = new MultiServerMCPClient({
 const tools = await client.getTools();
 
 const agent = createAgent({
-  llm: new ChatAnthropic({ model: "claude-3-7-sonnet-latest" }),
+  model: "anthropic:claude-3-7-sonnet-latest",
   tools,
 });
 
diff --git a/src/oss/langchain/middleware.mdx b/src/oss/langchain/middleware.mdx
index 2050314a9..a8d229bd1 100644
--- a/src/oss/langchain/middleware.mdx
+++ b/src/oss/langchain/middleware.mdx
@@ -118,8 +118,7 @@ agent = create_agent(
 :::
 :::js
 ```typescript
-import { createAgent } from "langchain";
-import { summarizationMiddleware, humanInTheLoopMiddleware } from "langchain/middleware";
+import { createAgent, summarizationMiddleware, humanInTheLoopMiddleware } from "langchain";
 
 const agent = createAgent({
   // ...
@@ -190,8 +189,7 @@ agent = create_agent(
 
 :::js
 ```typescript
-import { createAgent } from "langchain";
-import { summarizationMiddleware } from "langchain/middleware";
+import { createAgent, summarizationMiddleware } from "langchain";
 import { ChatOpenAI } from "@langchain/openai";
 
 const agent = createAgent({
@@ -276,7 +274,7 @@ agent = create_agent(
     tools=[write_file_tool, execute_sql_tool, read_data_tool],
     middleware=[
         HumanInTheLoopMiddleware(
-            tool_configs={
+            interrupt_on={
                 "write_file": {
                     "allow_accept": True,
                     "allow_edit": True,
@@ -299,8 +297,7 @@ agent = create_agent(
 
 :::js
 ```typescript
-import { createAgent } from "langchain";
-import { humanInTheLoopMiddleware } from "langchain/middleware";
+import { createAgent, humanInTheLoopMiddleware } from "langchain";
 import { Command, MemorySaver } from "@langchain/langgraph";
 
 const agent = createAgent({
@@ -367,33 +364,29 @@ if state.next:
 
 :::js
 ```typescript
-import { HumanMessage } from "langchain";
+import { HumanMessage, HumanInTheLoopRequest, Interrupt } from "langchain";
 import { Command } from "@langchain/langgraph";
-import type { Interrupt, HumanInTheLoopRequest } from "langchain/middleware";
 
 // Initial invocation
 const result = await agent.invoke(
-    {
-        messages: [new HumanMessage("Delete old records from the database")],
-    },
-    config
+  {
+    messages: [new HumanMessage("Delete old records from the database")],
+  },
+  config
 );
 
 // Check if agent is paused for approval
 if (result.__interrupt__) {
-    const interruptRequest = result.__interrupt__?.[0] as Interrupt<
-        HumanInTheLoopRequest[]
-    >;
-
-    // Show tool call details to user
-    console.log("Tool:", interruptRequest.value[0].actionRequest);
-    console.log("Allowed actions:", interruptRequest.value[0].config);
-
-    // Resume with approval
-    await agent.invoke(
-        new Command({ resume: [{ type: "accept" }] }),
-        config
-    );
+  const interruptRequest = result.__interrupt__?.[0] as Interrupt<
+    HumanInTheLoopRequest[]
+  >;
+
+  // Show tool call details to user
+  console.log("Tool:", interruptRequest.value[0].actionRequest);
+  console.log("Allowed actions:", interruptRequest.value[0].config);
+
+  // Resume with approval
+  await agent.invoke(new Command({ resume: [{ type: "accept" }] }), config);
 }
 ```
 :::
@@ -412,7 +405,7 @@ from langgraph.types import Command
 
 # Create the human-in-the-loop middleware
 hitl_middleware = HumanInTheLoopMiddleware(
-    tool_configs={
+    interrupt_on={
         "write_file": {
             "allow_edit": True,
             "allow_accept": True,
@@ -468,11 +461,18 @@ if state.next:
 
 :::js
 ```typescript
-import type { Interrupt } from "langchain";
-import type { HumanInTheLoopRequest, HumanInTheLoopMiddlewareHumanResponse } from "langchain/middleware";
+import {
+  createAgent,
+  humanInTheLoopMiddleware,
+  type HumanInTheLoopMiddlewareHumanResponse,
+  type HumanInTheLoopRequest,
+  HumanMessage,
+  type Interrupt,
+} from "langchain";
+import { Command, MemorySaver } from "@langchain/langgraph";
 
 const hitlMiddleware = humanInTheLoopMiddleware({
-  toolConfigs: {
+  interruptOn: {
     write_file: {
       allowEdit: true,
       allowAccept: true,
@@ -486,9 +486,9 @@ const checkpointer = new MemorySaver();
 const agent = createAgent({
   model: "openai:gpt-4o",
   checkpointer,
-  prompt:
+  systemPrompt:
     "You are a helpful assistant. Use the tools provided to help the user.",
-  tools: [writeFileTool],
+  tools: [/* ... */],
   middleware: [hitlMiddleware] as const,
 });
 
@@ -501,7 +501,11 @@ const config = {
 // Initial invocation
 const result = await agent.invoke(
   {
-    messages: [new HumanMessage("Write to greeting.txt with the content 'Hello, world!'")],
+    messages: [
+      new HumanMessage(
+        "Write to greeting.txt with the content 'Hello, world!'"
+      ),
+    ],
   },
   config
 );
@@ -509,13 +513,15 @@ const result = await agent.invoke(
 const interruptRequest = result.__interrupt__?.[0] as Interrupt<
   HumanInTheLoopRequest[]
 >;
-const resume: HumanInTheLoopMiddlewareHumanResponse[] = [{
-  type: "edit",
-  args: {
-    action: "write_file",
-    args: { filename: "greeting.txt", content: "Safe content" },
+const resume: HumanInTheLoopMiddlewareHumanResponse[] = [
+  {
+    type: "edit",
+    args: {
+      action: "write_file",
+      args: { filename: "greeting.txt", content: "Safe content" },
+    },
   },
-}];
+];
 
 // Resume with approval
 await agent.invoke(new Command({ resume }), config);
@@ -539,7 +545,7 @@ from langgraph.checkpoint.memory import InMemorySaver
 from langgraph.types import Command
 
 hitl_middleware = HumanInTheLoopMiddleware(
-    tool_configs={
+    interrupt_on={
         "write_file": True,
     }
 )
@@ -587,11 +593,17 @@ if state.next:
 
 :::js
 ```typescript
-import type { Interrupt } from "langchain";
-import type { HumanInTheLoopRequest, HumanInTheLoopMiddlewareHumanResponse } from "langchain/middleware";
+import {
+  Interrupt,
+  HumanMessage,
+  HumanInTheLoopRequest,
+  humanInTheLoopMiddleware,
+} from "langchain";
+import { createAgent } from "langchain";
+import { Command, MemorySaver } from "@langchain/langgraph";
 
 const hitlMiddleware = humanInTheLoopMiddleware({
-  toolConfigs: {
+  interruptOn: {
     write_file: true,
   },
 });
@@ -600,9 +612,11 @@ const checkpointer = new MemorySaver();
 const agent = createAgent({
   model: "openai:gpt-4o",
   checkpointer,
-  prompt:
+  systemPrompt:
     "You are a helpful assistant. Use the tools provided to help the user.",
-  tools: [writeFileTool],
+  tools: [
+    /* ... */
+  ],
   middleware: [hitlMiddleware] as const,
 });
 
@@ -612,21 +626,30 @@ const config = {
   },
 };
 
-const result = await agent.invoke({ messages: [HumanMessage("Write to greeting.txt with the content 'Hello, world!'")] }, config);
+const result = await agent.invoke(
+  {
+    messages: [
+      new HumanMessage(
+        "Write to greeting.txt with the content 'Hello, world!'"
+      ),
+    ],
+  },
+  config
+);
 
 const interruptRequest = result.__interrupt__?.[0] as Interrupt<
   HumanInTheLoopRequest[]
 >;
-const resume = (
+const resume =
   interruptRequest.value[0].actionRequest.action === "write_file" &&
-  interruptRequest.value[0].actionRequest.args.filename ===
-    "important.txt"
-) ? {
-  type: "response",
-  args: "File operation not allowed for this file",
-} : {
-  type: "accept",
-};
+  interruptRequest.value[0].actionRequest.args.filename === "important.txt"
+    ? {
+        type: "response",
+        args: "File operation not allowed for this file",
+      }
+    : {
+        type: "accept",
+      };
 
 await agent.invoke(new Command({ resume: [resume] }), config);
 ```
@@ -641,7 +664,7 @@ await agent.invoke(new Command({ resume: [resume] }), config);
 **Configuration:**
 
 :::python
-- `tool_configs`: Map of tool names to their approval settings, each with the following options:
+- `interrupt_on`: Map of tool names to their approval settings, each with the following options:
   - `allow_accept`: Whether the human can approve the current action without changes
   - `allow_edit`: Whether the human can approve the current action with edited content
   - `allow_respond`: Whether the human can reject the current action with feedback
@@ -649,7 +672,7 @@
   - `description_prefix`: The prefix to use when constructing action requests.
 :::
 :::js
-- `toolConfigs`: Map of tool names to their approval settings, each with the following options:
+- `interruptOn`: Map of tool names to their approval settings, each with the following options:
   - `allowAccept`: Whether the human can approve the current action without changes
   - `allowEdit`: Whether the human can approve the current action with edited content
   - `allowRespond`: Whether the human can reject the current action with feedback
@@ -700,8 +723,7 @@ agent.invoke({"messages": [HumanMessage("What's my name?")]})
 :::
 :::js
 ```typescript
-import { createAgent, HumanMessage } from "langchain";
-import { anthropicPromptCachingMiddleware } from "langchain/middleware";
+import { anthropicPromptCachingMiddleware, createAgent, HumanMessage } from "langchain";
 
 const LONG_PROMPT = `
 Please be a helpful assistant.
@@ -711,7 +733,7 @@ Please be a helpful assistant.
 
 const agent = createAgent({
   model: "anthropic:claude-sonnet-4-latest",
-  prompt: LONG_PROMPT,
+  systemPrompt: LONG_PROMPT,
   middleware: [anthropicPromptCachingMiddleware({ ttl: "5m" })],
 });
 
@@ -780,36 +802,43 @@ For example, you can adjust the system prompt based on the user's expertise leve
 
 ```typescript
 import { z } from "zod";
-import { createAgent } from "langchain";
-import { dynamicSystemPromptMiddleware } from "langchain/middleware";
+import {
+  createAgent,
+  dynamicSystemPromptMiddleware,
+  HumanMessage,
+} from "langchain";
 
 const contextSchema = z.object({
-    userRole: z.enum(["expert", "beginner"]),
+  userRole: z.enum(["expert", "beginner"]),
 });
 
 const agent = createAgent({
-    model: "openai:gpt-4o",
-    tools: [...],
-    contextSchema,
-    middleware: [
-        dynamicSystemPromptMiddleware<z.infer<typeof contextSchema>>((state, runtime) => {
-            const userRole = runtime.context.userRole || "user";
-            const basePrompt = "You are a helpful assistant.";
-
-            if (userRole === "expert") {
-                return `${basePrompt} Provide detailed technical responses.`;
-            } else if (userRole === "beginner") {
-                return `${basePrompt} Explain concepts simply and avoid jargon.`;
-            }
-            return basePrompt;
-        }),
-    ],
+  model: "openai:gpt-4o",
+  tools: [
+    /* ... */
+  ],
+  contextSchema,
+  middleware: [
+    dynamicSystemPromptMiddleware<z.infer<typeof contextSchema>>(
+      (state, runtime) => {
+        const userRole = runtime.context.userRole || "user";
+        const basePrompt = "You are a helpful assistant.";
+
+        if (userRole === "expert") {
+          return `${basePrompt} Provide detailed technical responses.`;
+        } else if (userRole === "beginner") {
+          return `${basePrompt} Explain concepts simply and avoid jargon.`;
+        }
+        return basePrompt;
+      }
+    ),
+  ],
 });
 
 // The system prompt will be set dynamically based on context
 const result = await agent.invoke(
-    { messages: [{ role: "user", content: "Explain async programming" }] },
-    { context: { userRole: "expert" } }
+  { messages: [new HumanMessage("Explain async programming")] },
+  { context: { userRole: "expert" } }
 );
 ```
 :::
diff --git a/src/oss/langchain/test.mdx b/src/oss/langchain/test.mdx
index 3d5324efb..46aad03ef 100644
--- a/src/oss/langchain/test.mdx
+++ b/src/oss/langchain/test.mdx
@@ -170,9 +170,7 @@ def test_weather_tool_called_strict():
 
 :::js
 ```ts highlight={25-27}
-import { createAgent } from "langchain"
-import { tool } from "@langchain/core/tools";
-import { HumanMessage, AIMessage, ToolMessage } from "@langchain/core/messages";
+import { createAgent, tool, HumanMessage, AIMessage, ToolMessage } from "langchain"
 import { createTrajectoryMatchEvaluator } from "agentevals";
 import { z } from "zod";
 
@@ -190,7 +188,7 @@ const getWeather = tool(
 );
 
 const agent = createAgent({
-  llm: "openai:gpt-4o",
+  model: "openai:gpt-4o",
   tools: [getWeather]
 });
 
@@ -292,9 +290,7 @@ def test_multiple_tools_any_order():
 
 :::js
 ```ts highlight={34-36}
-import { createAgent } from "langchain"
-import { tool } from "@langchain/core/tools";
-import { HumanMessage, AIMessage, ToolMessage } from "@langchain/core/messages";
+import { createAgent, tool, HumanMessage, AIMessage, ToolMessage } from "langchain"
 import { createTrajectoryMatchEvaluator } from "agentevals";
 import { z } from "zod";
 
@@ -321,7 +317,7 @@ const getEvents = tool(
 );
 
 const agent = createAgent({
-  llm: "openai:gpt-4o",
+  model: "openai:gpt-4o",
   tools: [getWeather, getEvents]
 });
 
@@ -427,9 +423,7 @@ def test_agent_calls_required_tools_plus_extra():
 
 :::js
 ```ts highlight={34-36}
-import { createAgent } from "langchain"
-import { tool } from "@langchain/core/tools";
-import { HumanMessage, AIMessage, ToolMessage } from "@langchain/core/messages";
+import { createAgent, tool, HumanMessage, AIMessage, ToolMessage } from "langchain"
 import { createTrajectoryMatchEvaluator } from "agentevals";
 import { z } from "zod";
 
@@ -456,7 +450,7 @@ const getDetailedForecast = tool(
 );
 
 const agent = createAgent({
-  llm: "openai:gpt-4o",
+  model: "openai:gpt-4o",
   tools: [getWeather, getDetailedForecast]
 });
 
@@ -557,9 +551,7 @@ def test_trajectory_quality():
 :::
 :::js
 ```ts highlight={23-26}
-import { createAgent } from "langchain"
-import { tool } from "@langchain/core/tools";
-import { HumanMessage, AIMessage, ToolMessage } from "@langchain/core/messages";
+import { createAgent, tool, HumanMessage, AIMessage, ToolMessage } from "langchain"
 import { createTrajectoryLLMAsJudge, TRAJECTORY_ACCURACY_PROMPT } from "agentevals";
 import { z } from "zod";
 
@@ -575,7 +567,7 @@ const getWeather = tool(
 );
 
 const agent = createAgent({
-  llm: "openai:gpt-4o",
+  model: "openai:gpt-4o",
   tools: [getWeather]
 });
 
@@ -780,6 +772,7 @@ LangSmith offers two main approaches for running evaluations: [Vitest/Jest](/lan
 
 ```ts
 import * as ls from "langsmith/vitest";
+// alternatively:
 // import * as ls from "langsmith/jest";
 import { createTrajectoryLLMAsJudge, TRAJECTORY_ACCURACY_PROMPT } from "agentevals";