Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions src/oss/langchain/agents.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -513,8 +513,7 @@ result = agent.invoke(
:::js
```typescript wrap
import { z } from "zod";
import { createAgent } from "langchain";
import { dynamicSystemPromptMiddleware } from "langchain/middleware";
import { createAgent, dynamicSystemPromptMiddleware } from "langchain";

const contextSchema = z.object({
userRole: z.enum(["expert", "beginner"]),
Expand Down
3 changes: 1 addition & 2 deletions src/oss/langchain/mcp.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,6 @@ weather_response = await agent.ainvoke(

```ts Accessing multiple MCP servers {highlight={1,5,19,22}} icon="server"
import { MultiServerMCPClient } from "@langchain/mcp-adapters";
import { ChatAnthropic } from "@langchain/anthropic";
import { createAgent } from "langchain";

const client = new MultiServerMCPClient({
Expand All @@ -125,7 +124,7 @@ const client = new MultiServerMCPClient({

const tools = await client.getTools();
const agent = createAgent({
llm: new ChatAnthropic({ model: "claude-3-7-sonnet-latest" }),
model: "anthropic:claude-3-7-sonnet-latest",
tools,
});

Expand Down
191 changes: 110 additions & 81 deletions src/oss/langchain/middleware.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -118,8 +118,7 @@ agent = create_agent(
:::
:::js
```typescript
import { createAgent } from "langchain";
import { summarizationMiddleware, humanInTheLoopMiddleware } from "langchain/middleware";
import { createAgent, summarizationMiddleware, humanInTheLoopMiddleware } from "langchain";

const agent = createAgent({
// ...
Expand Down Expand Up @@ -190,8 +189,7 @@ agent = create_agent(

:::js
```typescript
import { createAgent } from "langchain";
import { summarizationMiddleware } from "langchain/middleware";
import { createAgent, summarizationMiddleware } from "langchain";
import { ChatOpenAI } from "@langchain/openai";

const agent = createAgent({
Expand Down Expand Up @@ -276,7 +274,7 @@ agent = create_agent(
tools=[write_file_tool, execute_sql_tool, read_data_tool],
middleware=[
HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have a different PR for HIL

"write_file": {
"allow_accept": True,
"allow_edit": True,
Expand All @@ -299,8 +297,7 @@ agent = create_agent(

:::js
```typescript
import { createAgent } from "langchain";
import { humanInTheLoopMiddleware } from "langchain/middleware";
import { createAgent, humanInTheLoopMiddleware } from "langchain";
import { Command, MemorySaver } from "@langchain/langgraph";

const agent = createAgent({
Expand Down Expand Up @@ -367,33 +364,29 @@ if state.next:

:::js
```typescript
import { HumanMessage } from "langchain";
import { HumanMessage, HumanInTheLoopRequest, Interrupt } from "langchain";
import { Command } from "@langchain/langgraph";
import type { Interrupt, HumanInTheLoopRequest } from "langchain/middleware";

// Initial invocation
const result = await agent.invoke(
{
messages: [new HumanMessage("Delete old records from the database")],
},
config
{
messages: [new HumanMessage("Delete old records from the database")],
},
config
);

// Check if agent is paused for approval
if (result.__interrupt__) {
const interruptRequest = result.__interrupt__?.[0] as Interrupt<
HumanInTheLoopRequest[]
>;

// Show tool call details to user
console.log("Tool:", interruptRequest.value[0].actionRequest);
console.log("Allowed actions:", interruptRequest.value[0].config);

// Resume with approval
await agent.invoke(
new Command({ resume: [{ type: "accept" }] }),
config
);
const interruptRequest = result.__interrupt__?.[0] as Interrupt<
HumanInTheLoopRequest[]
>;

// Show tool call details to user
console.log("Tool:", interruptRequest.value[0].actionRequest);
console.log("Allowed actions:", interruptRequest.value[0].config);

// Resume with approval
await agent.invoke(new Command({ resume: [{ type: "accept" }] }), config);
}
```
:::
Expand All @@ -412,7 +405,7 @@ from langgraph.types import Command

# Create the human-in-the-loop middleware
hitl_middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"write_file": {
"allow_edit": True,
"allow_accept": True,
Expand Down Expand Up @@ -468,11 +461,18 @@ if state.next:

:::js
```typescript
import type { Interrupt } from "langchain";
import type { HumanInTheLoopRequest, HumanInTheLoopMiddlewareHumanResponse } from "langchain/middleware";
import {
createAgent,
humanInTheLoopMiddleware,
type HumanInTheLoopMiddlewareHumanResponse,
type HumanInTheLoopRequest,
HumanMessage,
type Interrupt,
} from "langchain";
import { Command, MemorySaver } from "@langchain/langgraph";

const hitlMiddleware = humanInTheLoopMiddleware({
toolConfigs: {
interruptOn: {
write_file: {
allowEdit: true,
allowAccept: true,
Expand All @@ -486,9 +486,9 @@ const checkpointer = new MemorySaver();
const agent = createAgent({
model: "openai:gpt-4o",
checkpointer,
prompt:
systemPrompt:
"You are a helpful assistant. Use the tools provided to help the user.",
tools: [writeFileTool],
tools: [/* ... */],
middleware: [hitlMiddleware] as const,
});

Expand All @@ -501,21 +501,27 @@ const config = {
// Initial invocation
const result = await agent.invoke(
{
messages: [new HumanMessage("Write to greeting.txt with the content 'Hello, world!'")],
messages: [
new HumanMessage(
"Write to greeting.txt with the content 'Hello, world!'"
),
],
},
config
);

const interruptRequest = result.__interrupt__?.[0] as Interrupt<
HumanInTheLoopRequest[]
>;
const resume: HumanInTheLoopMiddlewareHumanResponse[] = [{
type: "edit",
args: {
action: "write_file",
args: { filename: "greeting.txt", content: "Safe content" },
const resume: HumanInTheLoopMiddlewareHumanResponse[] = [
{
type: "edit",
args: {
action: "write_file",
args: { filename: "greeting.txt", content: "Safe content" },
},
},
}];
];

// Resume with approval
await agent.invoke(new Command({ resume }), config);
Expand All @@ -539,7 +545,7 @@ from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import Command

hitl_middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"write_file": True,
}
)
Expand Down Expand Up @@ -587,11 +593,17 @@ if state.next:

:::js
```typescript
import type { Interrupt } from "langchain";
import type { HumanInTheLoopRequest, HumanInTheLoopMiddlewareHumanResponse } from "langchain/middleware";
import {
Interrupt,
HumanMessage,
HumanInTheLoopRequest,
humanInTheLoopMiddleware,
} from "langchain";
import { createAgent } from "langchain";
Copy link

Copilot AI Sep 28, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The createAgent import is duplicated. It's already imported on line 596-601 in the same import block. Remove this duplicate import statement.

Suggested change
import { createAgent } from "langchain";

Copilot uses AI. Check for mistakes.

import { Command, MemorySaver } from "@langchain/langgraph";

const hitlMiddleware = humanInTheLoopMiddleware({
toolConfigs: {
interruptOn: {
write_file: true,
},
});
Expand All @@ -600,9 +612,11 @@ const checkpointer = new MemorySaver();
const agent = createAgent({
model: "openai:gpt-4o",
checkpointer,
prompt:
systemPrompt:
"You are a helpful assistant. Use the tools provided to help the user.",
tools: [writeFileTool],
tools: [
/* ... */
],
middleware: [hitlMiddleware] as const,
});

Expand All @@ -612,21 +626,30 @@ const config = {
},
};

const result = await agent.invoke({ messages: [HumanMessage("Write to greeting.txt with the content 'Hello, world!'")] }, config);
const result = await agent.invoke(
{
messages: [
new HumanMessage(
"Write to greeting.txt with the content 'Hello, world!'"
),
],
},
config
);
Comment on lines +629 to +638
Copy link

Copilot AI Sep 28, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The new keyword is missing when creating the HumanMessage instance. This is inconsistent with other examples in the file and will cause a runtime error.

Copilot uses AI. Check for mistakes.


const interruptRequest = result.__interrupt__?.[0] as Interrupt<
HumanInTheLoopRequest[]
>;
const resume = (
const resume =
interruptRequest.value[0].actionRequest.action === "write_file" &&
interruptRequest.value[0].actionRequest.args.filename ===
"important.txt"
) ? {
type: "response",
args: "File operation not allowed for this file",
} : {
type: "accept",
};
interruptRequest.value[0].actionRequest.args.filename === "important.txt"
? {
type: "response",
args: "File operation not allowed for this file",
}
: {
type: "accept",
};

await agent.invoke(new Command({ resume: [resume] }), config);
```
Expand All @@ -641,15 +664,15 @@ await agent.invoke(new Command({ resume: [resume] }), config);
**Configuration:**

:::python
- `tool_configs`: Map of tool names to their approval settings, each with the following options:
- `interrupt_on`: Map of tool names to their approval settings, each with the following options:
- `allow_accept`: Whether the human can approve the current action without changes
- `allow_edit`: Whether the human can approve the current action with edited content
- `allow_respond`: Whether the human can reject the current action with feedback
- `description`: The description attached to the request for human input
- `description_prefix`: The prefix to use when constructing action requests.
:::
:::js
- `toolConfigs`: Map of tool names to their approval settings, each with the following options:
- `interruptOn`: Map of tool names to their approval settings, each with the following options:
- `allowAccept`: Whether the human can approve the current action without changes
- `allowEdit`: Whether the human can approve the current action with edited content
- `allowRespond`: Whether the human can reject the current action with feedback
Expand Down Expand Up @@ -700,8 +723,7 @@ agent.invoke({"messages": [HumanMessage("What's my name?")]})
:::
:::js
```typescript
import { createAgent, HumanMessage } from "langchain";
import { anthropicPromptCachingMiddleware } from "langchain/middleware";
import { anthropicPromptCachingMiddleware, createAgent, HumanMessage } from "langchain";

const LONG_PROMPT = `
Please be a helpful assistant.
Expand All @@ -711,7 +733,7 @@ Please be a helpful assistant.

const agent = createAgent({
model: "anthropic:claude-sonnet-4-latest",
prompt: LONG_PROMPT,
systemPrompt: LONG_PROMPT,
middleware: [anthropicPromptCachingMiddleware({ ttl: "5m" })],
});

Expand Down Expand Up @@ -780,36 +802,43 @@ For example, you can adjust the system prompt based on the user's expertise leve

```typescript
import { z } from "zod";
import { createAgent } from "langchain";
import { dynamicSystemPromptMiddleware } from "langchain/middleware";
import {
createAgent,
dynamicSystemPromptMiddleware,
HumanMessage,
} from "langchain";

const contextSchema = z.object({
userRole: z.enum(["expert", "beginner"]),
userRole: z.enum(["expert", "beginner"]),
});

const agent = createAgent({
model: "openai:gpt-4o",
tools: [...],
contextSchema,
middleware: [
dynamicSystemPromptMiddleware<z.infer<typeof contextSchema>>((state, runtime) => {
const userRole = runtime.context.userRole || "user";
const basePrompt = "You are a helpful assistant.";

if (userRole === "expert") {
return `${basePrompt} Provide detailed technical responses.`;
} else if (userRole === "beginner") {
return `${basePrompt} Explain concepts simply and avoid jargon.`;
}
return basePrompt;
}),
],
model: "openai:gpt-4o",
tools: [
/* ... */
],
contextSchema,
middleware: [
dynamicSystemPromptMiddleware<z.infer<typeof contextSchema>>(
(state, runtime) => {
const userRole = runtime.context.userRole || "user";
const basePrompt = "You are a helpful assistant.";

if (userRole === "expert") {
return `${basePrompt} Provide detailed technical responses.`;
} else if (userRole === "beginner") {
return `${basePrompt} Explain concepts simply and avoid jargon.`;
}
return basePrompt;
}
),
],
});

// The system prompt will be set dynamically based on context
const result = await agent.invoke(
{ messages: [{ role: "user", content: "Explain async programming" }] },
{ context: { userRole: "expert" } }
{ messages: [new HumanMessage("Explain async programming")] },
{ context: { userRole: "expert" } }
);
```
:::
Expand Down
Loading