diff --git a/api/package.json b/api/package.json
index 61a65429b77e..b0e4fcb14d09 100644
--- a/api/package.json
+++ b/api/package.json
@@ -48,7 +48,7 @@
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
- "@modelcontextprotocol/sdk": "^1.27.1",
+ "@modelcontextprotocol/sdk": "^1.29.0",
"@node-saml/passport-saml": "^5.1.0",
"@smithy/node-http-handler": "^4.4.5",
"ai-tokenizer": "^1.0.6",
diff --git a/api/server/controllers/agents/v1.js b/api/server/controllers/agents/v1.js
index e365b232e4cd..5bddb9aac360 100644
--- a/api/server/controllers/agents/v1.js
+++ b/api/server/controllers/agents/v1.js
@@ -10,7 +10,9 @@ const {
collectEdgeAgentIds,
mergeAgentOcrConversion,
MAX_AVATAR_REFRESH_AGENTS,
+ collectToolResourceFileIds,
convertOcrToContextInPlace,
+ stripFileIdsFromToolResources,
} = require('@librechat/api');
const {
Time,
@@ -387,6 +389,38 @@ const updateAgentHandler = async (req, res) => {
updateData.tools = ocrConversion.tools;
}
+ /*
+ * Strip orphaned file_id stubs from the incoming payload (see issue #12776).
+ * Scoped to updates that actually touch tool_resources: if the save does not
+ * modify that field, the delete-time cleanup in processDeleteRequest and the
+ * one-off migration already cover pre-existing corruption, so there's no
+ * reason to pay an extra DB round-trip here. Wrapped in try/catch so a
+ * transient failure in this integrity check never turns a good save into 500.
+ */
+ if (updateData.tool_resources) {
+ try {
+ const referencedFileIds = collectToolResourceFileIds(updateData.tool_resources);
+ if (referencedFileIds.length > 0) {
+ const existingFiles = await db.getFiles({ file_id: { $in: referencedFileIds } }, null, {
+ file_id: 1,
+ });
+ const existingIds = new Set((existingFiles ?? []).map((f) => f.file_id));
+ const orphans = referencedFileIds.filter((id) => !existingIds.has(id));
+ if (orphans.length > 0) {
+ logger.warn(
+ `[/Agents/:id] Pruning ${orphans.length} orphaned file reference(s) from agent ${id}`,
+ );
+ stripFileIdsFromToolResources(updateData.tool_resources, orphans);
+ }
+ }
+ } catch (orphanCheckError) {
+ logger.warn(
+ '[/Agents/:id] Orphan file check failed, skipping cleanup for this request',
+ orphanCheckError,
+ );
+ }
+ }
+
if (updateData.tools) {
const existingToolSet = new Set(existingAgent.tools ?? []);
const newMCPTools = updateData.tools.filter(
diff --git a/api/server/controllers/agents/v1.spec.js b/api/server/controllers/agents/v1.spec.js
index 455cea2e7c49..d191822add99 100644
--- a/api/server/controllers/agents/v1.spec.js
+++ b/api/server/controllers/agents/v1.spec.js
@@ -1,7 +1,7 @@
const mongoose = require('mongoose');
const { nanoid } = require('nanoid');
const { v4: uuidv4 } = require('uuid');
-const { agentSchema } = require('@librechat/data-schemas');
+const { agentSchema, fileSchema } = require('@librechat/data-schemas');
const { FileSources, PermissionBits } = require('librechat-data-provider');
const { MongoMemoryServer } = require('mongodb-memory-server');
@@ -99,6 +99,9 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
Agent = mongoose.models.Agent || mongoose.model('Agent', agentSchema);
+ // Register File so orphan-pruning tests (and the tool_resources validation
+ // test, which now needs real File docs for its ids) have a working model.
+ mongoose.models.File || mongoose.model('File', fileSchema);
}, 20000);
afterAll(async () => {
@@ -542,6 +545,23 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
});
test('should validate tool_resources in updates', async () => {
+ // Back these ids with real File docs so the orphan-pruning added for
+ // issue #12776 does not strip them — this test is about OCR conversion
+ // and schema filtering, not file existence.
+ const File = mongoose.models.File;
+ for (const id of ['ocr1', 'ocr2', 'img1']) {
+ await File.create({
+ file_id: id,
+ user: existingAgentAuthorId,
+ filename: `${id}.txt`,
+ filepath: `/tmp/${id}`,
+ object: 'file',
+ type: 'text/plain',
+ bytes: 1,
+ source: FileSources.local,
+ });
+ }
+
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = existingAgentId;
mockReq.body = {
@@ -729,6 +749,93 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
}),
);
});
+
+ describe('orphan file_id pruning (issue #12776)', () => {
+ const File = () => mongoose.models.File;
+
+ const createFileDoc = async (file_id, userId) =>
+ File().create({
+ file_id,
+ user: userId,
+ filename: `${file_id}.txt`,
+ filepath: `/tmp/${file_id}`,
+ object: 'file',
+ type: 'text/plain',
+ bytes: 1,
+ source: FileSources.local,
+ });
+
+ beforeEach(async () => {
+ await File().deleteMany({});
+ });
+
+ test('strips orphan file_ids from incoming tool_resources before persisting', async () => {
+ const keeper = `file_${uuidv4()}`;
+ const orphan = `file_${uuidv4()}`;
+ await createFileDoc(keeper, existingAgentAuthorId);
+
+ mockReq.user.id = existingAgentAuthorId.toString();
+ mockReq.params.id = existingAgentId;
+ mockReq.body = {
+ tool_resources: {
+ file_search: { file_ids: [keeper, orphan] },
+ },
+ };
+
+ await updateAgentHandler(mockReq, mockRes);
+
+ const agentInDb = await Agent.findOne({ id: existingAgentId }).lean();
+ expect(agentInDb.tool_resources.file_search.file_ids).toEqual([keeper]);
+ });
+
+ test('leaves tool_resources alone when the update omits it', async () => {
+ const orphan = `file_${uuidv4()}`;
+ await Agent.updateOne(
+ { id: existingAgentId },
+ { $set: { tool_resources: { file_search: { file_ids: [orphan] } } } },
+ );
+
+ mockReq.user.id = existingAgentAuthorId.toString();
+ mockReq.params.id = existingAgentId;
+ mockReq.body = { name: 'Unrelated Rename' };
+
+ await updateAgentHandler(mockReq, mockRes);
+
+ const agentInDb = await Agent.findOne({ id: existingAgentId }).lean();
+ expect(agentInDb.name).toBe('Unrelated Rename');
+ // Save-time pruning is intentionally scoped to tool_resources updates.
+ // The delete-time fix and migration script cover the untouched case.
+ expect(agentInDb.tool_resources.file_search.file_ids).toEqual([orphan]);
+ });
+
+ test('swallows errors from the file-existence check and still completes the save', async () => {
+ const db = require('~/models');
+ const originalGetFiles = db.getFiles;
+ db.getFiles = jest.fn().mockRejectedValue(new Error('transient DB error'));
+
+ const orphan = `file_${uuidv4()}`;
+ mockReq.user.id = existingAgentAuthorId.toString();
+ mockReq.params.id = existingAgentId;
+ mockReq.body = {
+ name: 'Save Succeeds',
+ tool_resources: { file_search: { file_ids: [orphan] } },
+ };
+
+ try {
+ await updateAgentHandler(mockReq, mockRes);
+
+ expect(mockRes.status).not.toHaveBeenCalledWith(500);
+ expect(mockRes.json).toHaveBeenCalled();
+ const agentInDb = await Agent.findOne({ id: existingAgentId }).lean();
+ expect(agentInDb.name).toBe('Save Succeeds');
+ // Cleanup skipped on error, so the id remains — the delete-time path
+ // or the next successful save will reconcile it.
+ expect(agentInDb.tool_resources.file_search.file_ids).toEqual([orphan]);
+ } finally {
+ db.getFiles = originalGetFiles;
+ }
+ });
+ });
});
describe('Mass Assignment Attack Scenarios', () => {
diff --git a/api/server/services/Files/process.integration.spec.js b/api/server/services/Files/process.integration.spec.js
new file mode 100644
index 000000000000..ee47e0d10954
--- /dev/null
+++ b/api/server/services/Files/process.integration.spec.js
@@ -0,0 +1,203 @@
+/**
+ * Integration test for the delete-time path of issue #12776.
+ *
+ * Covers the full flow through `processDeleteRequest`:
+ * 1. Real Agent + File docs in an in-memory Mongo.
+ * 2. Invoke the delete service.
+ * 3. Assert both the File record is gone and every agent's
+ * tool_resources.*.file_ids no longer references the deleted id.
+ *
+ * Uses FileSources.text so the strategy layer (disk / S3 / OpenAI) is a
+ * no-op — we don't need real filesystem access to exercise the agent
+ * reference cleanup, which is what issue #12776 is about.
+ */
+
+const mongoose = require('mongoose');
+const { MongoMemoryServer } = require('mongodb-memory-server');
+const { agentSchema, fileSchema, createMethods } = require('@librechat/data-schemas');
+const { FileSources } = require('librechat-data-provider');
+
+jest.mock('@librechat/data-schemas', () => {
+ const actual = jest.requireActual('@librechat/data-schemas');
+ return {
+ ...actual,
+ logger: { warn: jest.fn(), debug: jest.fn(), error: jest.fn(), info: jest.fn() },
+ };
+});
+
+jest.mock('@librechat/agents', () => ({
+ EnvVar: { CODE_API_KEY: 'CODE_API_KEY' },
+}));
+
+jest.mock('@librechat/api', () => ({
+ sanitizeFilename: jest.fn((n) => n),
+ parseText: jest.fn().mockResolvedValue({ text: '', bytes: 0 }),
+ processAudioFile: jest.fn(),
+}));
+
+jest.mock('~/server/controllers/assistants/v2', () => ({
+ addResourceFileId: jest.fn(),
+ deleteResourceFileId: jest.fn(),
+}));
+
+jest.mock('~/server/controllers/assistants/helpers', () => ({
+ getOpenAIClient: jest.fn(),
+}));
+
+jest.mock('~/server/services/Tools/credentials', () => ({
+ loadAuthValues: jest.fn(),
+}));
+
+jest.mock('~/server/services/Files/strategies', () => ({
+ getStrategyFunctions: jest.fn(() => ({ deleteFile: jest.fn().mockResolvedValue(undefined) })),
+}));
+
+jest.mock('~/server/services/Files/Audio/STTService', () => ({
+ STTService: { getInstance: jest.fn() },
+}));
+
+jest.mock('~/server/services/Config', () => ({
+ checkCapability: jest.fn().mockResolvedValue(true),
+}));
+
+jest.mock('~/cache', () => ({
+ getLogStores: jest.fn(() => ({ get: jest.fn(), set: jest.fn(), delete: jest.fn() })),
+}));
+
+// Replace the mocked `~/models` from the sibling process.spec.js with real,
+// mongoose-backed methods. All our in-memory models share this module.
+jest.mock('~/models', () => {
+ const mongoose = require('mongoose');
+ const { createMethods } = require('@librechat/data-schemas');
+ return createMethods(mongoose, {
+ removeAllPermissions: jest.fn().mockResolvedValue(undefined),
+ });
+});
+
+require('module-alias/register');
+const { processDeleteRequest } = require('./process');
+
+describe('processDeleteRequest — agent reference cleanup (issue #12776)', () => {
+ let mongoServer;
+ let Agent;
+ let File;
+
+ beforeAll(async () => {
+ mongoServer = await MongoMemoryServer.create();
+ await mongoose.connect(mongoServer.getUri());
+
+ // createMethods (via ~/models) registers the File model as a side-effect,
+ // but we also need the Agent model registered before any queries run.
+ Agent = mongoose.models.Agent || mongoose.model('Agent', agentSchema);
+ File = mongoose.models.File || mongoose.model('File', fileSchema);
+ // Touch createMethods once so the migration/setup side-effects run.
+ createMethods(mongoose, { removeAllPermissions: jest.fn() });
+ }, 30000);
+
+ afterAll(async () => {
+ await mongoose.disconnect();
+ await mongoServer.stop();
+ });
+
+ beforeEach(async () => {
+ await Agent.deleteMany({});
+ await File.deleteMany({});
+ });
+
+ const seedFile = async (file_id, userId) =>
+ File.create({
+ file_id,
+ user: userId,
+ filename: `${file_id}.txt`,
+ filepath: `/tmp/${file_id}`,
+ object: 'file',
+ type: 'text/plain',
+ bytes: 1,
+ source: FileSources.text,
+ });
+
+ const seedAgent = async (authorId, tool_resources) =>
+ Agent.create({
+ id: `agent_${Math.random().toString(36).slice(2, 10)}`,
+ name: 'Integration Test Agent',
+ provider: 'test',
+ model: 'test-model',
+ author: authorId,
+ tool_resources,
+ });
+
+ const buildReq = (fileDocs, extraBody = {}) => ({
+ user: { id: fileDocs[0].user.toString() },
+ body: { files: fileDocs, ...extraBody },
+ config: { fileStrategy: 'local', fileConfig: {}, endpoints: {} },
+ });
+
+ test('strips deleted file_ids from every agent that referenced them', async () => {
+ const userId = new mongoose.Types.ObjectId();
+ const keeperId = `file_keeper_${Math.random().toString(36).slice(2, 10)}`;
+ const deletedId = `file_deleted_${Math.random().toString(36).slice(2, 10)}`;
+
+ const deletedFile = await seedFile(deletedId, userId);
+ await seedFile(keeperId, userId);
+
+ // Two agents both reference the file that's about to be deleted, plus the
+ // keeper. A third, unrelated agent has a different file_id and must not be
+ // touched by the cleanup.
+ const agentA = await seedAgent(userId, {
+ file_search: { file_ids: [deletedId, keeperId] },
+ });
+ const agentB = await seedAgent(userId, {
+ execute_code: { file_ids: [deletedId] },
+ });
+ const untouchedAgent = await seedAgent(userId, {
+ context: { file_ids: [keeperId] },
+ });
+
+ await processDeleteRequest({ req: buildReq([deletedFile.toObject()]), files: [deletedFile] });
+
+ expect(await File.findOne({ file_id: deletedId })).toBeNull();
+ expect(await File.findOne({ file_id: keeperId })).not.toBeNull();
+
+ const updatedA = await Agent.findOne({ id: agentA.id }).lean();
+ const updatedB = await Agent.findOne({ id: agentB.id }).lean();
+ const updatedUntouched = await Agent.findOne({ id: untouchedAgent.id }).lean();
+
+ expect(updatedA.tool_resources.file_search.file_ids).toEqual([keeperId]);
+ expect(updatedB.tool_resources.execute_code.file_ids).toEqual([]);
+ expect(updatedUntouched.tool_resources.context.file_ids).toEqual([keeperId]);
+ });
+
+ test('is a no-op when no agent references the deleted file', async () => {
+ const userId = new mongoose.Types.ObjectId();
+ const loneId = `file_lone_${Math.random().toString(36).slice(2, 10)}`;
+ const loneFile = await seedFile(loneId, userId);
+ const unrelatedAgent = await seedAgent(userId, {
+ file_search: { file_ids: ['other_id'] },
+ });
+
+ await processDeleteRequest({ req: buildReq([loneFile.toObject()]), files: [loneFile] });
+
+ expect(await File.findOne({ file_id: loneId })).toBeNull();
+ const after = await Agent.findOne({ id: unrelatedAgent.id }).lean();
+ expect(after.tool_resources.file_search.file_ids).toEqual(['other_id']);
+ });
+
+ test('still deletes the file when the agent cleanup step throws', async () => {
+ const userId = new mongoose.Types.ObjectId();
+ const targetId = `file_target_${Math.random().toString(36).slice(2, 10)}`;
+ const targetFile = await seedFile(targetId, userId);
+
+ const db = require('~/models');
+ const original = db.removeAgentResourceFilesFromAllAgents;
+ db.removeAgentResourceFilesFromAllAgents = jest
+ .fn()
+ .mockRejectedValue(new Error('simulated cleanup failure'));
+
+ try {
+ await processDeleteRequest({ req: buildReq([targetFile.toObject()]), files: [targetFile] });
+ expect(await File.findOne({ file_id: targetId })).toBeNull();
+ } finally {
+ db.removeAgentResourceFilesFromAllAgents = original;
+ }
+ });
+});
diff --git a/api/server/services/Files/process.js b/api/server/services/Files/process.js
index f7d7731975c4..f9891483d41f 100644
--- a/api/server/services/Files/process.js
+++ b/api/server/services/Files/process.js
@@ -219,6 +219,14 @@ const processDeleteRequest = async ({ req, files }) => {
await Promise.allSettled(promises);
await db.deleteFiles(resolvedFileIds);
+
+ if (resolvedFileIds.length > 0) {
+ try {
+ await db.removeAgentResourceFilesFromAllAgents({ file_ids: resolvedFileIds });
+ } catch (error) {
+ logger.error('Error cleaning up orphaned agent file references', error);
+ }
+ }
};
/**
diff --git a/api/test/migrate-orphaned-agent-files.spec.js b/api/test/migrate-orphaned-agent-files.spec.js
new file mode 100644
index 000000000000..f8333ecbf44b
--- /dev/null
+++ b/api/test/migrate-orphaned-agent-files.spec.js
@@ -0,0 +1,184 @@
+/**
+ * Integration test for the orphan-cleanup migration script used to heal
+ * agents corrupted before the delete-time and save-time fixes for issue
+ * #12776 shipped. Exercises the full module end-to-end:
+ * - dry-run reports orphans without writing
+ * - apply mode removes them
+ * - re-running on a cleaned database is a no-op (idempotent)
+ * - the DETAIL_SAMPLE_LIMIT truncation kicks in on wide corruption
+ */
+
+// Replace the migration's `./connect` helper — it opens its own connection
+// via the mongo URI env var, but the test already owns the mongoose instance.
+jest.mock('../../config/connect', () => jest.fn(async () => undefined));
+
+jest.mock('@librechat/data-schemas', () => {
+ const actual = jest.requireActual('@librechat/data-schemas');
+ return {
+ ...actual,
+ logger: { warn: jest.fn(), debug: jest.fn(), error: jest.fn(), info: jest.fn() },
+ };
+});
+
+const mongoose = require('mongoose');
+const { MongoMemoryServer } = require('mongodb-memory-server');
+const { agentSchema, fileSchema } = require('@librechat/data-schemas');
+const { FileSources } = require('librechat-data-provider');
+
+const { migrateOrphanedAgentFiles } = require('../../config/migrate-orphaned-agent-files');
+
+describe('migrate-orphaned-agent-files (issue #12776)', () => {
+ let mongoServer;
+ let Agent;
+ let File;
+ const userId = () => new mongoose.Types.ObjectId();
+
+ beforeAll(async () => {
+ mongoServer = await MongoMemoryServer.create();
+ await mongoose.connect(mongoServer.getUri());
+ Agent = mongoose.models.Agent || mongoose.model('Agent', agentSchema);
+ File = mongoose.models.File || mongoose.model('File', fileSchema);
+ }, 30000);
+
+ afterAll(async () => {
+ await mongoose.disconnect();
+ await mongoServer.stop();
+ });
+
+ beforeEach(async () => {
+ await Agent.deleteMany({});
+ await File.deleteMany({});
+ });
+
+ const seedFile = (file_id) =>
+ File.create({
+ file_id,
+ user: userId(),
+ filename: `${file_id}.txt`,
+ filepath: `/tmp/${file_id}`,
+ object: 'file',
+ type: 'text/plain',
+ bytes: 1,
+ source: FileSources.text,
+ });
+
+ const seedAgent = (tool_resources) =>
+ Agent.create({
+ id: `agent_${Math.random().toString(36).slice(2, 10)}`,
+ name: `Test Agent ${Math.random().toString(36).slice(2, 6)}`,
+ provider: 'test',
+ model: 'test-model',
+ author: userId(),
+ tool_resources,
+ });
+
+ test('dry-run reports orphans without mutating any agent', async () => {
+ const keeperId = 'keeper';
+ await seedFile(keeperId);
+ const agent = await seedAgent({
+ file_search: { file_ids: [keeperId, 'orphan_1', 'orphan_2'] },
+ });
+
+ const result = await migrateOrphanedAgentFiles({ dryRun: true });
+
+ expect(result.dryRun).toBe(true);
+ expect(result.scannedAgents).toBe(1);
+ expect(result.agentsWithOrphans).toBe(1);
+ expect(result.totalOrphansRemoved).toBe(2);
+ // Dry-run reports what would change without writing — no updates counted.
+ expect(result.agentsUpdated).toBe(0);
+
+ const after = await Agent.findOne({ id: agent.id }).lean();
+ expect(after.tool_resources.file_search.file_ids).toEqual([keeperId, 'orphan_1', 'orphan_2']);
+ });
+
+ test('apply mode removes orphans across every tool_resource category', async () => {
+ const keeperA = 'k_a';
+ const keeperB = 'k_b';
+ await seedFile(keeperA);
+ await seedFile(keeperB);
+
+ const agent = await seedAgent({
+ file_search: { file_ids: [keeperA, 'o1'] },
+ execute_code: { file_ids: ['o2', keeperB] },
+ context: { file_ids: ['o3'] },
+ });
+
+ const result = await migrateOrphanedAgentFiles({ dryRun: false });
+
+ expect(result.dryRun).toBe(false);
+ expect(result.agentsWithOrphans).toBe(1);
+ expect(result.agentsUpdated).toBe(1);
+ expect(result.totalOrphansRemoved).toBe(3);
+
+ const after = await Agent.findOne({ id: agent.id }).lean();
+ expect(after.tool_resources.file_search.file_ids).toEqual([keeperA]);
+ expect(after.tool_resources.execute_code.file_ids).toEqual([keeperB]);
+ expect(after.tool_resources.context.file_ids).toEqual([]);
+ });
+
+ test('is idempotent — re-running on a clean database is a no-op', async () => {
+ await seedFile('keeper');
+ await seedAgent({ file_search: { file_ids: ['keeper', 'orphan'] } });
+
+ await migrateOrphanedAgentFiles({ dryRun: false });
+ const second = await migrateOrphanedAgentFiles({ dryRun: false });
+
+ expect(second.agentsWithOrphans).toBe(0);
+ expect(second.agentsUpdated).toBe(0);
+ expect(second.totalOrphansRemoved).toBe(0);
+ });
+
+ test('leaves agents without orphans completely alone', async () => {
+ await seedFile('only');
+ const agent = await seedAgent({ file_search: { file_ids: ['only'] } });
+
+ const result = await migrateOrphanedAgentFiles({ dryRun: false });
+
+ expect(result.scannedAgents).toBe(1);
+ expect(result.agentsWithOrphans).toBe(0);
+ const after = await Agent.findOne({ id: agent.id }).lean();
+ expect(after.tool_resources.file_search.file_ids).toEqual(['only']);
+ });
+
+ test('sample array is bounded on wide corruption (DETAIL_SAMPLE_LIMIT)', async () => {
+ // Seed more than the cap (50) so the truncation branch is exercised.
+ const agents = [];
+ for (let i = 0; i < 55; i++) {
+ agents.push(
+ await seedAgent({
+ file_search: { file_ids: [`orphan_${i}`] },
+ }),
+ );
+ }
+
+ const result = await migrateOrphanedAgentFiles({ dryRun: true });
+
+ expect(result.agentsWithOrphans).toBe(55);
+ expect(result.details.length).toBeLessThanOrEqual(50);
+ expect(result.details.length).toBeGreaterThan(0);
+ });
+
+ test('runs the body inside a system tenant context (strict-mode safe)', async () => {
+ // Pins the runAsSystem wrap: without it the migration throws under
+ // TENANT_ISOLATION_STRICT=true on the very first Agent.countDocuments(),
+ // blocking the intended remediation path for corrupted agents.
+ const { SYSTEM_TENANT_ID, tenantStorage } = require('@librechat/data-schemas');
+ await seedFile('keeper');
+ await seedAgent({ file_search: { file_ids: ['keeper', 'orphan'] } });
+
+ const contextsObserved = [];
+ const originalCountDocuments = Agent.countDocuments.bind(Agent);
+ Agent.countDocuments = jest.fn((...args) => {
+ contextsObserved.push(tenantStorage.getStore()?.tenantId);
+ return originalCountDocuments(...args);
+ });
+
+ try {
+ await migrateOrphanedAgentFiles({ dryRun: false });
+ expect(contextsObserved).toContain(SYSTEM_TENANT_ID);
+ } finally {
+ Agent.countDocuments = originalCountDocuments;
+ }
+ });
+});
diff --git a/client/src/components/MCP/CustomUserVarsSection.tsx b/client/src/components/MCP/CustomUserVarsSection.tsx
index 339b78f6b972..a986a2786f34 100644
--- a/client/src/components/MCP/CustomUserVarsSection.tsx
+++ b/client/src/components/MCP/CustomUserVarsSection.tsx
@@ -85,7 +85,12 @@ function AuthField({ name, config, hasValue, control, errors, autoFocus }: AuthF
render={({ field }) => (
({
+ useMCPAuthValuesQuery: () => ({ data: { authValueFlags: {} } }),
+}));
+
+jest.mock('~/hooks', () => ({
+ useLocalize: () => (key: string) => key,
+}));
+
+describe('CustomUserVarsSection', () => {
+ const fields = {
+ api_key: { title: 'My API Key', description: 'Your API key' },
+ };
+
+ it('renders autofill-prevention attributes on credential inputs', () => {
+ render(
+ ,
+ );
+
+ const input = screen.getByLabelText(/My API Key/);
+ expect(input).toHaveAttribute('autocomplete', 'new-password');
+ expect(input).toHaveAttribute('type', 'new-password');
+ expect(input).toHaveAttribute('data-lpignore', 'true');
+ expect(input).toHaveAttribute('data-1p-ignore', 'true');
+ });
+});
diff --git a/client/src/locales/en/translation.json b/client/src/locales/en/translation.json
index 653e920bf0bf..74c668038b1b 100644
--- a/client/src/locales/en/translation.json
+++ b/client/src/locales/en/translation.json
@@ -226,13 +226,13 @@
"com_endpoint_agent_placeholder": "Please select an Agent",
"com_endpoint_ai": "AI",
"com_endpoint_anthropic_effort": "Controls how much computational effort Claude applies. Lower effort saves tokens and reduces latency; higher effort produces more thorough responses. 'Max' enables the deepest reasoning (Opus 4.6 only).",
- "com_endpoint_anthropic_thinking_display": "Thought Visibility",
- "com_endpoint_anthropic_thinking_display_desc": "Controls whether Claude's reasoning is returned. 'Auto' opts in to summarized thoughts for models that hide them by default (Opus 4.7+); 'Summarized' always shows them; 'Omitted' always hides them for slightly lower latency.",
"com_endpoint_anthropic_maxoutputtokens": "Maximum number of tokens that can be generated in the response. Specify a lower value for shorter responses and a higher value for longer responses. Note: models may stop before reaching this maximum.",
"com_endpoint_anthropic_prompt_cache": "Prompt caching allows reusing large context or instructions across API calls, reducing costs and latency",
"com_endpoint_anthropic_temp": "Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and closer to 1 for creative and generative tasks. We recommend altering this or Top P but not both.",
"com_endpoint_anthropic_thinking": "Enables internal reasoning for supported Claude models. For newer models (Opus 4.6+), uses adaptive thinking controlled by the Effort parameter. For legacy models, requires \"Thinking Budget\" to be set and lower than \"Max Output Tokens\".",
"com_endpoint_anthropic_thinking_budget": "Determines the max number of tokens Claude is allowed use for its internal reasoning process. Larger budgets can improve response quality by enabling more thorough analysis for complex problems, although Claude may not use the entire budget allocated, especially at ranges above 32K. This setting must be lower than \"Max Output Tokens.\"",
+ "com_endpoint_anthropic_thinking_display": "Thought Visibility",
+ "com_endpoint_anthropic_thinking_display_desc": "Controls whether Claude's reasoning is returned. 'Auto' opts in to summarized thoughts for models that hide them by default (Opus 4.7+); 'Summarized' always shows them; 'Omitted' always hides them for slightly lower latency.",
"com_endpoint_anthropic_topk": "Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature).",
"com_endpoint_anthropic_topp": "Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value.",
"com_endpoint_anthropic_use_web_search": "Enable web search functionality using Anthropic's built-in search capabilities. This allows the model to search the web for up-to-date information and provide more accurate, current responses.",
@@ -1247,13 +1247,13 @@
"com_ui_of": "of",
"com_ui_off": "Off",
"com_ui_offline": "Offline",
+ "com_ui_omitted": "Omitted",
"com_ui_on": "On",
"com_ui_open_archived_chat_new_tab_title": "{{title}} (opens in new tab)",
"com_ui_open_source_chat_new_tab": "Open Source Chat in New Tab",
"com_ui_open_source_chat_new_tab_title": "Open Source Chat in New Tab - {{title}}",
"com_ui_open_var": "Open {{0}}",
"com_ui_openai": "OpenAI",
- "com_ui_omitted": "Omitted",
"com_ui_optional": "(optional)",
"com_ui_options": "options",
"com_ui_output": "Output",
diff --git a/client/src/locales/lv/translation.json b/client/src/locales/lv/translation.json
index 31e79cda8caa..402852e8e70a 100644
--- a/client/src/locales/lv/translation.json
+++ b/client/src/locales/lv/translation.json
@@ -231,6 +231,8 @@
"com_endpoint_anthropic_temp": "Diapazons no 0 līdz 1. Analītiskiem/atbilžu variantiem izmantot temp vērtību tuvāk 0, bet radošiem un ģeneratīviem uzdevumiem — tuvāk 1. Iesakām mainīt šo vai Top P, bet ne abus.",
"com_endpoint_anthropic_thinking": "Iespējo iekšējo spriešanu atbalstītajiem Claude modeļiem (3.7 Sonnet). Piezīme: nepieciešams iestatīt \"Domāšanas budžetu\", kam arī jābūt zemākam par \"Max Output Tokens\".",
"com_endpoint_anthropic_thinking_budget": "Nosaka maksimālo žetonu skaitu, ko Claude drīkst izmantot savā iekšējā spriešanas procesā. Lielāki budžeti var uzlabot atbilžu kvalitāti, nodrošinot rūpīgāku analīzi sarežģītām problēmām, lai gan Claude var neizmantot visu piešķirto budžetu, īpaši diapazonos virs 32 000. Šim iestatījumam jābūt zemākam par \"Maksimālie izvades tokeni\".",
+ "com_endpoint_anthropic_thinking_display": "Domu redzamība",
+ "com_endpoint_anthropic_thinking_display_desc": "Kontrolē, vai tiek atgriezta Claude argumentācija. \"Automātiski\" iespējo apkopotās domas modeļiem, kas tās pēc noklusējuma slēpj (Opus 4.7+); \"Apkopots\" vienmēr tās rāda; \"Izlaists\" vienmēr tās slēpj, lai nedaudz samazinātu latentumu.",
"com_endpoint_anthropic_topk": "Top-k maina to, kā modelis atlasa marķierus izvadei. Ja top-k ir 1, tas nozīmē, ka atlasītais marķieris ir visticamākais starp visiem modeļa vārdu krājumā esošajiem marķieriem (to sauc arī par alkatīgo dekodēšanu), savukārt, ja top-k ir 3, tas nozīmē, ka nākamais marķieris tiek izvēlēts no 3 visticamākajiem marķieriem (izmantojot temperatūru).",
"com_endpoint_anthropic_topp": "`Top-p` maina to, kā modelis atlasa marķierus izvadei. Marķieri tiek atlasīti no K (skatīt parametru topK) ticamākās līdz vismazāk ticamajai, līdz to varbūtību summa ir vienāda ar `top-p` vērtību.",
"com_endpoint_anthropic_use_web_search": "Iespējojiet meklēšanu tīmeklī funkcionalitāti, izmantojot Anthropic iebūvētās meklēšanas iespējas. Tas ļauj modelim meklēt tīmeklī jaunāko informāciju un sniegt precīzākas un aktuālākas atbildes.",
@@ -564,6 +566,7 @@
"com_nav_mcp_vars_updated": "MCP pielāgotie lietotāja mainīgie ir veiksmīgi atjaunināti.",
"com_nav_modular_chat": "Iespējot galapunktu pārslēgšanu sarunas laikā",
"com_nav_my_files": "Mani faili",
+ "com_nav_new_chat_switch_to_history": "Pārslēgties uz sarunu vēsturi ar jaunu sarunu",
"com_nav_not_supported": "Nav atbalstīts",
"com_nav_open_sidebar": "Atvērt sānu joslu",
"com_nav_playback_rate": "Audio atskaņošanas ātrums",
@@ -1246,6 +1249,7 @@
"com_ui_of": "no",
"com_ui_off": "Izslēgts",
"com_ui_offline": "Bezsaistē",
+ "com_ui_omitted": "Izlaists",
"com_ui_on": "Ieslēgts",
"com_ui_open_archived_chat_new_tab_title": "{{title}} (atveras jaunā cilnē)",
"com_ui_open_source_chat_new_tab": "Atvērtā koda saruna jaunā cilnē",
@@ -1452,6 +1456,7 @@
"com_ui_storage": "Uzglabāšana",
"com_ui_storage_filter_sort": "Filtrēt un kārtot pēc datu krātuves",
"com_ui_submit": "Nosūtīt",
+ "com_ui_summarized": "Apkopots",
"com_ui_summarizing": "Apkopo...",
"com_ui_support_contact": "Atbalsta kontaktinformācija",
"com_ui_support_contact_email": "E-pasts",
diff --git a/config/migrate-orphaned-agent-files.js b/config/migrate-orphaned-agent-files.js
new file mode 100644
index 000000000000..e074ff81544a
--- /dev/null
+++ b/config/migrate-orphaned-agent-files.js
@@ -0,0 +1,160 @@
+const path = require('path');
+const { logger, runAsSystem } = require('@librechat/data-schemas');
+const { TOOL_RESOURCE_KEYS, collectToolResourceFileIds } = require('@librechat/api');
+
+require('module-alias')({ base: path.resolve(__dirname, '..', 'api') });
+const connect = require('./connect');
+
+const { Agent, File } = require('~/db/models');
+
+/**
+ * Cap on the number of per-agent entries we retain in `results.details`. Larger
+ * runs still update every affected agent and still report accurate aggregate
+ * counts — we just stop accumulating sample data past this threshold to keep
+ * memory bounded on deployments with thousands of corrupted agents.
+ */
+const DETAIL_SAMPLE_LIMIT = 50;
+
+/**
+ * Cleans up orphaned file_id references from agent `tool_resources` — that is,
+ * file_ids that remain on an agent after the underlying File document has
+ * already been deleted (see issue #12776). These stubs otherwise accumulate and
+ * eventually block new uploads with "Duplicate file detected."
+ *
+ * Safe to re-run — if there are no orphans, nothing is written.
+ *
+ * @param {{ dryRun?: boolean, batchSize?: number }} [options]
+ */
+async function migrateOrphanedAgentFiles({ dryRun = true, batchSize = 100 } = {}) {
+ await connect();
+
+ logger.info('Starting Orphaned Agent Files Migration', { dryRun, batchSize });
+
+ /*
+ * Scan and heal across every tenant. Without this wrapper the tenant
+ * isolation plugin either scopes queries to a (non-existent) tenant or
+ * throws under TENANT_ISOLATION_STRICT=true, making the script unusable
+ * as the intended remediation path for corrupted agents.
+ */
+ return runAsSystem(async () => {
+ const totalAgents = await Agent.countDocuments();
+ logger.info(`Scanning ${totalAgents} agent(s) for orphaned file references`);
+
+ const results = {
+ dryRun,
+ scannedAgents: 0,
+ agentsWithOrphans: 0,
+ agentsUpdated: 0,
+ totalOrphansRemoved: 0,
+ errors: 0,
+ details: [],
+ };
+
+ const cursor = Agent.find({}, { id: 1, name: 1, tool_resources: 1 })
+ .lean()
+ .cursor({ batchSize });
+
+ for await (const agent of cursor) {
+ results.scannedAgents++;
+
+ try {
+ const referencedFileIds = collectToolResourceFileIds(agent.tool_resources);
+ if (referencedFileIds.length === 0) {
+ continue;
+ }
+
+ const existing = await File.find(
+ { file_id: { $in: referencedFileIds } },
+ { file_id: 1, _id: 0 },
+ ).lean();
+ const existingIds = new Set(existing.map((f) => f.file_id));
+ const orphans = referencedFileIds.filter((id) => !existingIds.has(id));
+ if (orphans.length === 0) {
+ continue;
+ }
+
+ results.agentsWithOrphans++;
+ results.totalOrphansRemoved += orphans.length;
+ if (results.details.length < DETAIL_SAMPLE_LIMIT) {
+ results.details.push({
+ agentId: agent.id,
+ name: agent.name,
+ orphanCount: orphans.length,
+ orphans,
+ });
+ }
+
+ if (dryRun) {
+ logger.debug(`[dry-run] Would prune ${orphans.length} orphan(s) from agent ${agent.id}`);
+ continue;
+ }
+
+ const pullAllOps = {};
+ for (const key of TOOL_RESOURCE_KEYS) {
+ pullAllOps[`tool_resources.${key}.file_ids`] = orphans;
+ }
+ const updateResult = await Agent.updateOne({ _id: agent._id }, { $pullAll: pullAllOps });
+ if (updateResult.modifiedCount > 0) {
+ results.agentsUpdated++;
+ logger.info(
+ `Pruned ${orphans.length} orphan(s) from agent "${agent.name}" (${agent.id})`,
+ );
+ }
+ } catch (error) {
+ results.errors++;
+ logger.error(`Failed to process agent ${agent.id}`, { error: error.message });
+ }
+ }
+
+ logger.info('Orphaned Agent Files Migration completed', {
+ dryRun,
+ scannedAgents: results.scannedAgents,
+ agentsWithOrphans: results.agentsWithOrphans,
+ agentsUpdated: results.agentsUpdated,
+ totalOrphansRemoved: results.totalOrphansRemoved,
+ errors: results.errors,
+ });
+
+ return results;
+ });
+}
+
+if (require.main === module) {
+ const dryRun = process.argv.includes('--dry-run');
+ const batchSize =
+ parseInt(process.argv.find((arg) => arg.startsWith('--batch-size='))?.split('=')[1]) || 100;
+
+ migrateOrphanedAgentFiles({ dryRun, batchSize })
+ .then((result) => {
+ console.log(`\n=== ${dryRun ? 'DRY RUN ' : ''}RESULTS ===`);
+ console.log(`Agents scanned: ${result.scannedAgents}`);
+ console.log(`Agents with orphans: ${result.agentsWithOrphans}`);
+ console.log(
+ `Orphan references ${dryRun ? 'to remove' : 'removed'}: ${result.totalOrphansRemoved}`,
+ );
+ if (!dryRun) {
+ console.log(`Agents updated: ${result.agentsUpdated}`);
+ }
+ if (result.errors > 0) {
+ console.log(`Errors: ${result.errors}`);
+ }
+ if (result.details.length > 0) {
+ console.log('\nAffected agents:');
+ result.details.forEach((d, i) => {
+ console.log(` ${i + 1}. "${d.name}" (${d.agentId}) — ${d.orphanCount} orphan(s)`);
+ });
+ if (result.agentsWithOrphans > result.details.length) {
+ console.log(
+ ` ... and ${result.agentsWithOrphans - result.details.length} more (sample capped at ${DETAIL_SAMPLE_LIMIT})`,
+ );
+ }
+ }
+ process.exit(0);
+ })
+ .catch((error) => {
+ console.error('Orphaned agent files migration failed:', error);
+ process.exit(1);
+ });
+}
+
+module.exports = { migrateOrphanedAgentFiles };
diff --git a/package-lock.json b/package-lock.json
index 7af4d1487402..80fbd45b1913 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -63,7 +63,7 @@
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
- "@modelcontextprotocol/sdk": "^1.27.1",
+ "@modelcontextprotocol/sdk": "^1.29.0",
"@node-saml/passport-saml": "^5.1.0",
"@smithy/node-http-handler": "^4.4.5",
"ai-tokenizer": "^1.0.6",
@@ -12042,9 +12042,9 @@
}
},
"node_modules/@modelcontextprotocol/sdk": {
- "version": "1.27.1",
- "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.27.1.tgz",
- "integrity": "sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA==",
+ "version": "1.29.0",
+ "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.29.0.tgz",
+ "integrity": "sha512-zo37mZA9hJWpULgkRpowewez1y6ML5GsXJPY8FI0tBBCd77HEvza4jDqRKOXgHNn867PVGCyTdzqpz0izu5ZjQ==",
"license": "MIT",
"dependencies": {
"@hono/node-server": "^1.19.9",
@@ -44196,7 +44196,7 @@
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.1.68",
"@librechat/data-schemas": "*",
- "@modelcontextprotocol/sdk": "^1.27.1",
+ "@modelcontextprotocol/sdk": "^1.29.0",
"@smithy/node-http-handler": "^4.4.5",
"ai-tokenizer": "^1.0.6",
"axios": "^1.15.0",
diff --git a/package.json b/package.json
index 05421a23df75..fb624b9689b8 100644
--- a/package.json
+++ b/package.json
@@ -93,7 +93,10 @@
"migrate:agent-permissions:batch": "node config/migrate-agent-permissions.js --batch-size=50",
"migrate:prompt-permissions:dry-run": "node config/migrate-prompt-permissions.js --dry-run",
"migrate:prompt-permissions": "node config/migrate-prompt-permissions.js",
- "migrate:prompt-permissions:batch": "node config/migrate-prompt-permissions.js --batch-size=50"
+ "migrate:prompt-permissions:batch": "node config/migrate-prompt-permissions.js --batch-size=50",
+ "migrate:orphaned-agent-files:dry-run": "node config/migrate-orphaned-agent-files.js --dry-run",
+ "migrate:orphaned-agent-files": "node config/migrate-orphaned-agent-files.js",
+ "migrate:orphaned-agent-files:batch": "node config/migrate-orphaned-agent-files.js --batch-size=50"
},
"repository": {
"type": "git",
diff --git a/packages/api/package.json b/packages/api/package.json
index 2dae50b8463b..62884cae4eec 100644
--- a/packages/api/package.json
+++ b/packages/api/package.json
@@ -97,7 +97,7 @@
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.1.68",
"@librechat/data-schemas": "*",
- "@modelcontextprotocol/sdk": "^1.27.1",
+ "@modelcontextprotocol/sdk": "^1.29.0",
"@smithy/node-http-handler": "^4.4.5",
"ai-tokenizer": "^1.0.6",
"axios": "^1.15.0",
diff --git a/packages/api/src/agents/index.ts b/packages/api/src/agents/index.ts
index 336eaea479c7..2dad6b33d477 100644
--- a/packages/api/src/agents/index.ts
+++ b/packages/api/src/agents/index.ts
@@ -9,6 +9,7 @@ export * from './handlers';
export * from './initialize';
export * from './legacy';
export * from './memory';
+export * from './orphans';
export * from './migration';
export * from './openai';
export * from './transactions';
diff --git a/packages/api/src/agents/orphans.spec.ts b/packages/api/src/agents/orphans.spec.ts
new file mode 100644
index 000000000000..3e614b93d458
--- /dev/null
+++ b/packages/api/src/agents/orphans.spec.ts
@@ -0,0 +1,53 @@
+import { EToolResources } from 'librechat-data-provider';
+import type { AgentToolResources } from 'librechat-data-provider';
+import { collectToolResourceFileIds, stripFileIdsFromToolResources } from './orphans';
+
+const makeResources = (): AgentToolResources => ({
+ [EToolResources.file_search]: { file_ids: ['a', 'b', 'c'] },
+ [EToolResources.execute_code]: { file_ids: ['b', 'd'] },
+ [EToolResources.context]: { file_ids: ['e'] },
+});
+
+describe('collectToolResourceFileIds', () => {
+ it('returns empty array for nullish input', () => {
+ expect(collectToolResourceFileIds(undefined)).toEqual([]);
+ expect(collectToolResourceFileIds(null)).toEqual([]);
+ });
+
+ it('gathers and de-duplicates file_ids across every category', () => {
+ const ids = collectToolResourceFileIds(makeResources());
+ expect(new Set(ids)).toEqual(new Set(['a', 'b', 'c', 'd', 'e']));
+ });
+
+ it('skips categories without a file_ids array', () => {
+ const resources: AgentToolResources = {
+ [EToolResources.file_search]: { file_ids: ['a'] },
+ [EToolResources.context]: {},
+ };
+ expect(collectToolResourceFileIds(resources)).toEqual(['a']);
+ });
+});
+
+describe('stripFileIdsFromToolResources', () => {
+ it('removes matching ids from every category and reports the count', () => {
+ const resources = makeResources();
+ const { removedCount } = stripFileIdsFromToolResources(resources, ['b', 'e']);
+
+ expect(removedCount).toBe(3);
+ expect(resources[EToolResources.file_search]?.file_ids).toEqual(['a', 'c']);
+ expect(resources[EToolResources.execute_code]?.file_ids).toEqual(['d']);
+ expect(resources[EToolResources.context]?.file_ids).toEqual([]);
+ });
+
+ it('is a no-op when no ids are provided', () => {
+ const resources = makeResources();
+ const { removedCount } = stripFileIdsFromToolResources(resources, []);
+ expect(removedCount).toBe(0);
+ expect(resources[EToolResources.file_search]?.file_ids).toEqual(['a', 'b', 'c']);
+ });
+
+ it('handles nullish tool_resources safely', () => {
+ const { removedCount } = stripFileIdsFromToolResources(undefined, ['a']);
+ expect(removedCount).toBe(0);
+ });
+});
diff --git a/packages/api/src/agents/orphans.ts b/packages/api/src/agents/orphans.ts
new file mode 100644
index 000000000000..b350b6f02736
--- /dev/null
+++ b/packages/api/src/agents/orphans.ts
@@ -0,0 +1,70 @@
+import { EToolResources } from 'librechat-data-provider';
+import type { AgentToolResources } from 'librechat-data-provider';
+
+/**
+ * Every `EToolResources` member that can carry `file_ids` on an agent document.
+ * `code_interpreter` is intentionally omitted — it's part of `EToolResources`
+ * for the Assistants API but not a key of the agent-side `AgentToolResources`
+ * shape, so including it would be a type lie and generate dead MongoDB clauses.
+ */
+export const TOOL_RESOURCE_KEYS: ReadonlyArray<keyof AgentToolResources> = [
+ EToolResources.execute_code,
+ EToolResources.file_search,
+ EToolResources.image_edit,
+ EToolResources.context,
+ EToolResources.ocr,
+];
+
+/**
+ * Collects every file_id referenced across all tool_resource categories.
+ * Duplicates are de-duplicated across categories.
+ */
+export function collectToolResourceFileIds(
+ tool_resources: AgentToolResources | undefined | null,
+): string[] {
+ if (!tool_resources) {
+ return [];
+ }
+ const seen = new Set<string>();
+ for (const key of TOOL_RESOURCE_KEYS) {
+ const ids = tool_resources[key]?.file_ids;
+ if (!Array.isArray(ids)) {
+ continue;
+ }
+ for (const id of ids) {
+ if (typeof id === 'string') {
+ seen.add(id);
+ }
+ }
+ }
+ return Array.from(seen);
+}
+
+/**
+ * Removes the given file_ids from every tool_resource category on the provided
+ * tool_resources object. Mutates in place and also returns the same reference
+ * for convenience. Returns the count of removed references.
+ */
+export function stripFileIdsFromToolResources(
+ tool_resources: AgentToolResources | undefined | null,
+ idsToRemove: Iterable<string>,
+): { tool_resources: AgentToolResources | undefined | null; removedCount: number } {
+ if (!tool_resources) {
+ return { tool_resources, removedCount: 0 };
+ }
+ const removeSet = idsToRemove instanceof Set ? idsToRemove : new Set(idsToRemove);
+ if (removeSet.size === 0) {
+ return { tool_resources, removedCount: 0 };
+ }
+ let removedCount = 0;
+ for (const key of TOOL_RESOURCE_KEYS) {
+ const resource = tool_resources[key];
+ if (!resource || !Array.isArray(resource.file_ids)) {
+ continue;
+ }
+ const before = resource.file_ids.length;
+ resource.file_ids = resource.file_ids.filter((id) => !removeSet.has(id));
+ removedCount += before - resource.file_ids.length;
+ }
+ return { tool_resources, removedCount };
+}
diff --git a/packages/api/src/mcp/__tests__/handler.test.ts b/packages/api/src/mcp/__tests__/handler.test.ts
index 87de316d17a0..6e0c1961bf5d 100644
--- a/packages/api/src/mcp/__tests__/handler.test.ts
+++ b/packages/api/src/mcp/__tests__/handler.test.ts
@@ -18,6 +18,7 @@ jest.mock('@modelcontextprotocol/sdk/client/auth.js', () => ({
discoverOAuthProtectedResourceMetadata: jest.fn(),
registerClient: jest.fn(),
exchangeAuthorization: jest.fn(),
+ extractWWWAuthenticateParams: jest.fn(() => ({})),
}));
jest.mock('../../mcp/oauth/tokens', () => ({
@@ -26,6 +27,10 @@ jest.mock('../../mcp/oauth/tokens', () => ({
},
}));
+jest.mock('../../mcp/oauth/resourceHint', () => ({
+ probeResourceMetadataHint: jest.fn().mockResolvedValue(null),
+}));
+
import {
startAuthorization,
discoverAuthorizationServerMetadata,
@@ -34,6 +39,7 @@ import {
exchangeAuthorization,
} from '@modelcontextprotocol/sdk/client/auth.js';
import { MCPTokenStorage } from '../../mcp/oauth/tokens';
+import { probeResourceMetadataHint } from '../../mcp/oauth/resourceHint';
import { FlowStateManager } from '../../flow/manager';
const mockStartAuthorization = startAuthorization as jest.MockedFunction<typeof startAuthorization>;
@@ -53,6 +59,9 @@ const mockGetClientInfoAndMetadata =
MCPTokenStorage.getClientInfoAndMetadata as jest.MockedFunction<
typeof MCPTokenStorage.getClientInfoAndMetadata
>;
+const mockProbeResourceMetadataHint = probeResourceMetadataHint as jest.MockedFunction<
+ typeof probeResourceMetadataHint
+>;
describe('MCPOAuthHandler - Configurable OAuth Metadata', () => {
const mockServerName = 'test-server';
@@ -2153,4 +2162,443 @@ describe('MCPOAuthHandler - Configurable OAuth Metadata', () => {
});
});
});
+
+ describe('Protected Resource Metadata validation (RFC 9728 / GHSA-gvpj-vm2f-2m23)', () => {
+ const originalFetch = global.fetch;
+ const mockFetch = jest.fn();
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ global.fetch = mockFetch as unknown as typeof fetch;
+ mockFetch.mockResolvedValue({ ok: true, json: async () => ({}) } as Response);
+ });
+
+ afterAll(() => {
+ global.fetch = originalFetch;
+ });
+
+ it('rejects metadata whose resource points at a different origin than the configured server', async () => {
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ // attacker's server pretends to be real-mcp.com so tokens minted by real-mcp's
+ // auth server get sent to the attacker
+ resource: 'https://real-mcp.com/mcp',
+ authorization_servers: ['https://auth.real-mcp.com'],
+ });
+
+ await expect(
+ MCPOAuthHandler.initiateOAuthFlow(
+ 'evil-server',
+ 'https://fake-mcp.com/mcp',
+ 'user-123',
+ {},
+ undefined,
+ ),
+ ).rejects.toThrow(/does not match server URL/);
+
+ // authorization_servers from the tainted document must never be consulted
+ expect(mockDiscoverAuthorizationServerMetadata).not.toHaveBeenCalled();
+ expect(mockStartAuthorization).not.toHaveBeenCalled();
+ expect(mockRegisterClient).not.toHaveBeenCalled();
+ });
+
+ it('rejects metadata whose resource is not a parseable URL (error-wrapping path)', async () => {
+ // A malicious or broken server could return a `resource` that passes the
+ // zod schema but is not a valid URL. `resourceUrlFromServerUrl` /
+ // `checkResourceAllowed` call `new URL()` internally and will throw;
+ // `assertResourceBoundToServer` wraps that into a descriptive error rather
+ // than letting a raw `TypeError: Invalid URL` leak out.
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: 'not-a-url',
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ await expect(
+ MCPOAuthHandler.initiateOAuthFlow(
+ 'test-server',
+ 'https://example.com/mcp',
+ 'user-123',
+ {},
+ undefined,
+ ),
+ ).rejects.toThrow(/Unable to validate Protected Resource Metadata 'resource'/);
+
+ expect(mockDiscoverAuthorizationServerMetadata).not.toHaveBeenCalled();
+ expect(mockStartAuthorization).not.toHaveBeenCalled();
+ });
+
+ it('rejects metadata that is missing the required resource identifier', async () => {
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ // RFC 9728 §2: `resource` is REQUIRED
+ authorization_servers: ['https://auth.example.com'],
+ } as unknown as Awaited<ReturnType<typeof discoverOAuthProtectedResourceMetadata>>);
+
+ await expect(
+ MCPOAuthHandler.initiateOAuthFlow(
+ 'test-server',
+ 'https://example.com/mcp',
+ 'user-123',
+ {},
+ undefined,
+ ),
+ ).rejects.toThrow(/missing the required 'resource' identifier/);
+
+ expect(mockDiscoverAuthorizationServerMetadata).not.toHaveBeenCalled();
+ });
+
+ it('rejects metadata whose resource points at the same origin but a sibling path', async () => {
+ // Same-origin path-confusion: checkResourceAllowed enforces path-prefix match, so
+ // a server at /api can't claim tokens for /admin on the same origin.
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: 'https://example.com/admin',
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ await expect(
+ MCPOAuthHandler.initiateOAuthFlow(
+ 'test-server',
+ 'https://example.com/api',
+ 'user-123',
+ {},
+ undefined,
+ ),
+ ).rejects.toThrow(/does not match server URL/);
+ });
+
+ it('accepts metadata whose resource exactly matches the server URL', async () => {
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: 'https://example.com/mcp',
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
+ issuer: 'https://auth.example.com',
+ authorization_endpoint: 'https://auth.example.com/authorize',
+ token_endpoint: 'https://auth.example.com/token',
+ registration_endpoint: 'https://auth.example.com/register',
+ response_types_supported: ['code'],
+ } as AuthorizationServerMetadata);
+
+ mockRegisterClient.mockResolvedValueOnce({
+ client_id: 'new-client-id',
+ redirect_uris: ['http://localhost:3080/api/mcp/test-server/oauth/callback'],
+ logo_uri: undefined,
+ tos_uri: undefined,
+ });
+
+ mockStartAuthorization.mockResolvedValueOnce({
+ authorizationUrl: new URL('https://auth.example.com/authorize?client_id=new-client-id'),
+ codeVerifier: 'test-code-verifier',
+ });
+
+ const result = await MCPOAuthHandler.initiateOAuthFlow(
+ 'test-server',
+ 'https://example.com/mcp',
+ 'user-123',
+ {},
+ undefined,
+ );
+
+ expect(result.authorizationUrl).toContain('resource=https%3A%2F%2Fexample.com%2Fmcp');
+ });
+
+ it('accepts metadata whose resource is an origin-level prefix of the server URL', async () => {
+ // Some RFC 9728 implementations advertise the origin as `resource` for a
+ // sub-path MCP server; checkResourceAllowed permits this (path-prefix match).
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: 'https://example.com',
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
+ issuer: 'https://auth.example.com',
+ authorization_endpoint: 'https://auth.example.com/authorize',
+ token_endpoint: 'https://auth.example.com/token',
+ registration_endpoint: 'https://auth.example.com/register',
+ response_types_supported: ['code'],
+ } as AuthorizationServerMetadata);
+
+ mockRegisterClient.mockResolvedValueOnce({
+ client_id: 'client-id',
+ redirect_uris: ['http://localhost:3080/api/mcp/test-server/oauth/callback'],
+ logo_uri: undefined,
+ tos_uri: undefined,
+ });
+
+ mockStartAuthorization.mockResolvedValueOnce({
+ authorizationUrl: new URL('https://auth.example.com/authorize?client_id=client-id'),
+ codeVerifier: 'test-code-verifier',
+ });
+
+ await expect(
+ MCPOAuthHandler.initiateOAuthFlow(
+ 'test-server',
+ 'https://example.com/mcp',
+ 'user-123',
+ {},
+ undefined,
+ ),
+ ).resolves.toBeDefined();
+ });
+
+ it('re-validates resource binding at token exchange for flows initiated before the fix', async () => {
+ // Defense-in-depth: flow state has a 10-min TTL, so a flow created under older
+ // (vulnerable) code could still be in-flight at upgrade time with unvalidated
+ // resourceMetadata stored. completeOAuthFlow must re-assert the binding rather
+ // than blindly trusting stored state — and must still run the normal failure
+ // bookkeeping (failFlow) so the flow manager doesn't leak a stuck PENDING entry.
+ const mockFailFlow = jest.fn();
+ const mockFlowManager = {
+ getFlowState: jest.fn().mockResolvedValue({
+ status: 'PENDING',
+ metadata: {
+ serverName: 'evil-server',
+ userId: 'user-123',
+ serverUrl: 'https://fake-mcp.com/mcp',
+ state: 'abc',
+ codeVerifier: 'verifier',
+ clientInfo: { client_id: 'cid' },
+ metadata: { authorization_endpoint: 'x', token_endpoint: 'y' },
+ resourceMetadata: {
+ // tainted: stored during a pre-fix initiateOAuthFlow
+ resource: 'https://real-mcp.com/mcp',
+ authorization_servers: ['https://auth.real-mcp.com'],
+ },
+ } as MCPOAuthFlowMetadata,
+ }),
+ failFlow: mockFailFlow,
+ } as unknown as FlowStateManager<unknown>;
+
+ await expect(
+ MCPOAuthHandler.completeOAuthFlow('flow-id', 'auth-code', mockFlowManager, {}),
+ ).rejects.toThrow(/does not match server URL/);
+
+ expect(mockExchangeAuthorization).not.toHaveBeenCalled();
+ expect(mockFailFlow).toHaveBeenCalledWith('flow-id', expect.any(String), expect.any(Error));
+ });
+
+ it('falls back to origin-based discovery when the well-known endpoint returns no metadata', async () => {
+ // A missing/404 PRM doc is different from a spoofed one: the SDK throws, we
+ // catch it, and proceed to discover the auth server from the MCP server URL.
+ // This path must NOT trip the new validation.
+ mockDiscoverOAuthProtectedResourceMetadata.mockRejectedValueOnce(
+ new Error('Resource server does not implement OAuth 2.0 Protected Resource Metadata.'),
+ );
+
+ mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
+ issuer: 'https://example.com',
+ authorization_endpoint: 'https://example.com/authorize',
+ token_endpoint: 'https://example.com/token',
+ registration_endpoint: 'https://example.com/register',
+ response_types_supported: ['code'],
+ } as AuthorizationServerMetadata);
+
+ mockRegisterClient.mockResolvedValueOnce({
+ client_id: 'client-id',
+ redirect_uris: ['http://localhost:3080/api/mcp/test-server/oauth/callback'],
+ logo_uri: undefined,
+ tos_uri: undefined,
+ });
+
+ mockStartAuthorization.mockResolvedValueOnce({
+ authorizationUrl: new URL('https://example.com/authorize?client_id=client-id'),
+ codeVerifier: 'test-code-verifier',
+ });
+
+ const result = await MCPOAuthHandler.initiateOAuthFlow(
+ 'test-server',
+ 'https://example.com/mcp',
+ 'user-123',
+ {},
+ undefined,
+ );
+
+ expect(result.authorizationUrl).toBeDefined();
+ // No PRM, so the authorization URL must not carry a `resource` parameter
+ expect(result.authorizationUrl).not.toContain('resource=');
+ });
+ });
+
+ describe('WWW-Authenticate resource_metadata hint (RFC 9728 §5.1 / issue #12761)', () => {
+ const serverUrl = 'https://example.com/mcp';
+ const hintUrl = 'https://example.com/.well-known/oauth-protected-resource';
+
+ beforeEach(() => {
+ // Default the probe to "no hint" so earlier suites that don't set it aren't affected.
+ mockProbeResourceMetadataHint.mockResolvedValue(null);
+ });
+
+ it('threads the hint URL into discoverOAuthProtectedResourceMetadata when present', async () => {
+ mockProbeResourceMetadataHint.mockResolvedValueOnce({
+ resourceMetadataUrl: new URL(hintUrl),
+ bearerChallenge: true,
+ headAuthChallenge: true,
+ });
+
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: serverUrl,
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
+ issuer: 'https://auth.example.com',
+ authorization_endpoint: 'https://auth.example.com/authorize',
+ token_endpoint: 'https://auth.example.com/token',
+ registration_endpoint: 'https://auth.example.com/register',
+ response_types_supported: ['code'],
+ } as AuthorizationServerMetadata);
+
+ mockRegisterClient.mockResolvedValueOnce({
+ client_id: 'new-client-id',
+ redirect_uris: ['http://localhost:3080/api/mcp/test-server/oauth/callback'],
+ logo_uri: undefined,
+ tos_uri: undefined,
+ });
+
+ mockStartAuthorization.mockResolvedValueOnce({
+ authorizationUrl: new URL('https://auth.example.com/authorize?client_id=new-client-id'),
+ codeVerifier: 'test-code-verifier',
+ });
+
+ await MCPOAuthHandler.initiateOAuthFlow('test-server', serverUrl, 'user-123', {}, undefined);
+
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledTimes(1);
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledWith(
+ serverUrl,
+ expect.objectContaining({ resourceMetadataUrl: new URL(hintUrl) }),
+ expect.any(Function),
+ );
+ });
+
+ it('passes undefined resourceMetadataUrl when no hint is available', async () => {
+ mockProbeResourceMetadataHint.mockResolvedValueOnce(null);
+
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: serverUrl,
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
+ issuer: 'https://auth.example.com',
+ authorization_endpoint: 'https://auth.example.com/authorize',
+ token_endpoint: 'https://auth.example.com/token',
+ registration_endpoint: 'https://auth.example.com/register',
+ response_types_supported: ['code'],
+ } as AuthorizationServerMetadata);
+
+ mockRegisterClient.mockResolvedValueOnce({
+ client_id: 'new-client-id',
+ redirect_uris: ['http://localhost:3080/api/mcp/test-server/oauth/callback'],
+ logo_uri: undefined,
+ tos_uri: undefined,
+ });
+
+ mockStartAuthorization.mockResolvedValueOnce({
+ authorizationUrl: new URL('https://auth.example.com/authorize?client_id=new-client-id'),
+ codeVerifier: 'test-code-verifier',
+ });
+
+ await MCPOAuthHandler.initiateOAuthFlow('test-server', serverUrl, 'user-123', {}, undefined);
+
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledWith(
+ serverUrl,
+ expect.objectContaining({ resourceMetadataUrl: undefined }),
+ expect.any(Function),
+ );
+ });
+
+ it('prefers the hint over path-aware metadata when they diverge', async () => {
+ // The regression scenario from issue #12761: path-aware discovery would return
+ // stale metadata pointing at a defunct authorization server. The hint URL must
+ // take precedence so the SDK fetches the authoritative document instead.
+ mockProbeResourceMetadataHint.mockResolvedValueOnce({
+ resourceMetadataUrl: new URL(hintUrl),
+ bearerChallenge: true,
+ headAuthChallenge: true,
+ });
+
+ // Whatever the hint URL returns is what reaches the handler — stale path-aware
+ // data never gets a chance to be used, because the SDK follows the hint instead.
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: serverUrl,
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
+ issuer: 'https://auth.example.com',
+ authorization_endpoint: 'https://auth.example.com/authorize',
+ token_endpoint: 'https://auth.example.com/token',
+ registration_endpoint: 'https://auth.example.com/register',
+ response_types_supported: ['code'],
+ } as AuthorizationServerMetadata);
+
+ mockRegisterClient.mockResolvedValueOnce({
+ client_id: 'new-client-id',
+ redirect_uris: ['http://localhost:3080/api/mcp/test-server/oauth/callback'],
+ logo_uri: undefined,
+ tos_uri: undefined,
+ });
+
+ mockStartAuthorization.mockResolvedValueOnce({
+ authorizationUrl: new URL('https://auth.example.com/authorize?client_id=new-client-id'),
+ codeVerifier: 'test-code-verifier',
+ });
+
+ const result = await MCPOAuthHandler.initiateOAuthFlow(
+ 'test-server',
+ serverUrl,
+ 'user-123',
+ {},
+ undefined,
+ );
+
+ expect(result.authorizationUrl).toContain('auth.example.com');
+ // Exactly one SDK call — no separate path-aware retry.
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledTimes(1);
+ });
+
+ it('invokes the probe with the OAuth-aware fetch so oauthHeaders reach the server', async () => {
+ // Regression guard: without the wrapper, admin-configured `oauthHeaders` (e.g. a
+ // gateway API key that fronts the MCP endpoint) would be stripped from the probe,
+ // causing the gateway to 401 us for the wrong reason and masking the real hint.
+ mockProbeResourceMetadataHint.mockResolvedValueOnce(null);
+
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: serverUrl,
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
+ issuer: 'https://auth.example.com',
+ authorization_endpoint: 'https://auth.example.com/authorize',
+ token_endpoint: 'https://auth.example.com/token',
+ registration_endpoint: 'https://auth.example.com/register',
+ response_types_supported: ['code'],
+ } as AuthorizationServerMetadata);
+
+ mockRegisterClient.mockResolvedValueOnce({
+ client_id: 'new-client-id',
+ redirect_uris: ['http://localhost:3080/api/mcp/test-server/oauth/callback'],
+ logo_uri: undefined,
+ tos_uri: undefined,
+ });
+
+ mockStartAuthorization.mockResolvedValueOnce({
+ authorizationUrl: new URL('https://auth.example.com/authorize?client_id=new-client-id'),
+ codeVerifier: 'test-code-verifier',
+ });
+
+ await MCPOAuthHandler.initiateOAuthFlow(
+ 'test-server',
+ serverUrl,
+ 'user-123',
+ { 'X-Gateway-Key': 'secret' },
+ undefined,
+ );
+
+ expect(mockProbeResourceMetadataHint).toHaveBeenCalledTimes(1);
+ // Second argument must be a fetchFn (the OAuth-aware wrapper), not `undefined`.
+ const fetchFnArg = mockProbeResourceMetadataHint.mock.calls[0][1];
+ expect(typeof fetchFnArg).toBe('function');
+ });
+ });
});
diff --git a/packages/api/src/mcp/__tests__/reconnection-storm.test.ts b/packages/api/src/mcp/__tests__/reconnection-storm.test.ts
index e073dca8a362..c61e6659b8d5 100644
--- a/packages/api/src/mcp/__tests__/reconnection-storm.test.ts
+++ b/packages/api/src/mcp/__tests__/reconnection-storm.test.ts
@@ -8,6 +8,7 @@
import http from 'http';
import { randomUUID } from 'crypto';
import express from 'express';
+import { z } from 'zod';
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { isInitializeRequest } from '@modelcontextprotocol/sdk/types.js';
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
@@ -62,7 +63,7 @@ function startMCPServer(): Promise {
function createServer(): McpServer {
const server = new McpServer({ name: 'test-server', version: '1.0.0' });
- server.tool('echo', 'echoes input', { message: { type: 'string' } as never }, async (args) => {
+ server.tool('echo', 'echoes input', { message: z.string() }, async (args) => {
 const msg = (args as Record<string, unknown>).message ?? '';
return { content: [{ type: 'text', text: msg }] };
});
diff --git a/packages/api/src/mcp/oauth/detectOAuth.fallback.test.ts b/packages/api/src/mcp/oauth/detectOAuth.fallback.test.ts
new file mode 100644
index 000000000000..324fca8df608
--- /dev/null
+++ b/packages/api/src/mcp/oauth/detectOAuth.fallback.test.ts
@@ -0,0 +1,147 @@
+import { detectOAuthRequirement } from './detectOAuth';
+
+jest.mock('@modelcontextprotocol/sdk/client/auth.js', () => ({
+ ...jest.requireActual('@modelcontextprotocol/sdk/client/auth.js'),
+ discoverOAuthProtectedResourceMetadata: jest.fn(),
+}));
+
+jest.mock('~/auth', () => ({
+ isSSRFTarget: jest.fn(() => false),
+ resolveHostnameSSRF: jest.fn(async () => false),
+}));
+
+/**
+ * Exercises the `MCP_OAUTH_ON_AUTH_ERROR=true` path in isolation — the main
+ * `detectOAuth.test.ts` disables it to assert on precise detection outcomes, so
+ * the fallback's behavior (and its avoidance of a redundant HEAD request) lives
+ * here with the config forced on.
+ */
+jest.mock('../mcpConfig', () => ({
+ mcpConfig: {
+ OAUTH_ON_AUTH_ERROR: true,
+ OAUTH_DETECTION_TIMEOUT: 5000,
+ },
+}));
+
+import { discoverOAuthProtectedResourceMetadata } from '@modelcontextprotocol/sdk/client/auth.js';
+
+const mockDiscoverOAuthProtectedResourceMetadata =
+ discoverOAuthProtectedResourceMetadata as jest.MockedFunction<
+ typeof discoverOAuthProtectedResourceMetadata
+ >;
+
+describe('detectOAuthRequirement — MCP_OAUTH_ON_AUTH_ERROR fallback', () => {
+ const originalFetch = global.fetch;
+ const mockFetch = jest.fn() as unknown as jest.MockedFunction<typeof fetch>;
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ global.fetch = mockFetch;
+ mockDiscoverOAuthProtectedResourceMetadata.mockRejectedValue(
+ new Error('No protected resource metadata'),
+ );
+ });
+
+ afterAll(() => {
+ global.fetch = originalFetch;
+ });
+
+ it('honors a 401 observed by the probe without issuing a second HEAD', async () => {
+ // Server responds 401 on HEAD with neither Bearer nor resource_metadata (Basic).
+ // Before the authChallenge optimization, detectOAuth would fire another HEAD via
+ // checkAuthErrorFallback; now it reuses the probe's observation.
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({ 'www-authenticate': 'Basic realm="api"' }),
+ } as Response);
+ // POST still probed because the HEAD carried no useful hint.
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({ 'www-authenticate': 'Basic realm="api"' }),
+ } as Response);
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+
+ expect(result).toEqual({
+ requiresOAuth: true,
+ method: 'no-metadata-found',
+ metadata: null,
+ });
+ // Exactly 2 fetches — HEAD + POST from the probe. No redundant fallback HEAD.
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
+
+ it('honors a 403 observed by the probe without issuing a second HEAD', async () => {
+ mockFetch.mockResolvedValue({ status: 403, headers: new Headers() } as Response);
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+
+ expect(result.requiresOAuth).toBe(true);
+ expect(result.method).toBe('no-metadata-found');
+ // HEAD + POST from the probe; no extra fallback HEAD.
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
+
+ it('retries via HEAD when the probe threw (transient network error)', async () => {
+ // Probe crashes on both HEAD and POST (network down).
+ mockFetch.mockRejectedValueOnce(new Error('ECONNREFUSED'));
+ mockFetch.mockRejectedValueOnce(new Error('ECONNREFUSED'));
+ // Fallback HEAD succeeds and returns 401 — we honor it.
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers(),
+ } as Response);
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+
+ expect(result.requiresOAuth).toBe(true);
+ expect(result.method).toBe('no-metadata-found');
+ expect(mockFetch).toHaveBeenCalledTimes(3);
+ });
+
+ it('does not fire fallback when the probe observed a clean 200', async () => {
+ mockFetch.mockResolvedValue({ status: 200, headers: new Headers() } as Response);
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+
+ expect(result.requiresOAuth).toBe(false);
+ expect(result.method).toBe('no-metadata-found');
+ // HEAD + POST from the probe, both 200 — fallback must not fire.
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
+
+ it('retries via fallback HEAD when only HEAD threw (transient failure)', async () => {
+ // If HEAD transiently fails (timeout/ECONNRESET) but POST responds non-auth, the
+ // probe must treat HEAD status as "unknown" so the fallback still gets a chance
+ // to classify 401/403 servers correctly.
+ mockFetch
+ .mockRejectedValueOnce(new Error('ETIMEDOUT'))
+ .mockResolvedValueOnce({ status: 200, headers: new Headers() } as Response);
+ // Fallback HEAD finally succeeds and returns 401.
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers(),
+ } as Response);
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+
+ expect(result.requiresOAuth).toBe(true);
+ expect(result.method).toBe('no-metadata-found');
+ expect(mockFetch).toHaveBeenCalledTimes(3);
+ });
+
+ it('does not fire fallback when HEAD was 200 but POST returned 403 (WAF/CSRF)', async () => {
+ // A server that isn't OAuth-protected but 403s body-less POSTs for WAF/CSRF reasons
+ // must NOT be misclassified as OAuth-required. The fallback is scoped to HEAD status.
+ mockFetch
+ .mockResolvedValueOnce({ status: 200, headers: new Headers() } as Response)
+ .mockResolvedValueOnce({ status: 403, headers: new Headers() } as Response);
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+
+ expect(result.requiresOAuth).toBe(false);
+ expect(result.method).toBe('no-metadata-found');
+ // Only the probe ran — no extra fallback HEAD fired.
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
+});
diff --git a/packages/api/src/mcp/oauth/detectOAuth.test.ts b/packages/api/src/mcp/oauth/detectOAuth.test.ts
index 8164adddaf90..cfcbf9a69211 100644
--- a/packages/api/src/mcp/oauth/detectOAuth.test.ts
+++ b/packages/api/src/mcp/oauth/detectOAuth.test.ts
@@ -1,15 +1,37 @@
+import { discoverOAuthProtectedResourceMetadata } from '@modelcontextprotocol/sdk/client/auth.js';
+import { isSSRFTarget, resolveHostnameSSRF } from '~/auth';
import { detectOAuthRequirement } from './detectOAuth';
jest.mock('@modelcontextprotocol/sdk/client/auth.js', () => ({
+ ...jest.requireActual('@modelcontextprotocol/sdk/client/auth.js'),
discoverOAuthProtectedResourceMetadata: jest.fn(),
}));
-import { discoverOAuthProtectedResourceMetadata } from '@modelcontextprotocol/sdk/client/auth.js';
+jest.mock('~/auth', () => ({
+ isSSRFTarget: jest.fn(() => false),
+ resolveHostnameSSRF: jest.fn(async () => false),
+}));
+
+/**
+ * Disable the `MCP_OAUTH_ON_AUTH_ERROR` fallback by default so tests assert on the
+ * precise detection outcome without the "any 401/403 = OAuth" safety net rewriting
+ * the result. The fallback is exercised directly in its own describe block.
+ */
+jest.mock('../mcpConfig', () => ({
+ mcpConfig: {
+ OAUTH_ON_AUTH_ERROR: false,
+ OAUTH_DETECTION_TIMEOUT: 5000,
+ },
+}));
const mockDiscoverOAuthProtectedResourceMetadata =
discoverOAuthProtectedResourceMetadata as jest.MockedFunction<
typeof discoverOAuthProtectedResourceMetadata
>;
+const mockIsSSRFTarget = isSSRFTarget as jest.MockedFunction<typeof isSSRFTarget>;
+const mockResolveHostnameSSRF = resolveHostnameSSRF as jest.MockedFunction<
+ typeof resolveHostnameSSRF
+>;
describe('detectOAuthRequirement', () => {
const originalFetch = global.fetch;
@@ -18,6 +40,7 @@ describe('detectOAuthRequirement', () => {
beforeEach(() => {
jest.clearAllMocks();
global.fetch = mockFetch;
+ // Default: path-aware / hint discovery returns no metadata unless a test overrides.
mockDiscoverOAuthProtectedResourceMetadata.mockRejectedValue(
new Error('No protected resource metadata'),
);
@@ -28,14 +51,12 @@ describe('detectOAuthRequirement', () => {
});
describe('POST fallback when HEAD fails', () => {
- it('should try POST when HEAD returns 405 Method Not Allowed', async () => {
- // HEAD returns 405 (Method Not Allowed)
+ it('tries POST when HEAD returns 405 Method Not Allowed', async () => {
mockFetch.mockResolvedValueOnce({
status: 405,
headers: new Headers(),
} as Response);
- // POST returns 401 with Bearer
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Bearer' }),
@@ -46,11 +67,7 @@ describe('detectOAuthRequirement', () => {
expect(result.requiresOAuth).toBe(true);
expect(result.method).toBe('401-challenge-metadata');
expect(mockFetch).toHaveBeenCalledTimes(2);
-
- // Verify HEAD was called first
expect(mockFetch.mock.calls[0][1]).toEqual(expect.objectContaining({ method: 'HEAD' }));
-
- // Verify POST was called second with proper headers and body
expect(mockFetch.mock.calls[1][1]).toEqual(
expect.objectContaining({
method: 'POST',
@@ -60,14 +77,12 @@ describe('detectOAuthRequirement', () => {
);
});
- it('should try POST when HEAD returns non-401 status', async () => {
- // HEAD returns 200 OK (no auth required for HEAD)
+ it('tries POST when HEAD returns non-401 status', async () => {
mockFetch.mockResolvedValueOnce({
status: 200,
headers: new Headers(),
} as Response);
- // POST returns 401 with Bearer
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Bearer' }),
@@ -79,23 +94,49 @@ describe('detectOAuthRequirement', () => {
expect(mockFetch).toHaveBeenCalledTimes(2);
});
- it('should not try POST if HEAD returns 401', async () => {
- // HEAD returns 401 with Bearer
+ it('short-circuits POST when HEAD already delivers the resource_metadata hint', async () => {
+ // Only `resource_metadata` on HEAD is strong enough to skip POST — Bearer-only
+ // still lets POST run in case the server surfaces its hint only on POST.
+ const hintUrl = 'https://example.com/.well-known/oauth-protected-resource';
mockFetch.mockResolvedValueOnce({
status: 401,
- headers: new Headers({ 'www-authenticate': 'Bearer' }),
+ headers: new Headers({
+ 'www-authenticate': `Bearer resource_metadata="${hintUrl}"`,
+ }),
} as Response);
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: 'https://mcp.example.com',
+ authorization_servers: ['https://auth.example.com'],
+ });
+
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(true);
- // Only HEAD should be called since it returned 401
expect(mockFetch).toHaveBeenCalledTimes(1);
});
+
+ it('probes POST even when HEAD returns Bearer without a hint', async () => {
+ mockFetch
+ .mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({ 'www-authenticate': 'Bearer' }),
+ } as Response)
+ .mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({ 'www-authenticate': 'Bearer' }),
+ } as Response);
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+
+ expect(result.requiresOAuth).toBe(true);
+ expect(result.method).toBe('401-challenge-metadata');
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
});
describe('Bearer detection without resource_metadata URL', () => {
- it('should detect OAuth when 401 has WWW-Authenticate: Bearer (case insensitive)', async () => {
+ it('detects OAuth when 401 has WWW-Authenticate: Bearer (case insensitive)', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'bearer' }),
@@ -108,7 +149,7 @@ describe('detectOAuthRequirement', () => {
expect(result.metadata).toBeNull();
});
- it('should detect OAuth when 401 has WWW-Authenticate: BEARER (uppercase)', async () => {
+ it('detects OAuth when 401 has WWW-Authenticate: BEARER (uppercase)', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'BEARER' }),
@@ -120,7 +161,7 @@ describe('detectOAuthRequirement', () => {
expect(result.method).toBe('401-challenge-metadata');
});
- it('should detect OAuth when Bearer is part of a larger header value', async () => {
+ it('detects OAuth when Bearer is part of a larger header value', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Bearer realm="api"' }),
@@ -131,13 +172,11 @@ describe('detectOAuthRequirement', () => {
expect(result.requiresOAuth).toBe(true);
});
- it('should not detect OAuth when 401 has no WWW-Authenticate header', async () => {
+ it('does not detect OAuth when 401 has no WWW-Authenticate header', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers(),
} as Response);
-
- // POST also returns 401 without header
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers(),
@@ -149,13 +188,11 @@ describe('detectOAuthRequirement', () => {
expect(result.method).toBe('no-metadata-found');
});
- it('should not detect OAuth when 401 has non-Bearer auth scheme', async () => {
+ it('does not detect OAuth when 401 has non-Bearer auth scheme', async () => {
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Basic realm="api"' }),
} as Response);
-
- // POST also returns 401 with Basic
mockFetch.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Basic realm="api"' }),
@@ -168,69 +205,183 @@ describe('detectOAuthRequirement', () => {
});
describe('resource_metadata URL in WWW-Authenticate', () => {
- it('should prefer resource_metadata URL when provided with Bearer', async () => {
+ it('passes the WWW-Authenticate hint URL to the SDK and returns its metadata', async () => {
const metadataUrl = 'https://auth.example.com/.well-known/oauth-protected-resource';
- mockFetch
- // HEAD request - 401 with resource_metadata URL
- .mockResolvedValueOnce({
- status: 401,
- headers: new Headers({
- 'www-authenticate': `Bearer resource_metadata="${metadataUrl}"`,
- }),
- } as Response)
- // Metadata fetch
- .mockResolvedValueOnce({
- ok: true,
- json: async () => ({
- authorization_servers: ['https://auth.example.com'],
- }),
- } as Response);
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': `Bearer resource_metadata="${metadataUrl}"`,
+ }),
+ } as Response);
+
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: 'https://mcp.example.com',
+ authorization_servers: ['https://auth.example.com'],
+ });
const result = await detectOAuthRequirement('https://mcp.example.com');
expect(result.requiresOAuth).toBe(true);
expect(result.method).toBe('401-challenge-metadata');
- expect(result.metadata).toEqual({
+ expect(result.metadata).toMatchObject({
authorization_servers: ['https://auth.example.com'],
});
+
+ // The SDK must be called with the hint so that it fetches the authoritative URL
+ // instead of the path-aware `/.well-known/oauth-protected-resource/` variant.
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledWith(
+ 'https://mcp.example.com',
+ expect.objectContaining({ resourceMetadataUrl: new URL(metadataUrl) }),
+ );
});
- it('should fall back to Bearer detection if metadata fetch fails', async () => {
+ it('falls back to Bearer-only detection when hinted metadata fetch fails', async () => {
const metadataUrl = 'https://auth.example.com/.well-known/oauth-protected-resource';
- mockFetch
- // HEAD request - 401 with resource_metadata URL
- .mockResolvedValueOnce({
- status: 401,
- headers: new Headers({
- 'www-authenticate': `Bearer resource_metadata="${metadataUrl}"`,
- }),
- } as Response)
- // Metadata fetch fails
- .mockRejectedValueOnce(new Error('Network error'));
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': `Bearer resource_metadata="${metadataUrl}"`,
+ }),
+ } as Response);
+
+ // Hinted discovery throws (e.g. 404 at the hinted URL).
+ mockDiscoverOAuthProtectedResourceMetadata.mockRejectedValueOnce(
+ new Error('Resource server does not implement OAuth 2.0 Protected Resource Metadata.'),
+ );
const result = await detectOAuthRequirement('https://mcp.example.com');
- // Should still detect OAuth via Bearer
expect(result.requiresOAuth).toBe(true);
+ expect(result.method).toBe('401-challenge-metadata');
expect(result.metadata).toBeNull();
});
+
+ it('prefers the 401 hint even when path-aware metadata exists (RFC 9728 §5.1)', async () => {
+ // This is the bug from issue #12761: when a 401 WWW-Authenticate header advertises
+ // a `resource_metadata` URL, that URL must win over any path-aware metadata.
+ const metadataUrl = 'https://mcp.example.com/.well-known/oauth-protected-resource';
+
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': `Bearer resource_metadata="${metadataUrl}"`,
+ }),
+ } as Response);
+
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: 'https://mcp.example.com/mcp',
+ authorization_servers: ['https://auth.example.com/'],
+ });
+
+ const result = await detectOAuthRequirement('https://mcp.example.com/mcp');
+
+ expect(result.metadata).toMatchObject({
+ authorization_servers: ['https://auth.example.com/'],
+ });
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledTimes(1);
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledWith(
+ 'https://mcp.example.com/mcp',
+ expect.objectContaining({ resourceMetadataUrl: new URL(metadataUrl) }),
+ );
+ });
});
- describe('StackOverflow-like server behavior', () => {
- it('should detect OAuth for servers that return 405 for HEAD and 401+Bearer for POST', async () => {
- // This mimics StackOverflow's actual behavior:
- // HEAD -> 405 Method Not Allowed
- // POST -> 401 with WWW-Authenticate: Bearer
+ describe('SSRF hardening of the resource_metadata hint', () => {
+ // A malicious MCP server can advertise a `resource_metadata=` URL that points at a
+ // private IP, loopback, or cloud metadata service. Blindly handing that URL to the
+ // SDK would let the server weaponize detection as an SSRF vector, so we validate it
+ // first and silently fall back to path-aware discovery on rejection.
+
+ it('drops a hint URL whose hostname matches an SSRF target list entry', async () => {
+ const maliciousHint = 'http://169.254.169.254/latest/meta-data/';
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': `Bearer resource_metadata="${maliciousHint}"`,
+ }),
+ } as Response);
+
+ mockIsSSRFTarget.mockImplementation((hostname: string) => hostname === '169.254.169.254');
+
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: 'https://mcp.example.com',
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledWith(
+ 'https://mcp.example.com',
+ expect.objectContaining({ resourceMetadataUrl: undefined }),
+ );
+ expect(result.requiresOAuth).toBe(true);
+ expect(result.method).toBe('protected-resource-metadata');
+ });
+
+ it('drops a hint URL whose hostname resolves to a private address', async () => {
+ const maliciousHint = 'https://internal.local/.well-known/oauth-protected-resource';
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': `Bearer resource_metadata="${maliciousHint}"`,
+ }),
+ } as Response);
+
+ mockResolveHostnameSSRF.mockImplementation(
+ async (hostname: string) => hostname === 'internal.local',
+ );
+
+ mockDiscoverOAuthProtectedResourceMetadata.mockRejectedValueOnce(
+ new Error('Resource server does not implement OAuth 2.0 Protected Resource Metadata.'),
+ );
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+
+ // Hint dropped → SDK called with undefined → path-aware falls through → Bearer-only.
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledWith(
+ 'https://mcp.example.com',
+ expect.objectContaining({ resourceMetadataUrl: undefined }),
+ );
+ expect(result.requiresOAuth).toBe(true);
+ expect(result.method).toBe('401-challenge-metadata');
+ expect(result.metadata).toBeNull();
+ });
+ });
+
+ describe('path-aware discovery without a hint', () => {
+ it('uses path-aware discovery when the server returns no 401 challenge', async () => {
+ // HEAD and POST both return 200 — no challenge, but the server may still advertise
+ // `.well-known/oauth-protected-resource` (uncommon but spec-allowed).
+ mockFetch.mockResolvedValue({
+ status: 200,
+ headers: new Headers(),
+ } as Response);
+
+ mockDiscoverOAuthProtectedResourceMetadata.mockResolvedValueOnce({
+ resource: 'https://mcp.example.com',
+ authorization_servers: ['https://auth.example.com'],
+ });
+
+ const result = await detectOAuthRequirement('https://mcp.example.com');
+
+ expect(result.requiresOAuth).toBe(true);
+ expect(result.method).toBe('protected-resource-metadata');
+ expect(mockDiscoverOAuthProtectedResourceMetadata).toHaveBeenCalledWith(
+ 'https://mcp.example.com',
+ expect.objectContaining({ resourceMetadataUrl: undefined }),
+ );
+ });
+ });
+
+ describe('StackOverflow-like server behavior', () => {
+ it('detects OAuth for servers that return 405 for HEAD and 401+Bearer for POST', async () => {
mockFetch
- // HEAD returns 405
.mockResolvedValueOnce({
status: 405,
headers: new Headers(),
} as Response)
- // POST returns 401 with Bearer
.mockResolvedValueOnce({
status: 401,
headers: new Headers({ 'www-authenticate': 'Bearer' }),
@@ -245,7 +396,7 @@ describe('detectOAuthRequirement', () => {
});
describe('error handling', () => {
- it('should return no OAuth required when all checks fail', async () => {
+ it('returns no OAuth required when all checks fail', async () => {
mockFetch.mockRejectedValue(new Error('Network error'));
const result = await detectOAuthRequirement('https://unreachable.example.com');
@@ -254,7 +405,7 @@ describe('detectOAuthRequirement', () => {
expect(result.method).toBe('no-metadata-found');
});
- it('should handle timeout gracefully', async () => {
+ it('handles timeout gracefully', async () => {
mockFetch.mockImplementation(
() => new Promise((_, reject) => setTimeout(() => reject(new Error('Timeout')), 100)),
);
diff --git a/packages/api/src/mcp/oauth/detectOAuth.ts b/packages/api/src/mcp/oauth/detectOAuth.ts
index 84d2066f4c0b..33391b181dd7 100644
--- a/packages/api/src/mcp/oauth/detectOAuth.ts
+++ b/packages/api/src/mcp/oauth/detectOAuth.ts
@@ -6,6 +6,8 @@
// Manual testing ensures the OAuth detection still works against real MCP servers.
import { discoverOAuthProtectedResourceMetadata } from '@modelcontextprotocol/sdk/client/auth.js';
+import { isSSRFTarget, resolveHostnameSSRF } from '~/auth';
+import { probeResourceMetadataHint } from './resourceHint';
import { mcpConfig } from '../mcpConfig';
export interface OAuthDetectionResult {
@@ -17,25 +19,65 @@ export interface OAuthDetectionResult {
/**
* Detects if an MCP server requires OAuth authentication using proactive discovery methods.
*
- * This function implements a comprehensive OAuth detection strategy:
- * 1. Standard Protected Resource Metadata (RFC 9728) - checks /.well-known/oauth-protected-resource
- * 2. 401 Challenge Method - checks WWW-Authenticate header for resource_metadata URL
- * 3. Optional fallback: treat any 401/403 response as OAuth requirement (if MCP_OAUTH_ON_AUTH_ERROR=true)
+ * Strategy (RFC 9728 §5.1 aligned):
+ * 1. Probe the server for a 401 challenge and extract the `resource_metadata` URL from
+ * the `WWW-Authenticate` header, if any.
+ * 2. Call the SDK's Protected Resource Metadata discovery. When the hint is present it
+ * overrides the path-aware well-known endpoint, matching the behavior of Claude
+ * Desktop / MCP Inspector / Copilot and avoiding stale path-aware metadata.
+ * 3. If no metadata was found but the server advertised `Bearer`, report OAuth-required
+ * without metadata (legacy servers without `.well-known` still need auth).
+ * 4. Optional fallback: treat any 401/403 as an OAuth requirement when
+ * `MCP_OAUTH_ON_AUTH_ERROR=true`.
*
* @param serverUrl - The MCP server URL to check for OAuth requirements
- * @returns Promise - OAuth requirement details
*/
 export async function detectOAuthRequirement(serverUrl: string): Promise<OAuthDetectionResult> {
- const protectedResourceResult = await checkProtectedResourceMetadata(serverUrl);
- if (protectedResourceResult) return protectedResourceResult;
-
- const challengeResult = await check401ChallengeMetadata(serverUrl);
- if (challengeResult) return challengeResult;
+ const hint = await probeResourceMetadataHint(serverUrl);
+
+ /**
+ * The `resource_metadata` URL is attacker-controlled (it's echoed from the MCP
+ * server's own 401 challenge). Reject hints pointing at private/loopback/metadata
+ * addresses before the SDK fetches them, so a malicious server cannot weaponize
+ * detection as an SSRF vector against the LibreChat host or its internal network.
+ */
+ const safeHintUrl = hint?.resourceMetadataUrl
+ ? await validateHintUrl(hint.resourceMetadataUrl)
+ : undefined;
+
+ const metadataResult = await checkProtectedResourceMetadata(serverUrl, safeHintUrl);
+ if (metadataResult) return metadataResult;
+
+ if (hint?.bearerChallenge) {
+ return {
+ requiresOAuth: true,
+ method: '401-challenge-metadata',
+ metadata: null,
+ };
+ }
- const fallbackResult = await checkAuthErrorFallback(serverUrl);
- if (fallbackResult) return fallbackResult;
+ /**
+ * `MCP_OAUTH_ON_AUTH_ERROR` fallback: honor a 401/403 already observed by the HEAD
+ * probe instead of issuing a duplicate HEAD. POST-only 401/403 is intentionally
+ * excluded — WAF/CSRF rules commonly 403 a body-less JSON POST on endpoints that
+ * have nothing to do with OAuth, and those must not flip detection. A `null` probe
+ * means every attempt threw (transient network error); retry once via HEAD so a
+ * blip doesn't flip detection to "no OAuth required" for a server that needs it.
+ */
+ if (mcpConfig.OAUTH_ON_AUTH_ERROR) {
+ if (hint?.headAuthChallenge) {
+ return {
+ requiresOAuth: true,
+ method: 'no-metadata-found',
+ metadata: null,
+ };
+ }
+ if (hint === null) {
+ const fallbackResult = await checkAuthErrorFallback(serverUrl);
+ if (fallbackResult) return fallbackResult;
+ }
+ }
- // No OAuth detected
return {
requiresOAuth: false,
method: 'no-metadata-found',
@@ -47,18 +89,26 @@ export async function detectOAuthRequirement(serverUrl: string): Promise<OAuthDetectionResult> {
try {
- const resourceMetadata = await discoverOAuthProtectedResourceMetadata(serverUrl);
+ const resourceMetadata = await discoverOAuthProtectedResourceMetadata(serverUrl, {
+ resourceMetadataUrl,
+ });
if (!resourceMetadata?.authorization_servers?.length) return null;
return {
requiresOAuth: true,
- method: 'protected-resource-metadata',
+ method: resourceMetadataUrl ? '401-challenge-metadata' : 'protected-resource-metadata',
metadata: resourceMetadata,
};
} catch {
@@ -67,90 +117,26 @@ async function checkProtectedResourceMetadata(
}
/**
- * Checks for OAuth using 401 challenge with resource metadata URL or Bearer token.
- * Tries HEAD first, then falls back to POST if HEAD doesn't return 401.
- * Some servers (like StackOverflow) only return 401 for POST requests.
+ * SSRF-guards an attacker-controlled `resource_metadata` hint before the SDK follows it.
+ * `detectOAuthRequirement` runs without admin-scoped `allowedDomains`, so the rejection
+ * policy here is stricter than the handler's: any private/loopback/metadata-service
+ * target is dropped, regardless of origin relative to the MCP server. On rejection the
+ * caller continues with path-aware discovery (safe, since it targets the server itself).
*/
-async function check401ChallengeMetadata(serverUrl: string): Promise<OAuthDetectionResult | null> {
- // Try HEAD first (lighter weight)
- const headResult = await check401WithMethod(serverUrl, 'HEAD');
- if (headResult) return headResult;
-
- // Fall back to POST if HEAD didn't return 401 (some servers don't support HEAD)
- const postResult = await check401WithMethod(serverUrl, 'POST');
- if (postResult) return postResult;
-
- return null;
-}
-
-async function check401WithMethod(
- serverUrl: string,
- method: 'HEAD' | 'POST',
-): Promise<OAuthDetectionResult | null> {
+async function validateHintUrl(hintUrl: URL): Promise<URL | undefined> {
try {
- const fetchOptions: RequestInit = {
- method,
- signal: AbortSignal.timeout(mcpConfig.OAUTH_DETECTION_TIMEOUT),
- };
-
- // POST requests need headers and body for MCP servers
- if (method === 'POST') {
- fetchOptions.headers = { 'Content-Type': 'application/json' };
- fetchOptions.body = JSON.stringify({});
- }
-
- const response = await fetch(serverUrl, fetchOptions);
-
- if (response.status !== 401) return null;
-
- const wwwAuth = response.headers.get('www-authenticate');
- const metadataUrl = wwwAuth?.match(/resource_metadata="([^"]+)"/)?.[1];
-
- if (metadataUrl) {
- try {
- // Try to fetch resource metadata from the provided URL
- const metadataResponse = await fetch(metadataUrl, {
- signal: AbortSignal.timeout(mcpConfig.OAUTH_DETECTION_TIMEOUT),
- });
- const metadata = await metadataResponse.json();
-
- if (metadata?.authorization_servers?.length) {
- return {
- requiresOAuth: true,
- method: '401-challenge-metadata',
- metadata,
- };
- }
- } catch {
- // Metadata fetch failed, continue to Bearer check below
- }
- }
-
- /**
- * If we got a 401 with WWW-Authenticate containing "Bearer" (case-insensitive),
- * the server requires OAuth authentication even without discovery metadata.
- * This handles "legacy" OAuth servers (like StackOverflow's MCP) that use standard
- * OAuth endpoints (/authorize, /token, /register) without .well-known metadata.
- */
- if (wwwAuth && /bearer/i.test(wwwAuth)) {
- return {
- requiresOAuth: true,
- method: '401-challenge-metadata',
- metadata: null,
- };
- }
-
- return null;
+ if (isSSRFTarget(hintUrl.hostname)) return undefined;
+ if (await resolveHostnameSSRF(hintUrl.hostname)) return undefined;
+ return hintUrl;
} catch {
- return null;
+ // If validation itself fails (e.g. DNS lookup threw), be conservative and drop the hint.
+ return undefined;
}
}
-// Fallback method: treats any auth error as OAuth requirement if configured
+// Fallback: only called when probing threw. Caller already gates on `OAUTH_ON_AUTH_ERROR`.
 async function checkAuthErrorFallback(serverUrl: string): Promise<OAuthDetectionResult | null> {
try {
- if (!mcpConfig.OAUTH_ON_AUTH_ERROR) return null;
-
const response = await fetch(serverUrl, {
method: 'HEAD',
signal: AbortSignal.timeout(mcpConfig.OAUTH_DETECTION_TIMEOUT),
diff --git a/packages/api/src/mcp/oauth/handler.ts b/packages/api/src/mcp/oauth/handler.ts
index ccca5b1945b3..acd31a1fca95 100644
--- a/packages/api/src/mcp/oauth/handler.ts
+++ b/packages/api/src/mcp/oauth/handler.ts
@@ -2,6 +2,10 @@ import { randomBytes } from 'crypto';
import { logger } from '@librechat/data-schemas';
import { FetchLike } from '@modelcontextprotocol/sdk/shared/transport';
import { OAuthMetadataSchema } from '@modelcontextprotocol/sdk/shared/auth.js';
+import {
+ checkResourceAllowed,
+ resourceUrlFromServerUrl,
+} from '@modelcontextprotocol/sdk/shared/auth-utils.js';
import {
registerClient,
startAuthorization,
@@ -26,6 +30,7 @@ import {
inferClientAuthMethod,
} from './methods';
import { isSSRFTarget, resolveHostnameSSRF, isOAuthUrlAllowed } from '~/auth';
+import { probeResourceMetadataHint } from './resourceHint';
import { MCPTokenStorage } from './tokens';
import { sanitizeUrlForLogging } from '~/mcp/utils';
@@ -140,14 +145,73 @@ export class MCPOAuthHandler {
const fetchFn = this.createOAuthFetch(oauthHeaders);
+ /**
+ * RFC 9728 §5.1: when the server's 401 `WWW-Authenticate` header advertises a
+ * `resource_metadata` URL, use that URL as the authoritative source. Path-aware
+ * `.well-known` discovery is a fallback for when the hint is absent — not the
+ * other way round — otherwise a split deployment can serve stale/wrong metadata at the
+ * path-aware endpoint and strand the flow at a defunct authorization server.
+ *
+ * Reuse `fetchFn` so admin-configured `oauthHeaders` (e.g. a gateway API key
+ * required to reach the MCP endpoint at all) are attached to the probe — without
+ * them, the probe would 401 for the wrong reason and never see the real challenge.
+ */
+ const hint = await probeResourceMetadataHint(serverUrl, fetchFn);
+ /**
+ * The hint URL is attacker-controlled (it comes from the MCP server's own 401
+ * challenge). Validate it through the same SSRF/allowedDomains gate used for the
+ * authorization server — otherwise a malicious server could point discovery at
+ * a private IP, the metadata service, or a host the admin never intended to reach.
+ * On validation failure, discard the hint and fall back to path-aware discovery.
+ */
+ let hintUrl: URL | undefined;
+ if (hint?.resourceMetadataUrl) {
+ try {
+ await this.validateOAuthUrl(
+ hint.resourceMetadataUrl.toString(),
+ 'resource_metadata',
+ allowedDomains,
+ );
+ hintUrl = hint.resourceMetadataUrl;
+ logger.debug(
+ `[MCPOAuth] Using resource_metadata URL from WWW-Authenticate: ${sanitizeUrlForLogging(hintUrl.toString())}`,
+ );
+ } catch (error) {
+ logger.warn(
+ `[MCPOAuth] Rejecting untrusted resource_metadata hint from ${sanitizeUrlForLogging(serverUrl)}; falling back to path-aware discovery`,
+ { error },
+ );
+ }
+ }
+
try {
- // Try to discover resource metadata first
logger.debug(
- `[MCPOAuth] Attempting to discover protected resource metadata from ${serverUrl}`,
+ `[MCPOAuth] Attempting to discover protected resource metadata from ${sanitizeUrlForLogging(serverUrl)}`,
+ );
+ resourceMetadata = await discoverOAuthProtectedResourceMetadata(
+ serverUrl,
+ { resourceMetadataUrl: hintUrl },
+ fetchFn,
);
- resourceMetadata = await discoverOAuthProtectedResourceMetadata(serverUrl, {}, fetchFn);
+ } catch (error) {
+ logger.debug('[MCPOAuth] Resource metadata discovery failed, continuing with server URL', {
+ error,
+ });
+ }
- if (resourceMetadata?.authorization_servers?.length) {
+ if (resourceMetadata) {
+ /**
+ * RFC 9728 §3.3 / §7.3: the `resource` identifier in a Protected Resource Metadata
+ * document MUST match the URL the client used to fetch it. Without this check a
+ * malicious MCP server can impersonate a legitimate one by advertising the real
+ * server's resource URL plus the real server's authorization server, causing tokens
+ * minted for the real server to be sent to the attacker (GHSA-gvpj-vm2f-2m23).
+ * On mismatch, discard the entire document: `authorization_servers` and any other
+ * field on it are equally untrustworthy.
+ */
+ this.assertResourceBoundToServer(serverUrl, resourceMetadata);
+
+ if (resourceMetadata.authorization_servers?.length) {
const discoveredAuthServer = resourceMetadata.authorization_servers[0];
await this.validateOAuthUrl(discoveredAuthServer, 'authorization_server', allowedDomains);
authServerUrl = new URL(discoveredAuthServer);
@@ -157,10 +221,6 @@ export class MCPOAuthHandler {
} else {
logger.debug(`[MCPOAuth] No authorization servers found in resource metadata`);
}
- } catch (error) {
- logger.debug('[MCPOAuth] Resource metadata discovery failed, continuing with server URL', {
- error,
- });
}
// Discover OAuth metadata
@@ -586,25 +646,28 @@ export class MCPOAuthHandler {
authorizationUrl.searchParams.set('state', state);
logger.debug(`[MCPOAuth] Added state parameter to authorization URL`);
- if (resourceMetadata?.resource != null && resourceMetadata.resource) {
- try {
- const canonicalResource = new URL(resourceMetadata.resource).href;
- authorizationUrl.searchParams.set('resource', canonicalResource);
- logger.debug(
- `[MCPOAuth] Added resource parameter to authorization URL: ${canonicalResource}`,
- );
- } catch (error) {
- authorizationUrl.searchParams.set('resource', resourceMetadata.resource);
- logger.error(
- `[MCPOAuth] Invalid resource URL from metadata for ${serverName}: ` +
- `'${resourceMetadata.resource}'. Using raw value as fallback.`,
- error,
- );
- }
+ if (resourceMetadata?.resource) {
+ /**
+ * `resource` was already canonicalized and bound to `serverUrl` inside
+ * {@link discoverMetadata} via {@link assertResourceBoundToServer}, so `new URL`
+ * here cannot throw and the value is safe to echo back to the authorization server.
+ */
+ const canonicalResource = new URL(resourceMetadata.resource).href;
+ authorizationUrl.searchParams.set('resource', canonicalResource);
+ logger.debug(
+ `[MCPOAuth] Added resource parameter to authorization URL: ${canonicalResource}`,
+ );
} else {
+ /**
+ * Reachable only when `discoverOAuthProtectedResourceMetadata` did not return a
+ * document (404 / network error / server does not implement RFC 9728). If a PRM
+ * document exists but is missing `resource`, {@link assertResourceBoundToServer}
+ * rejects it before this code runs, so this branch does not warn about a
+ * malformed document — it warns about the absence of one.
+ */
logger.warn(
- `[MCPOAuth] Resource metadata missing 'resource' property for ${serverName}. ` +
- 'This can cause issues with some Authorization Servers who expect a "resource" parameter.',
+ `[MCPOAuth] No protected resource metadata available for ${serverName}. ` +
+ 'This can cause issues with some Authorization Servers that expect a "resource" parameter.',
);
}
} catch (error) {
@@ -677,17 +740,19 @@ export class MCPOAuthHandler {
}
let resource: URL | undefined;
- try {
- if (metadata.resourceMetadata?.resource != null && metadata.resourceMetadata.resource) {
+ if (metadata.resourceMetadata) {
+ /**
+ * Defense-in-depth: re-assert the RFC 9728 §3.3 binding against the flow's stored
+ * server URL. Flow state has a 10-minute TTL, so a flow initiated under older
+ * (pre-fix) code could still be in-flight at upgrade time carrying unvalidated
+ * resource metadata. Re-validating here closes that window without requiring ops
+ * teams to flush flow state on deploy (GHSA-gvpj-vm2f-2m23).
+ */
+ this.assertResourceBoundToServer(metadata.serverUrl, metadata.resourceMetadata);
+ if (metadata.resourceMetadata.resource) {
resource = new URL(metadata.resourceMetadata.resource);
logger.debug(`[MCPOAuth] Resource URL for flow ${flowId}: ${resource.toString()}`);
}
- } catch (error) {
- logger.warn(
- `[MCPOAuth] Invalid resource URL format for flow ${flowId}: '${metadata.resourceMetadata!.resource}'. ` +
- `Error: ${error instanceof Error ? error.message : 'Unknown error'}. Proceeding without resource parameter.`,
- );
- resource = undefined;
}
const tokens = await exchangeAuthorization(metadata.serverUrl, {
@@ -755,6 +820,48 @@ export class MCPOAuthHandler {
return randomBytes(32).toString('base64url');
}
+ /**
+ * Enforces RFC 9728 §3.3 / §7.3: the `resource` identifier advertised by an OAuth
+ * Protected Resource Metadata document MUST match the URL the client used to fetch
+ * the document. A mismatch means the metadata is attacker-controlled (or the server
+ * is badly misconfigured); per the RFC the whole document MUST be discarded, and in
+ * practice we must fail the OAuth flow because `authorization_servers` on the same
+ * document is also untrustworthy and was the primary theft vector in
+ * GHSA-gvpj-vm2f-2m23.
+ *
+ * Uses the MCP SDK's own {@link checkResourceAllowed} so the semantics (same origin
+ * plus configured-path-prefix) match what the SDK enforces internally via
+ * {@link selectResourceURL}, a code path LibreChat does not go through.
+ */
+ private static assertResourceBoundToServer(
+ serverUrl: string,
+ resourceMetadata: OAuthProtectedResourceMetadata,
+ ): void {
+ if (!resourceMetadata.resource) {
+ throw new Error(
+ `[MCPOAuth] Protected Resource Metadata from ${sanitizeUrlForLogging(serverUrl)} is missing the required 'resource' identifier (RFC 9728 §2). Refusing OAuth flow.`,
+ );
+ }
+
+ let allowed = false;
+ try {
+ allowed = checkResourceAllowed({
+ requestedResource: resourceUrlFromServerUrl(serverUrl),
+ configuredResource: resourceMetadata.resource,
+ });
+ } catch (error) {
+ throw new Error(
+ `[MCPOAuth] Unable to validate Protected Resource Metadata 'resource' for ${sanitizeUrlForLogging(serverUrl)}: ${error instanceof Error ? error.message : String(error)}.`,
+ );
+ }
+
+ if (!allowed) {
+ throw new Error(
+ `[MCPOAuth] Protected Resource Metadata 'resource' (${sanitizeUrlForLogging(resourceMetadata.resource)}) does not match server URL (${sanitizeUrlForLogging(serverUrl)}). Refusing OAuth flow (RFC 9728 §3.3).`,
+ );
+ }
+ }
+
/**
* Validates an OAuth URL is not targeting a private/internal address.
* Skipped when the full URL (hostname + protocol + port) matches an admin-trusted
diff --git a/packages/api/src/mcp/oauth/resourceHint.test.ts b/packages/api/src/mcp/oauth/resourceHint.test.ts
new file mode 100644
index 000000000000..3ab1f214ed37
--- /dev/null
+++ b/packages/api/src/mcp/oauth/resourceHint.test.ts
@@ -0,0 +1,260 @@
+import { probeResourceMetadataHint } from './resourceHint';
+
+jest.mock('@modelcontextprotocol/sdk/client/auth.js', () => ({
+ ...jest.requireActual('@modelcontextprotocol/sdk/client/auth.js'),
+}));
+
+describe('probeResourceMetadataHint', () => {
+ const originalFetch = global.fetch;
+ const mockFetch = jest.fn() as unknown as jest.MockedFunction<typeof fetch>;
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ global.fetch = mockFetch;
+ });
+
+ afterAll(() => {
+ global.fetch = originalFetch;
+ });
+
+ it('returns the resource_metadata URL from a HEAD 401 challenge', async () => {
+ const hintUrl = 'https://example.com/.well-known/oauth-protected-resource';
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': `Bearer resource_metadata="${hintUrl}"`,
+ }),
+ } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result).toEqual({
+ resourceMetadataUrl: new URL(hintUrl),
+ bearerChallenge: true,
+ headAuthChallenge: true,
+ });
+ expect(mockFetch).toHaveBeenCalledTimes(1);
+ expect(mockFetch.mock.calls[0][1]).toEqual(expect.objectContaining({ method: 'HEAD' }));
+ });
+
+ it('falls back to POST when HEAD does not return 401', async () => {
+ const hintUrl = 'https://example.com/.well-known/oauth-protected-resource';
+ mockFetch
+ .mockResolvedValueOnce({ status: 405, headers: new Headers() } as Response)
+ .mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': `Bearer resource_metadata="${hintUrl}"`,
+ }),
+ } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result?.resourceMetadataUrl?.toString()).toBe(hintUrl);
+ // POST's 401 must not set headAuthChallenge — HEAD was 405.
+ expect(result?.headAuthChallenge).toBe(false);
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ expect(mockFetch.mock.calls[1][1]).toEqual(
+ expect.objectContaining({
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({}),
+ }),
+ );
+ });
+
+ it('still probes POST when HEAD returned Bearer without a hint', async () => {
+ // HEAD 401 with Bearer-but-no-params means the server definitely speaks OAuth, but
+ // some implementations surface `resource_metadata` only on POST responses. Letting
+ // POST run ensures the authoritative hint isn't dropped just because HEAD was first.
+ const hintUrl = 'https://example.com/.well-known/oauth-protected-resource';
+ mockFetch
+ .mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({ 'www-authenticate': 'Bearer' }),
+ } as Response)
+ .mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': `Bearer resource_metadata="${hintUrl}"`,
+ }),
+ } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result?.resourceMetadataUrl?.toString()).toBe(hintUrl);
+ expect(result?.headAuthChallenge).toBe(true);
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
+
+ it('marks bearerChallenge even when no resource_metadata is advertised on either method', async () => {
+ mockFetch.mockResolvedValue({
+ status: 401,
+ headers: new Headers({ 'www-authenticate': 'Bearer realm="api"' }),
+ } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result).toEqual({
+ resourceMetadataUrl: undefined,
+ bearerChallenge: true,
+ headAuthChallenge: true,
+ });
+ // HEAD Bearer without hint → POST runs too.
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
+
+ it('ignores a malformed resource_metadata value without throwing', async () => {
+ // Defense: if the server advertises garbage in `resource_metadata=`, both the SDK
+ // parser and our regex fallback wrap `new URL()` in try/catch and yield `undefined`.
+ // Guard this behavior so a future refactor can't silently drop the safety net.
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': 'Bearer resource_metadata="not-a-url"',
+ }),
+ } as Response);
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': 'Bearer resource_metadata="not-a-url"',
+ }),
+ } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result).toEqual({
+ resourceMetadataUrl: undefined,
+ bearerChallenge: true,
+ headAuthChallenge: true,
+ });
+ });
+
+ it('extracts resource_metadata from multi-scheme challenges where Bearer is not first', async () => {
+ // RFC 7235 allows multiple schemes in one header. The SDK's `extractWWWAuthenticateParams`
+ // only parses the leading token, so a header like `Basic realm="api", Bearer resource_metadata="..."`
+ // would drop the authoritative hint — hence the local regex fallback.
+ const hintUrl = 'https://example.com/.well-known/oauth-protected-resource';
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({
+ 'www-authenticate': `Basic realm="api", Bearer resource_metadata="${hintUrl}"`,
+ }),
+ } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result?.resourceMetadataUrl?.toString()).toBe(hintUrl);
+ expect(result?.bearerChallenge).toBe(true);
+ expect(result?.headAuthChallenge).toBe(true);
+ });
+
+ it('returns a no-challenge result when both probes receive clean 200s', async () => {
+ mockFetch.mockResolvedValue({ status: 200, headers: new Headers() } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result).toEqual({
+ resourceMetadataUrl: undefined,
+ bearerChallenge: false,
+ headAuthChallenge: false,
+ });
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
+
+ it('returns null when the probe itself throws (e.g. network error)', async () => {
+ mockFetch.mockRejectedValue(new Error('Network error'));
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result).toBeNull();
+ });
+
+ it('uses the injected fetchFn so admin-configured oauthHeaders reach the probe', async () => {
+ // Simulates an MCP endpoint fronted by a gateway that requires a static API key
+ // header — without it, the gateway 401s before the MCP app ever sees the request,
+ // so the probe needs the OAuth-aware fetch wrapper to attach that header.
+ const customFetch = jest.fn(async () => {
+ return {
+ status: 401,
+ headers: new Headers({ 'www-authenticate': 'Bearer' }),
+ } as Response;
+ }) as unknown as typeof fetch;
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp', customFetch);
+
+ expect(result).toEqual({
+ resourceMetadataUrl: undefined,
+ bearerChallenge: true,
+ headAuthChallenge: true,
+ });
+ // HEAD + POST both via customFetch (Bearer-no-hint doesn't short-circuit).
+ expect(customFetch).toHaveBeenCalledTimes(2);
+ expect(mockFetch).not.toHaveBeenCalled();
+ });
+
+ it('surfaces headAuthChallenge when a non-Bearer 401 is the only response', async () => {
+ // Basic-only 401 carries no OAuth hint, but callers still need to know a 401 was
+ // seen so the MCP_OAUTH_ON_AUTH_ERROR fallback can fire without a duplicate HEAD.
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({ 'www-authenticate': 'Basic realm="api"' }),
+ } as Response);
+ mockFetch.mockResolvedValueOnce({
+ status: 401,
+ headers: new Headers({ 'www-authenticate': 'Basic realm="api"' }),
+ } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result).toEqual({
+ resourceMetadataUrl: undefined,
+ bearerChallenge: false,
+ headAuthChallenge: true,
+ });
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
+
+ it('surfaces headAuthChallenge when only a 403 is observed on HEAD', async () => {
+ mockFetch.mockResolvedValue({ status: 403, headers: new Headers() } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result).toEqual({
+ resourceMetadataUrl: undefined,
+ bearerChallenge: false,
+ headAuthChallenge: true,
+ });
+ });
+
+ it('returns null when HEAD threw so callers can retry via the fallback', async () => {
+ // A transient HEAD failure followed by an uninformative POST used to leak a
+ // {bearerChallenge: false, headAuthChallenge: false} result, silently skipping the
+ // MCP_OAUTH_ON_AUTH_ERROR retry. Signal "unknown" via `null` instead.
+ mockFetch
+ .mockRejectedValueOnce(new Error('ETIMEDOUT'))
+ .mockResolvedValueOnce({ status: 200, headers: new Headers() } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result).toBeNull();
+ expect(mockFetch).toHaveBeenCalledTimes(2);
+ });
+
+ it('does not set headAuthChallenge when only POST returns 401/403 (WAF/CSRF case)', async () => {
+ // Classic WAF/CSRF posture: HEAD cleanly returns 200, but a body-less JSON POST
+ // trips a rule and gets 403. This is not an OAuth signal and must not flip the
+ // `MCP_OAUTH_ON_AUTH_ERROR` fallback, so `headAuthChallenge` stays false.
+ mockFetch
+ .mockResolvedValueOnce({ status: 200, headers: new Headers() } as Response)
+ .mockResolvedValueOnce({ status: 403, headers: new Headers() } as Response);
+
+ const result = await probeResourceMetadataHint('https://example.com/mcp');
+
+ expect(result).toEqual({
+ resourceMetadataUrl: undefined,
+ bearerChallenge: false,
+ headAuthChallenge: false,
+ });
+ });
+});
diff --git a/packages/api/src/mcp/oauth/resourceHint.ts b/packages/api/src/mcp/oauth/resourceHint.ts
new file mode 100644
index 000000000000..66eb66ce36dc
--- /dev/null
+++ b/packages/api/src/mcp/oauth/resourceHint.ts
@@ -0,0 +1,139 @@
+import { extractWWWAuthenticateParams } from '@modelcontextprotocol/sdk/client/auth.js';
+import type { FetchLike } from '@modelcontextprotocol/sdk/shared/transport';
+import { mcpConfig } from '../mcpConfig';
+
+export interface ResourceHintProbeResult {
+ /** URL advertised via the `resource_metadata` parameter of a `WWW-Authenticate: Bearer` header, if any. */
+ resourceMetadataUrl?: URL;
+ /** True when the server answered 401 with a `WWW-Authenticate: Bearer` challenge (with or without parameters). */
+ bearerChallenge: boolean;
+ /**
+ * True when the *HEAD* probe specifically returned 401 or 403. Matches the semantics
+ * of the legacy `MCP_OAUTH_ON_AUTH_ERROR` HEAD-only fallback so the caller can skip a
+ * redundant HEAD. POST-only 401/403 is intentionally excluded — servers commonly 403
+ * a body-less JSON POST for WAF/CSRF reasons unrelated to OAuth, and those should not
+ * flip the fallback.
+ */
+ headAuthChallenge: boolean;
+}
+
+/**
+ * Probes an MCP server for an OAuth 401 challenge and extracts the RFC 6750
+ * `WWW-Authenticate` `resource_metadata` hint. Per RFC 9728 §5.1, clients SHOULD prefer
+ * that URL over path-aware well-known discovery; the value returned here is meant to be
+ * threaded into `discoverOAuthProtectedResourceMetadata` as `opts.resourceMetadataUrl`.
+ *
+ * Tries HEAD first (cheap). POST still runs unless HEAD already delivered the hint —
+ * some servers surface their Bearer challenge parameters only on POST (e.g. StackOverflow),
+ * so a HEAD-Bearer-without-hint is not enough to short-circuit discovery.
+ *
+ * When `fetchFn` is supplied (for example, the OAuth-aware wrapper built by the handler)
+ * it is used for both probes so admin-configured `oauthHeaders` are attached — a gateway
+ * that requires a static API key to reach the MCP endpoint would otherwise 401 us for the
+ * wrong reason and never surface the real Bearer challenge.
+ *
+ * @returns A `ResourceHintProbeResult` when the HEAD probe observed a response, or when the
+ * POST probe surfaced a definitive Bearer challenge / `resource_metadata` hint; `null` when
+ * the HEAD probe threw (DNS failure, timeout, etc.) and POST yielded no OAuth signal.
+ * Callers should treat `null` as "status unknown" and can choose to retry.
+ */
+export async function probeResourceMetadataHint(
+ serverUrl: string,
+ fetchFn: FetchLike = fetch,
+): Promise<ResourceHintProbeResult | null> {
+ const headResult = await probeWithMethod(serverUrl, 'HEAD', fetchFn);
+ /**
+ * Only short-circuit when HEAD already produced the authoritative hint. A Bearer
+ * challenge without `resource_metadata` is not enough: some servers emit the
+ * `resource_metadata` parameter only on POST responses, and we'd miss it by
+ * bailing here.
+ */
+ if (headResult?.resourceMetadataUrl) return headResult;
+
+ const postResult = await probeWithMethod(serverUrl, 'POST', fetchFn);
+ if (postResult?.resourceMetadataUrl || postResult?.bearerChallenge) {
+ // Carry HEAD's auth-challenge observation forward if we got one — the fallback
+ // decision is HEAD-only, so POST must not overwrite it back to false.
+ return { ...postResult, headAuthChallenge: !!headResult?.headAuthChallenge };
+ }
+
+ /**
+ * Invariant for callers: past the POST short-circuit above, a non-null return means
+ * the HEAD probe actually observed the server. If HEAD threw (DNS, timeout, reset)
+ * and POST produced no OAuth signal, report "unknown" with `null` so the
+ * `MCP_OAUTH_ON_AUTH_ERROR` fallback can still retry a fresh HEAD — otherwise a
+ * transient HEAD failure plus a normal POST 200 would silently skip the fallback
+ * and misclassify an OAuth-required server as open.
+ */
+ if (!headResult) return null;
+ if (postResult) return mergeProbes(headResult, postResult);
+ return headResult;
+}
+
+function mergeProbes(
+ head: ResourceHintProbeResult,
+ post: ResourceHintProbeResult,
+): ResourceHintProbeResult {
+ return {
+ resourceMetadataUrl: head.resourceMetadataUrl ?? post.resourceMetadataUrl,
+ bearerChallenge: head.bearerChallenge || post.bearerChallenge,
+ /**
+ * Only HEAD's observation feeds the fallback decision — POST-only 401/403 is too
+ * noisy a signal (WAF/CSRF rules routinely 403 a body-less JSON POST on endpoints
+ * that are not OAuth-protected at all).
+ */
+ headAuthChallenge: head.headAuthChallenge,
+ };
+}
+
+async function probeWithMethod(
+ serverUrl: string,
+ method: 'HEAD' | 'POST',
+ fetchFn: FetchLike,
+): Promise<ResourceHintProbeResult | null> {
+ try {
+ const fetchOptions: RequestInit = {
+ method,
+ signal: AbortSignal.timeout(mcpConfig.OAUTH_DETECTION_TIMEOUT),
+ };
+
+ if (method === 'POST') {
+ fetchOptions.headers = { 'Content-Type': 'application/json' };
+ fetchOptions.body = JSON.stringify({});
+ }
+
+ const response = await fetchFn(serverUrl, fetchOptions);
+ const headAuthChallenge =
+ method === 'HEAD' && (response.status === 401 || response.status === 403);
+
+ if (response.status !== 401) {
+ return { bearerChallenge: false, headAuthChallenge };
+ }
+
+ const wwwAuth = response.headers.get('www-authenticate');
+ if (!wwwAuth) return { bearerChallenge: false, headAuthChallenge };
+
+ const bearerChallenge = /bearer/i.test(wwwAuth);
+
+ /**
+ * The SDK's `extractWWWAuthenticateParams` checks only the *first* token of the
+ * header and returns `{}` for multi-scheme challenges like
+ * `Basic realm="api", Bearer resource_metadata="..."`. Fall back to our own regex
+ * across the whole header so those servers' authoritative hint isn't dropped.
+ */
+ const sdkParsed = extractWWWAuthenticateParams(response);
+ const resourceMetadataUrl = sdkParsed.resourceMetadataUrl ?? extractHintFromHeader(wwwAuth);
+
+ return { resourceMetadataUrl, bearerChallenge, headAuthChallenge };
+ } catch {
+ return null;
+ }
+}
+
+function extractHintFromHeader(header: string): URL | undefined {
+ const match = /resource_metadata="([^"]+)"/.exec(header);
+ if (!match?.[1]) return undefined;
+ try {
+ return new URL(match[1]);
+ } catch {
+ return undefined;
+ }
+}
diff --git a/packages/data-provider/src/file-config.ts b/packages/data-provider/src/file-config.ts
index 2a29b46943fd..9344884789c6 100644
--- a/packages/data-provider/src/file-config.ts
+++ b/packages/data-provider/src/file-config.ts
@@ -45,6 +45,7 @@ export const fullMimeTypesList = [
'text/x-tex',
'text/plain',
'text/css',
+ 'text/calendar',
'text/vtt',
'image/jpeg',
'text/javascript',
@@ -109,6 +110,7 @@ export const codeInterpreterMimeTypesList = [
'text/x-tex',
'text/plain',
'text/css',
+ 'text/calendar',
'image/jpeg',
'text/javascript',
'image/gif',
@@ -180,7 +182,7 @@ export const excelMimeTypes =
/^application\/(vnd\.ms-excel|msexcel|x-msexcel|x-ms-excel|x-excel|x-dos_ms_excel|xls|x-xls|vnd\.openxmlformats-officedocument\.spreadsheetml\.sheet)$/;
export const textMimeTypes =
- /^(text\/(x-c|x-csharp|tab-separated-values|x-c\+\+|x-h|x-java|html|markdown|x-php|x-python|x-script\.python|x-ruby|x-tex|plain|css|vtt|javascript|csv|xml))$/;
+ /^(text\/(x-c|x-csharp|tab-separated-values|x-c\+\+|x-h|x-java|html|markdown|x-php|x-python|x-script\.python|x-ruby|x-tex|plain|css|vtt|javascript|csv|xml|calendar))$/;
export const applicationMimeTypes =
/^(application\/(epub\+zip|csv|json|msword|pdf|x-tar|x-sh|typescript|sql|yaml|x-parquet|vnd\.apache\.parquet|vnd\.coffeescript|vnd\.openxmlformats-officedocument\.(wordprocessingml\.document|presentationml\.presentation|spreadsheetml\.sheet)|vnd\.oasis\.opendocument\.(text|spreadsheet|presentation|graphics)|xml|zip))$/;
@@ -351,6 +353,10 @@ export const codeTypeMapping: { [key: string]: string } = {
ods: 'application/vnd.oasis.opendocument.spreadsheet', // .ods - OpenDocument Spreadsheet
odp: 'application/vnd.oasis.opendocument.presentation', // .odp - OpenDocument Presentation
odg: 'application/vnd.oasis.opendocument.graphics', // .odg - OpenDocument Graphics
+ ics: 'text/calendar', // .ics - iCalendar
+ ical: 'text/calendar', // .ical - iCalendar
+ ifb: 'text/calendar', // .ifb - iCalendar free/busy
+ icalendar: 'text/calendar', // .icalendar - iCalendar
};
/** Maps image extensions to MIME types for formats browsers may not recognize */
diff --git a/packages/data-schemas/src/methods/agent.spec.ts b/packages/data-schemas/src/methods/agent.spec.ts
index 3184f51fa1e7..0c7042cde16b 100644
--- a/packages/data-schemas/src/methods/agent.spec.ts
+++ b/packages/data-schemas/src/methods/agent.spec.ts
@@ -51,6 +51,7 @@ let deleteUserAgents: AgentMethods['deleteUserAgents'];
let revertAgentVersion: AgentMethods['revertAgentVersion'];
let addAgentResourceFile: AgentMethods['addAgentResourceFile'];
let removeAgentResourceFiles: AgentMethods['removeAgentResourceFiles'];
+let removeAgentResourceFilesFromAllAgents: AgentMethods['removeAgentResourceFilesFromAllAgents'];
let getListAgentsByAccess: AgentMethods['getListAgentsByAccess'];
let generateActionMetadataHash: AgentMethods['generateActionMetadataHash'];
@@ -93,6 +94,7 @@ beforeAll(async () => {
revertAgentVersion = methods.revertAgentVersion;
addAgentResourceFile = methods.addAgentResourceFile;
removeAgentResourceFiles = methods.removeAgentResourceFiles;
+ removeAgentResourceFilesFromAllAgents = methods.removeAgentResourceFilesFromAllAgents;
getListAgentsByAccess = methods.getListAgentsByAccess;
generateActionMetadataHash = methods.generateActionMetadataHash;
@@ -2645,6 +2647,109 @@ describe('Agent Methods', () => {
).rejects.toThrow('Agent not found for removing resource files');
});
+ describe('removeAgentResourceFilesFromAllAgents', () => {
+ beforeEach(async () => {
+ await Agent.deleteMany({});
+ });
+
+ test('should strip deleted file_ids from every agent that references them', async () => {
+ const sharedFileId = `file_${uuidv4()}`;
+ const keeperFileId = `file_${uuidv4()}`;
+
+ const agentA = await createBasicAgent();
+ const agentB = await createBasicAgent();
+ const untouchedAgent = await createBasicAgent();
+
+ await addAgentResourceFile({
+ agent_id: agentA.id,
+ tool_resource: EToolResources.file_search,
+ file_id: sharedFileId,
+ });
+ await addAgentResourceFile({
+ agent_id: agentA.id,
+ tool_resource: EToolResources.file_search,
+ file_id: keeperFileId,
+ });
+ await addAgentResourceFile({
+ agent_id: agentB.id,
+ tool_resource: EToolResources.execute_code,
+ file_id: sharedFileId,
+ });
+ await addAgentResourceFile({
+ agent_id: untouchedAgent.id,
+ tool_resource: EToolResources.context,
+ file_id: keeperFileId,
+ });
+
+ const result = await removeAgentResourceFilesFromAllAgents({
+ file_ids: [sharedFileId],
+ });
+
+ expect(result.matchedCount).toBe(2);
+ expect(result.modifiedCount).toBe(2);
+
+ const updatedA = await getAgent({ id: agentA.id });
+ const updatedB = await getAgent({ id: agentB.id });
+ const updatedUntouched = await getAgent({ id: untouchedAgent.id });
+
+ const aFileIds = (updatedA!.tool_resources as Record<string, { file_ids: string[] }>)
+ .file_search.file_ids;
+ const bFileIds = (updatedB!.tool_resources as Record<string, { file_ids: string[] }>)
+ .execute_code.file_ids;
+ const untouchedFileIds = (
+ updatedUntouched!.tool_resources as Record<string, { file_ids: string[] }>
+ ).context.file_ids;
+
+ expect(aFileIds).not.toContain(sharedFileId);
+ expect(aFileIds).toContain(keeperFileId);
+ expect(bFileIds).not.toContain(sharedFileId);
+ expect(untouchedFileIds).toEqual([keeperFileId]);
+ });
+
+ test('should remove file_ids across multiple tool_resource types on the same agent', async () => {
+ const fileId = `file_${uuidv4()}`;
+ const agent = await createBasicAgent();
+
+ await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: EToolResources.file_search,
+ file_id: fileId,
+ });
+ await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: EToolResources.ocr,
+ file_id: fileId,
+ });
+
+ await removeAgentResourceFilesFromAllAgents({ file_ids: [fileId] });
+
+ const updated = await getAgent({ id: agent.id });
+ const resources = updated!.tool_resources as Record<string, { file_ids: string[] }>;
+ expect(resources.file_search.file_ids).not.toContain(fileId);
+ expect(resources.ocr.file_ids).not.toContain(fileId);
+ });
+
+ test('should no-op and not throw when file_ids is empty', async () => {
+ const result = await removeAgentResourceFilesFromAllAgents({ file_ids: [] });
+ expect(result).toEqual({ matchedCount: 0, modifiedCount: 0 });
+ });
+
+ test('should no-op when no agent references the given file_ids', async () => {
+ const fileId = `file_${uuidv4()}`;
+ const agent = await createBasicAgent();
+
+ await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: EToolResources.file_search,
+ file_id: `different_${uuidv4()}`,
+ });
+
+ const result = await removeAgentResourceFilesFromAllAgents({ file_ids: [fileId] });
+ expect(result.matchedCount).toBe(0);
+ expect(result.modifiedCount).toBe(0);
+ });
+ });
+
test('should handle updateAgent with complex nested updates', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
diff --git a/packages/data-schemas/src/methods/agent.ts b/packages/data-schemas/src/methods/agent.ts
index 36d2d819cb02..7cdfd68f5230 100644
--- a/packages/data-schemas/src/methods/agent.ts
+++ b/packages/data-schemas/src/methods/agent.ts
@@ -1,11 +1,26 @@
import crypto from 'node:crypto';
-import { Constants, ResourceType, actionDelimiter } from 'librechat-data-provider';
+import { Constants, EToolResources, ResourceType, actionDelimiter } from 'librechat-data-provider';
+import type { AgentToolResources } from 'librechat-data-provider';
import type { FilterQuery, Model, Types } from 'mongoose';
import type { IAgent, IAclEntry } from '~/types';
import logger from '~/config/winston';
const { mcp_delimiter } = Constants;
+/**
+ * Mirrors `TOOL_RESOURCE_KEYS` in `@librechat/api` — the subset of
+ * `EToolResources` that actually carries `file_ids` on an agent document.
+ * `code_interpreter` is excluded (it belongs to the Assistants API, not
+ * `AgentToolResources`) to avoid emitting dead MongoDB clauses.
+ */
+const TOOL_RESOURCE_KEYS: ReadonlyArray<keyof AgentToolResources> = [
+ EToolResources.execute_code,
+ EToolResources.file_search,
+ EToolResources.image_edit,
+ EToolResources.context,
+ EToolResources.ocr,
+];
+
export interface AgentDeps {
/** Removes all ACL permissions for a resource. Injected from PermissionService. */
removeAllPermissions: (params: { resourceType: string; resourceId: unknown }) => Promise<void>;
@@ -477,6 +492,37 @@ export function createAgentMethods(mongoose: typeof import('mongoose'), deps: Ag
return agentAfterPull;
}
+ /**
+ * Removes the given file_ids from every agent's `tool_resources.*.file_ids`
+ * so file deletion cannot leave orphaned stubs behind (see issue #12776).
+ */
+ async function removeAgentResourceFilesFromAllAgents({
+ file_ids,
+ }: {
+ file_ids: string[];
+ }): Promise<{ matchedCount: number; modifiedCount: number }> {
+ if (!file_ids || file_ids.length === 0) {
+ return { matchedCount: 0, modifiedCount: 0 };
+ }
+
+ const Agent = mongoose.models.Agent as Model<IAgent>;
+
+ const orQuery = TOOL_RESOURCE_KEYS.map((key) => ({
+ [`tool_resources.${key}.file_ids`]: { $in: file_ids },
+ }));
+
+ const pullAllOps = TOOL_RESOURCE_KEYS.reduce<Record<string, string[]>>((acc, key) => {
+ acc[`tool_resources.${key}.file_ids`] = file_ids;
+ return acc;
+ }, {});
+
+ const result = await Agent.updateMany({ $or: orQuery }, { $pullAll: pullAllOps });
+ return {
+ matchedCount: result.matchedCount ?? 0,
+ modifiedCount: result.modifiedCount ?? 0,
+ };
+ }
+
/**
* Deletes an agent based on the provided search parameter.
*/
@@ -774,6 +820,7 @@ export function createAgentMethods(mongoose: typeof import('mongoose'), deps: Ag
removeAgentResourceFiles,
generateActionMetadataHash,
removeAgentFromUserFavorites,
+ removeAgentResourceFilesFromAllAgents,
};
}