diff --git a/api/package.json b/api/package.json
index 7007791088c4..3abdbc83e655 100644
--- a/api/package.json
+++ b/api/package.json
@@ -36,14 +36,16 @@
"dependencies": {
"@anthropic-ai/vertex-sdk": "^0.14.3",
"@aws-sdk/client-bedrock-runtime": "^3.1013.0",
+ "@aws-sdk/client-cloudfront": "^3.1042.0",
"@aws-sdk/client-s3": "^3.980.0",
+ "@aws-sdk/cloudfront-signer": "^3.1036.0",
"@aws-sdk/s3-request-presigner": "^3.758.0",
"@azure/identity": "^4.13.1",
"@azure/search-documents": "^12.0.0",
"@azure/storage-blob": "^12.30.0",
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
- "@librechat/agents": "^3.1.77",
+ "@librechat/agents": "^3.1.78",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
@@ -105,6 +107,7 @@
"passport-local": "^1.0.0",
"pdfjs-dist": "^5.4.624",
"rate-limit-redis": "^4.2.0",
+ "sanitize-html": "^2.13.0",
"sharp": "^0.33.5",
"traverse": "^0.6.7",
"ua-parser-js": "^1.0.36",
@@ -116,6 +119,7 @@
"zod": "^3.22.4"
},
"devDependencies": {
+ "@types/sanitize-html": "^2.13.0",
"jest": "^30.2.0",
"mongodb-memory-server": "^11.0.1",
"nodemon": "^3.0.3",
diff --git a/api/server/controllers/AuthController.js b/api/server/controllers/AuthController.js
index eb44feffa4c0..d61bcc28449d 100644
--- a/api/server/controllers/AuthController.js
+++ b/api/server/controllers/AuthController.js
@@ -2,7 +2,7 @@ const cookies = require('cookie');
const jwt = require('jsonwebtoken');
const openIdClient = require('openid-client');
const { logger } = require('@librechat/data-schemas');
-const { isEnabled, findOpenIDUser } = require('@librechat/api');
+const { isEnabled, findOpenIDUser, getOpenIdIssuer } = require('@librechat/api');
const {
requestPasswordReset,
setOpenIDAuthTokens,
@@ -85,10 +85,12 @@ const refreshController = async (req, res) => {
refreshParams,
);
const claims = tokenset.claims();
+ const openidIssuer = getOpenIdIssuer(claims, openIdConfig);
const { user, error, migration } = await findOpenIDUser({
findUser,
email: getOpenIdEmail(claims),
openidId: claims.sub,
+ openidIssuer,
idOnTheSource: claims.oid,
strategyName: 'refreshController',
});
@@ -111,6 +113,7 @@ const refreshController = async (req, res) => {
await updateUser(user._id.toString(), {
provider: 'openid',
openidId: claims.sub,
+ ...(openidIssuer ? { openidIssuer } : {}),
});
logger.info(
`[refreshController] Updated user ${user.email} openidId (${reason}): ${user.openidId ?? 'null'} -> ${claims.sub}`,
diff --git a/api/server/controllers/AuthController.spec.js b/api/server/controllers/AuthController.spec.js
index 964947def9d1..8b19c28f36f8 100644
--- a/api/server/controllers/AuthController.spec.js
+++ b/api/server/controllers/AuthController.spec.js
@@ -23,6 +23,7 @@ jest.mock('~/models', () => ({
jest.mock('@librechat/api', () => ({
isEnabled: jest.fn(),
findOpenIDUser: jest.fn(),
+ getOpenIdIssuer: jest.fn(() => 'https://issuer.example.com'),
}));
const openIdClient = require('openid-client');
@@ -157,6 +158,7 @@ describe('refreshController – OpenID path', () => {
};
const baseClaims = {
+ iss: 'https://issuer.example.com',
sub: 'oidc-sub-123',
oid: 'oid-456',
email: 'user@example.com',
@@ -204,7 +206,10 @@ describe('refreshController – OpenID path', () => {
expect(getOpenIdEmail).toHaveBeenCalledWith(baseClaims);
expect(findOpenIDUser).toHaveBeenCalledWith(
- expect.objectContaining({ email: baseClaims.email }),
+ expect.objectContaining({
+ email: baseClaims.email,
+ openidIssuer: baseClaims.iss,
+ }),
);
expect(res.status).toHaveBeenCalledWith(200);
});
@@ -225,7 +230,10 @@ describe('refreshController – OpenID path', () => {
expect(getOpenIdEmail).toHaveBeenCalledWith(claimsWithUpn);
expect(findOpenIDUser).toHaveBeenCalledWith(
- expect.objectContaining({ email: 'user@corp.example.com' }),
+ expect.objectContaining({
+ email: 'user@corp.example.com',
+ openidIssuer: baseClaims.iss,
+ }),
);
expect(res.status).toHaveBeenCalledWith(200);
});
@@ -236,7 +244,10 @@ describe('refreshController – OpenID path', () => {
await refreshController(req, res);
expect(findOpenIDUser).toHaveBeenCalledWith(
- expect.objectContaining({ email: baseClaims.email }),
+ expect.objectContaining({
+ email: baseClaims.email,
+ openidIssuer: baseClaims.iss,
+ }),
);
});
@@ -267,7 +278,11 @@ describe('refreshController – OpenID path', () => {
expect(updateUser).toHaveBeenCalledWith(
'user-db-id',
- expect.objectContaining({ provider: 'openid', openidId: baseClaims.sub }),
+ expect.objectContaining({
+ provider: 'openid',
+ openidId: baseClaims.sub,
+ openidIssuer: baseClaims.iss,
+ }),
);
expect(res.status).toHaveBeenCalledWith(200);
});
diff --git a/api/server/controllers/agents/__tests__/callbacks.spec.js b/api/server/controllers/agents/__tests__/callbacks.spec.js
index 8bd711f9c6ea..0ba20d409cd2 100644
--- a/api/server/controllers/agents/__tests__/callbacks.spec.js
+++ b/api/server/controllers/agents/__tests__/callbacks.spec.js
@@ -28,6 +28,28 @@ jest.mock('~/server/services/Files/Citations', () => ({
jest.mock('~/server/services/Files/Code/process', () => ({
processCodeOutput: jest.fn(),
+ /* `runPreviewFinalize` is the runtime pairing for `finalize` (defined
+ * alongside processCodeOutput in process.js). The callback wires
+ * the deferred render through it; reproduce the basic happy-path here so the
+ * SSE-emit assertions still work. The catch/defensive-updateFile
+ * branch is unit-tested directly against the real helper in
+ * process.spec.js — exercising it here would add test coupling
+ * without coverage benefit. */
+ runPreviewFinalize: ({ finalize, onResolved }) => {
+ if (typeof finalize !== 'function') {
+ return;
+ }
+ finalize()
+ .then((updated) => {
+ if (!updated || !onResolved) {
+ return;
+ }
+ onResolved(updated);
+ })
+ .catch(() => {
+ /* swallowed in the mock — see process.spec.js for catch coverage */
+ });
+ },
}));
jest.mock('~/server/services/Tools/credentials', () => ({
@@ -326,4 +348,266 @@ describe('createToolEndCallback', () => {
expect(res.write).not.toHaveBeenCalled();
});
});
+
+ describe('code execution deferred-preview emit', () => {
+ /* The deferred-preview code-execution flow emits the attachment twice over
+ * SSE: the initial emit with `status: 'pending'` and the current run's
+ * messageId, the deferred render with the resolved record. The preview update emit
+ * must use the CURRENT run's messageId (not the persisted DB one)
+ * because `processCodeOutput` intentionally preserves the original
+ * `messageId` on cross-turn filename reuse — `getCodeGeneratedFiles`
+ * needs that for prior-turn priming.
+ *
+ * Codex P1 review on PR #12957: shipping `updated.messageId`
+ * straight from the DB record routed preview-update patches to the wrong
+ * message slot, leaving the current turn's pending chip stuck. */
+
+ const { processCodeOutput } = require('~/server/services/Files/Code/process');
+
+ function makeCodeExecutionEvent({ runId, threadId, toolCallId, fileId, name }) {
+ return {
+ output: {
+ name: 'execute_code',
+ tool_call_id: toolCallId,
+ artifact: {
+ session_id: 'sess-1',
+ files: [{ id: fileId, name, session_id: 'sess-1' }],
+ },
+ },
+ metadata: { run_id: runId, thread_id: threadId },
+ };
+ }
+
+ /** Parse the SSE frame `res.write` produces back to a payload object. */
+ function parseSseAttachment(call) {
+ const frame = call[0];
+ const dataLine = frame.split('\n').find((l) => l.startsWith('data: '));
+ return JSON.parse(dataLine.slice('data: '.length));
+ }
+
+ it('the preview update emit uses the current run messageId, not the persisted DB messageId (cross-turn filename reuse)', async () => {
+ /* Simulate turn-2 reusing `output.csv` from turn-1. The DB record
+ * surfaced by `updateFile` carries the original `turn-1-msg`
+ * messageId; the runtime emit must rewrite to `turn-2-msg`. */
+ res.headersSent = true;
+ const finalize = jest.fn().mockResolvedValue({
+ file_id: 'fid-shared',
+ filename: 'output.csv',
+ filepath: '/uploads/output.csv',
+ type: 'text/csv',
+ conversationId: 'thread789',
+ messageId: 'turn-1-original-msg', // persisted DB id (older turn)
+ status: 'ready',
+        text: '',
+ textFormat: 'html',
+ });
+ processCodeOutput.mockResolvedValue({
+ file: {
+ file_id: 'fid-shared',
+ filename: 'output.csv',
+ filepath: '/uploads/output.csv',
+ type: 'text/csv',
+ conversationId: 'thread789',
+ messageId: 'turn-2-current-run', // runtime overlay (current turn)
+ toolCallId: 'tool-2',
+ status: 'pending',
+ text: null,
+ textFormat: null,
+ },
+ finalize,
+ });
+
+ const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
+ const event = makeCodeExecutionEvent({
+ runId: 'turn-2-current-run',
+ threadId: 'thread789',
+ toolCallId: 'tool-2',
+ fileId: 'fid-shared',
+ name: 'output.csv',
+ });
+ await toolEndCallback({ output: event.output }, event.metadata);
+ await Promise.all(artifactPromises);
+ // Wait one more tick so the fire-and-forget finalize() chain settles.
+ await new Promise((resolve) => setImmediate(resolve));
+
+ // Two SSE writes: the initial emit (pending) and the deferred render (ready).
+ expect(res.write).toHaveBeenCalledTimes(2);
+ const phase1 = parseSseAttachment(res.write.mock.calls[0]);
+ const phase2 = parseSseAttachment(res.write.mock.calls[1]);
+
+ // Initial emit already used the runtime messageId (sourced from result.file).
+ expect(phase1.messageId).toBe('turn-2-current-run');
+ expect(phase1.status).toBe('pending');
+
+ /* The preview update MUST also route to the current run's messageId so the
+ * frontend's `useAttachmentHandler` upserts under the same
+ * messageAttachmentsMap slot as the initial emit. Routing to
+ * `turn-1-original-msg` would land the patch on a stale message
+ * and leave turn-2's pending chip stuck. */
+ expect(phase2.messageId).toBe('turn-2-current-run');
+ expect(phase2.file_id).toBe('fid-shared');
+ expect(phase2.status).toBe('ready');
+ expect(phase2.text).toBe('');
+ expect(phase2.toolCallId).toBe('tool-2');
+ /* Wire-shape parity with the initial emit: preview update emits the full updated
+ * record so the client doesn't see one shape on the initial emit and a
+ * narrower projection on the deferred render. (Codex audit on PR #12957
+ * Finding 1.) */
+ expect(phase2.filename).toBe('output.csv');
+ expect(phase2.filepath).toBe('/uploads/output.csv');
+ expect(phase2.type).toBe('text/csv');
+ expect(phase2.conversationId).toBe('thread789');
+ expect(phase2.textFormat).toBe('html');
+ });
+
+ it('the preview update emit is skipped when finalize resolves to null (no DB update happened)', async () => {
+ res.headersSent = true;
+ processCodeOutput.mockResolvedValue({
+ file: {
+ file_id: 'fid-1',
+ filename: 'data.xlsx',
+ filepath: '/uploads/data.xlsx',
+ type: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
+ messageId: 'run-1',
+ toolCallId: 'tool-1',
+ status: 'pending',
+ },
+ finalize: jest.fn().mockResolvedValue(null),
+ });
+
+ const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
+ const event = makeCodeExecutionEvent({
+ runId: 'run-1',
+ threadId: 'thread-1',
+ toolCallId: 'tool-1',
+ fileId: 'fid-1',
+ name: 'data.xlsx',
+ });
+ await toolEndCallback({ output: event.output }, event.metadata);
+ await Promise.all(artifactPromises);
+ await new Promise((resolve) => setImmediate(resolve));
+
+ // Only the initial emit fired; preview update noop'd because finalize returned null.
+ expect(res.write).toHaveBeenCalledTimes(1);
+ });
+
+ it('the preview update emit is skipped when the response stream has already closed', async () => {
+ res.headersSent = true;
+ /* Hand-rolled deferred so we can hold finalize() open until
+ * AFTER setting `res.writableEnded = true`. Otherwise the mock
+ * resolves synchronously, the .then() runs in the same microtask
+ * queue as the artifactPromises await, and writableEnded is set
+ * too late. */
+ let resolveFinalize;
+ const finalizeDeferred = new Promise((resolve) => {
+ resolveFinalize = resolve;
+ });
+ processCodeOutput.mockResolvedValue({
+ file: {
+ file_id: 'fid-1',
+ filename: 'data.xlsx',
+ filepath: '/uploads/data.xlsx',
+ type: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
+ messageId: 'run-1',
+ toolCallId: 'tool-1',
+ status: 'pending',
+ },
+ finalize: jest.fn().mockReturnValue(finalizeDeferred),
+ });
+
+ const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
+ const event = makeCodeExecutionEvent({
+ runId: 'run-1',
+ threadId: 'thread-1',
+ toolCallId: 'tool-1',
+ fileId: 'fid-1',
+ name: 'data.xlsx',
+ });
+ await toolEndCallback({ output: event.output }, event.metadata);
+ await Promise.all(artifactPromises);
+ // Simulate the response closing AFTER the initial emit fires but BEFORE
+ // the deferred render lands. The frontend's polling path will catch the
+ // resolved record on its next tick.
+ res.writableEnded = true;
+ // Now resolve finalize and let the .then() chain run.
+ resolveFinalize({
+ file_id: 'fid-1',
+ filename: 'data.xlsx',
+ messageId: 'run-1',
+ status: 'ready',
+ text: '',
+ textFormat: 'html',
+ });
+ await new Promise((resolve) => setImmediate(resolve));
+
+ // Initial emit wrote; preview update noop'd because writableEnded.
+ expect(res.write).toHaveBeenCalledTimes(1);
+ });
+
+ it('does not call finalize for a non-office file (no preview expected)', async () => {
+ res.headersSent = true;
+ processCodeOutput.mockResolvedValue({
+ file: {
+ file_id: 'fid-txt',
+ filename: 'note.txt',
+ filepath: '/uploads/note.txt',
+ type: 'text/plain',
+ messageId: 'run-1',
+ toolCallId: 'tool-1',
+ // No status — non-office files skip the deferred render entirely.
+ },
+ // No finalize key — caller should not call anything.
+ });
+
+ const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
+ const event = makeCodeExecutionEvent({
+ runId: 'run-1',
+ threadId: 'thread-1',
+ toolCallId: 'tool-1',
+ fileId: 'fid-txt',
+ name: 'note.txt',
+ });
+ await toolEndCallback({ output: event.output }, event.metadata);
+ await Promise.all(artifactPromises);
+ await new Promise((resolve) => setImmediate(resolve));
+
+ expect(res.write).toHaveBeenCalledTimes(1);
+ });
+ });
+});
+
+describe('isStreamWritable', () => {
+ /* Direct parametric coverage of the predicate that gates SSE writes
+ * in both the chat-completions and Open Responses callbacks. The
+ * existing deferred-preview tests cover this indirectly via the
+ * `writeAttachmentUpdate` writableEnded path; these tests pin down
+ * each individual branch so a future modification (e.g. adding a
+ * new condition) can't silently regress.
+ * (Comprehensive review NIT on PR #12957.) */
+ const { isStreamWritable } = require('../callbacks');
+
+ it('returns true when streamId is truthy regardless of res state', () => {
+ /* Resumable mode writes go to the job emitter; res state is
+ * irrelevant. Even a closed res with no headers should not block. */
+ expect(isStreamWritable(null, 'stream-1')).toBe(true);
+ expect(isStreamWritable({ headersSent: false, writableEnded: true }, 'stream-1')).toBe(true);
+ expect(isStreamWritable(undefined, 'stream-1')).toBe(true);
+ });
+
+ it('returns false when streamId is falsy and res is null/undefined', () => {
+ expect(isStreamWritable(null, null)).toBe(false);
+ expect(isStreamWritable(undefined, null)).toBe(false);
+ });
+
+ it('returns false when headers have not been sent yet', () => {
+ expect(isStreamWritable({ headersSent: false, writableEnded: false }, null)).toBe(false);
+ });
+
+ it('returns false when the stream has already ended', () => {
+ expect(isStreamWritable({ headersSent: true, writableEnded: true }, null)).toBe(false);
+ });
+
+ it('returns true on the happy path: headers sent, not ended, no streamId', () => {
+ expect(isStreamWritable({ headersSent: true, writableEnded: false }, null)).toBe(true);
+ });
});
diff --git a/api/server/controllers/agents/callbacks.js b/api/server/controllers/agents/callbacks.js
index 2af4d5b45198..c9612c6b6248 100644
--- a/api/server/controllers/agents/callbacks.js
+++ b/api/server/controllers/agents/callbacks.js
@@ -15,7 +15,7 @@ const {
createToolExecuteHandler,
} = require('@librechat/api');
const { processFileCitations } = require('~/server/services/Files/Citations');
-const { processCodeOutput } = require('~/server/services/Files/Code/process');
+const { processCodeOutput, runPreviewFinalize } = require('~/server/services/Files/Code/process');
const { saveBase64Image } = require('~/server/services/Files/process');
class ModelEndHandler {
@@ -397,6 +397,55 @@ function writeAttachment(res, streamId, attachment) {
}
}
+/**
+ * Predicate: is it safe to push an SSE write to the caller right now?
+ *
+ * In `streamId` (resumable) mode, writes go to the job emitter and the
+ * `res` state is irrelevant — always writable.
+ *
+ * In standard mode, the caller's `res` must have headers sent (the
+ * stream has been opened) and not yet be `writableEnded` (the response
+ * hasn't closed). Writing to a closed stream raises
+ * `ERR_STREAM_WRITE_AFTER_END`.
+ *
+ * Used by deferred preview emits in both `createToolEndCallback`
+ * (chat-completions) and `createResponsesToolEndCallback` (Open
+ * Responses) so the gate logic stays in one place. (Comprehensive
+ * review #3 on PR #12957.)
+ */
+function isStreamWritable(res, streamId) {
+ if (streamId) {
+ return true;
+ }
+ return !!res && res.headersSent && !res.writableEnded;
+}
+
+/**
+ * Emit an update for an attachment that was previously sent with
+ * `status: 'pending'`. Fire-and-forget: if the response stream has
+ * already closed (the agent finished generating before the deferred
+ * preview resolved) the frontend's React Query polling on
+ * `/api/files/:file_id/preview` picks up the resolved record on its
+ * next tick. Skipping the write in that case avoids
+ * `ERR_STREAM_WRITE_AFTER_END`.
+ *
+ * Reuses the `attachment` SSE event name with a discriminated payload:
+ * the frontend's `useAttachmentHandler` upserts by `file_id`, so a
+ * second event with the same id and `status: 'ready' | 'failed'`
+ * overwrites the pending placeholder in place. No new event type, no
+ * new client listener.
+ *
+ * @param {ServerResponse} res
+ * @param {string | null} streamId
+ * @param {Object} attachment - Updated attachment payload (must carry `file_id`).
+ */
+function writeAttachmentUpdate(res, streamId, attachment) {
+ if (!isStreamWritable(res, streamId)) {
+ return;
+ }
+ writeAttachment(res, streamId, attachment);
+}
+
/**
*
* @param {Object} params
@@ -556,14 +605,15 @@ function createToolEndCallback({ req, res, artifactPromises, streamId = null })
continue;
}
const { id, name } = file;
+ const toolCallId = output.tool_call_id;
artifactPromises.push(
(async () => {
- const fileMetadata = await processCodeOutput({
+ const result = await processCodeOutput({
req,
id,
name,
messageId: metadata.run_id,
- toolCallId: output.tool_call_id,
+ toolCallId,
conversationId: metadata.thread_id,
/**
* Use the FILE's session_id (storage session), not the
@@ -583,15 +633,54 @@ function createToolEndCallback({ req, res, artifactPromises, streamId = null })
*/
session_id: file.session_id ?? output.artifact.session_id,
});
- if (!streamId && !res.headersSent) {
- return fileMetadata;
- }
-
+ const fileMetadata = result?.file ?? null;
+ const finalize = result?.finalize;
if (!fileMetadata) {
return null;
}
-
- writeAttachment(res, streamId, fileMetadata);
+ /* Initial emit: ship the attachment to the client immediately
+ * (carries `status: 'pending'` for office buckets so the UI
+ * shows "preparing preview…"). The agent's response stops
+ * blocking on extraction here.
+ *
+ * Use the shared `isStreamWritable` predicate rather than the
+ * narrower `streamId || res.headersSent` check that lived
+ * here before — a client disconnect mid-stream
+ * (`res.writableEnded`) would otherwise hit `res.write` and
+ * raise `ERR_STREAM_WRITE_AFTER_END` (caught by the outer
+ * IIFE catch but logged as noise). Same gate the Responses
+ * path uses below. */
+ if (isStreamWritable(res, streamId)) {
+ writeAttachment(res, streamId, fileMetadata);
+ }
+ /* Deferred preview rendering: extraction continues running
+ * even after the HTTP response closes. If the stream is still
+ * open when the preview resolves, push an `attachment`
+ * update event so the UI patches in place; otherwise React
+ * Query polling on `/api/files/:file_id/preview` picks it up.
+ *
+ * Spread the full updated record (mirroring the initial emit
+ * shape) and overlay `messageId`/`toolCallId` from the
+ * current run. The DB record preserves the original
+ * `messageId` across cross-turn filename reuse so
+ * `getCodeGeneratedFiles` can trace the file back to its
+ * original assistant message; routing the update SSE by the
+ * persisted id would land the patch on a stale message
+ * slot — turn-N's pending placeholder would stay stuck while
+ * turn-1's already-resolved attachment got re-merged.
+ * (Codex P1 review on PR #12957.) */
+ runPreviewFinalize({
+ finalize,
+ fileId: fileMetadata.file_id,
+ previewRevision: result?.previewRevision,
+ onResolved: (updated) => {
+ writeAttachmentUpdate(res, streamId, {
+ ...updated,
+ messageId: metadata.run_id,
+ toolCallId,
+ });
+ },
+ });
return fileMetadata;
})().catch((error) => {
logger.error('Error processing code output:', error);
@@ -782,14 +871,15 @@ function createResponsesToolEndCallback({ req, res, tracker, artifactPromises })
continue;
}
const { id, name } = file;
+ const toolCallId = output.tool_call_id;
artifactPromises.push(
(async () => {
- const fileMetadata = await processCodeOutput({
+ const result = await processCodeOutput({
req,
id,
name,
messageId: metadata.run_id,
- toolCallId: output.tool_call_id,
+ toolCallId,
conversationId: metadata.thread_id,
/**
* Use the FILE's session_id (storage session), not the
@@ -809,25 +899,45 @@ function createResponsesToolEndCallback({ req, res, tracker, artifactPromises })
*/
session_id: file.session_id ?? output.artifact.session_id,
});
-
+ const fileMetadata = result?.file ?? null;
+ const finalize = result?.finalize;
if (!fileMetadata) {
return null;
}
- // For Responses API, emit attachment during streaming
- if (res.headersSent && !res.writableEnded) {
- const attachment = {
- file_id: fileMetadata.file_id,
- filename: fileMetadata.filename,
- type: fileMetadata.type,
- url: fileMetadata.filepath,
- width: fileMetadata.width,
- height: fileMetadata.height,
- tool_call_id: output.tool_call_id,
- };
- writeResponsesAttachment(res, tracker, attachment, metadata);
+ /* Initial emit (Open Responses extension format). The agent's
+ * response no longer blocks on extraction. */
+ if (isStreamWritable(res, null)) {
+ writeResponsesAttachment(
+ res,
+ tracker,
+ buildResponsesAttachment(fileMetadata, toolCallId),
+ metadata,
+ );
}
+ /* Deferred preview rendering: extract HTML in the background
+ * and emit a follow-up `librechat:attachment` with the same
+ * `file_id` so the client merges the resolved record over the
+ * pending placeholder. Fire-and-forget — survives response
+ * close; polling covers the post-close gap. */
+ runPreviewFinalize({
+ finalize,
+ fileId: fileMetadata.file_id,
+ previewRevision: result?.previewRevision,
+ onResolved: (updated) => {
+ if (!isStreamWritable(res, null)) {
+ return;
+ }
+ writeResponsesAttachment(
+ res,
+ tracker,
+ buildResponsesAttachment(updated, toolCallId),
+ metadata,
+ );
+ },
+ });
+
return fileMetadata;
})().catch((error) => {
logger.error('Error processing code output:', error);
@@ -838,6 +948,28 @@ function createResponsesToolEndCallback({ req, res, tracker, artifactPromises })
};
}
+/**
+ * Project a file metadata record into the Open Responses attachment
+ * shape. Mirrors the legacy inline projection but adds `status` and
+ * `previewError` so deferred preview updates carry the lifecycle
+ * signal the client uses to upsert by `file_id`.
+ */
+function buildResponsesAttachment(fileMetadata, toolCallId) {
+ return {
+ file_id: fileMetadata.file_id,
+ filename: fileMetadata.filename,
+ type: fileMetadata.type,
+ url: fileMetadata.filepath,
+ width: fileMetadata.width,
+ height: fileMetadata.height,
+ tool_call_id: toolCallId,
+ text: fileMetadata.text ?? null,
+ textFormat: fileMetadata.textFormat ?? null,
+ status: fileMetadata.status,
+ previewError: fileMetadata.previewError,
+ };
+}
+
const ALLOWED_LOG_LEVELS = new Set(['debug', 'info', 'warn', 'error']);
function agentLogHandler(_event, data) {
@@ -893,6 +1025,7 @@ module.exports = {
agentLogHandlerObj,
getDefaultHandlers,
createToolEndCallback,
+ isStreamWritable,
markSummarizationUsage,
buildSummarizationHandlers,
createResponsesToolEndCallback,
diff --git a/api/server/controllers/auth/LogoutController.js b/api/server/controllers/auth/LogoutController.js
index 381bfc58b2db..ae1c94a7c9aa 100644
--- a/api/server/controllers/auth/LogoutController.js
+++ b/api/server/controllers/auth/LogoutController.js
@@ -1,5 +1,5 @@
const cookies = require('cookie');
-const { isEnabled } = require('@librechat/api');
+const { isEnabled, clearCloudFrontCookies } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { logoutUser } = require('~/server/services/AuthService');
const { getOpenIdConfig } = require('~/strategies');
@@ -44,6 +44,7 @@ const logoutController = async (req, res) => {
res.clearCookie('openid_id_token');
res.clearCookie('openid_user_id');
res.clearCookie('token_provider');
+ clearCloudFrontCookies(res);
const response = { message };
if (
isOpenIdUser &&
diff --git a/api/server/controllers/auth/LogoutController.spec.js b/api/server/controllers/auth/LogoutController.spec.js
index c9294fdcec67..ff02f5237e6d 100644
--- a/api/server/controllers/auth/LogoutController.spec.js
+++ b/api/server/controllers/auth/LogoutController.spec.js
@@ -4,9 +4,13 @@ const mockLogoutUser = jest.fn();
const mockLogger = { warn: jest.fn(), error: jest.fn(), debug: jest.fn() };
const mockIsEnabled = jest.fn();
const mockGetOpenIdConfig = jest.fn();
+const mockClearCloudFrontCookies = jest.fn();
jest.mock('cookie');
-jest.mock('@librechat/api', () => ({ isEnabled: (...args) => mockIsEnabled(...args) }));
+jest.mock('@librechat/api', () => ({
+ isEnabled: (...args) => mockIsEnabled(...args),
+ clearCloudFrontCookies: (...args) => mockClearCloudFrontCookies(...args),
+}));
jest.mock('@librechat/data-schemas', () => ({ logger: mockLogger }));
jest.mock('~/server/services/AuthService', () => ({
logoutUser: (...args) => mockLogoutUser(...args),
@@ -255,6 +259,15 @@ describe('LogoutController', () => {
expect(res.clearCookie).toHaveBeenCalledWith('openid_user_id');
expect(res.clearCookie).toHaveBeenCalledWith('token_provider');
});
+
+ it('calls clearCloudFrontCookies on successful logout', async () => {
+ const req = buildReq();
+ const res = buildRes();
+
+ await logoutController(req, res);
+
+ expect(mockClearCloudFrontCookies).toHaveBeenCalledWith(res);
+ });
});
describe('URL length limit and logout_hint fallback', () => {
diff --git a/api/server/controllers/tools.js b/api/server/controllers/tools.js
index 8124894584cc..07be1210c141 100644
--- a/api/server/controllers/tools.js
+++ b/api/server/controllers/tools.js
@@ -10,7 +10,7 @@ const {
} = require('librechat-data-provider');
const { getRoleByName, createToolCall, getToolCallsByConvo, getMessage } = require('~/models');
const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process');
-const { processCodeOutput } = require('~/server/services/Files/Code/process');
+const { processCodeOutput, runPreviewFinalize } = require('~/server/services/Files/Code/process');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { loadTools } = require('~/app/clients/tools/util');
@@ -192,7 +192,7 @@ const callTool = async (req, res) => {
const { id, name } = file;
artifactPromises.push(
(async () => {
- const fileMetadata = await processCodeOutput({
+ const result = await processCodeOutput({
req,
id,
name,
@@ -201,11 +201,22 @@ const callTool = async (req, res) => {
conversationId,
session_id: artifact.session_id,
});
-
+ const fileMetadata = result?.file ?? null;
+ const finalize = result?.finalize;
if (!fileMetadata) {
return null;
}
-
+ /* This endpoint is non-streaming and its contract is "give
+ * me the artifacts" — return the persisted record immediately
+ * (with `status: 'pending'` for office buckets) and run the
+ * preview render in the background. The client polls
+ * `/api/files/:file_id/preview` for the resolved record.
+ * No `onResolved` — there's no live stream to write to here. */
+ runPreviewFinalize({
+ finalize,
+ fileId: fileMetadata.file_id,
+ previewRevision: result?.previewRevision,
+ });
return fileMetadata;
})().catch((error) => {
logger.error('Error processing code output:', error);
diff --git a/api/server/experimental.js b/api/server/experimental.js
index cd594c169676..b12b9deffe1e 100644
--- a/api/server/experimental.js
+++ b/api/server/experimental.js
@@ -10,7 +10,7 @@ const express = require('express');
const passport = require('passport');
const compression = require('compression');
const cookieParser = require('cookie-parser');
-const { logger } = require('@librechat/data-schemas');
+const { logger, runAsSystem } = require('@librechat/data-schemas');
const mongoSanitize = require('express-mongo-sanitize');
const {
isEnabled,
@@ -26,7 +26,12 @@ const initializeOAuthReconnectManager = require('./services/initializeOAuthRecon
const createValidateImageRequest = require('./middleware/validateImageRequest');
const { jwtLogin, ldapLogin, passportLogin } = require('~/strategies');
const { updateInterfacePermissions: updateInterfacePerms } = require('@librechat/api');
-const { getRoleByName, updateAccessPermissions, seedDatabase } = require('~/models');
+const {
+ getRoleByName,
+ updateAccessPermissions,
+ seedDatabase,
+ sweepOrphanedPreviews,
+} = require('~/models');
const { checkMigrations } = require('./services/start/migration');
const initializeMCPs = require('./services/initializeMCPs');
const configureSocialLogins = require('./socialLogins');
@@ -220,6 +225,11 @@ if (cluster.isMaster) {
/** Seed database (idempotent) */
await seedDatabase();
+ /* Mirrors `server/index.js`; `runAsSystem` for tenant-isolated File. */
+ runAsSystem(sweepOrphanedPreviews).catch((err) => {
+ logger.error('[sweepOrphanedPreviews] Background sweep failed:', err);
+ });
+
/** Initialize app configuration */
const appConfig = await getAppConfig();
initializeFileStorage(appConfig);
diff --git a/api/server/index.js b/api/server/index.js
index d798f1a1661a..6bc4a131e6ad 100644
--- a/api/server/index.js
+++ b/api/server/index.js
@@ -25,7 +25,12 @@ const {
} = require('@librechat/api');
const { connectDb, indexSync } = require('~/db');
const initializeOAuthReconnectManager = require('./services/initializeOAuthReconnectManager');
-const { getRoleByName, updateAccessPermissions, seedDatabase } = require('~/models');
+const {
+ getRoleByName,
+ updateAccessPermissions,
+ seedDatabase,
+ sweepOrphanedPreviews,
+} = require('~/models');
const { capabilityContextMiddleware } = require('./middleware/roles/capabilities');
const createValidateImageRequest = require('./middleware/validateImageRequest');
const { jwtLogin, ldapLogin, passportLogin } = require('~/strategies');
@@ -69,6 +74,13 @@ const startServer = async () => {
}
await runAsSystem(seedDatabase);
+ /* Recover stuck `status: 'pending'` records from a crash mid-render.
+ * `runAsSystem` is required — `File` is tenant-isolated and strict
+ * mode rejects unscoped queries. Lazy sweep in the preview endpoint
+ * covers anything younger than the boot cutoff. */
+ runAsSystem(sweepOrphanedPreviews).catch((err) => {
+ logger.error('[sweepOrphanedPreviews] Background sweep failed:', err);
+ });
const appConfig = await getAppConfig({ baseOnly: true });
initializeFileStorage(appConfig);
await runAsSystem(async () => {
diff --git a/api/server/routes/agents/middleware.js b/api/server/routes/agents/middleware.js
new file mode 100644
index 000000000000..f71c25c6f8fd
--- /dev/null
+++ b/api/server/routes/agents/middleware.js
@@ -0,0 +1,41 @@
+const { PermissionTypes, Permissions } = require('librechat-data-provider');
+const {
+ generateCheckAccess,
+ preAuthTenantMiddleware,
+ createRequireApiKeyAuth,
+ createRemoteAgentAuth,
+ createCheckRemoteAgentAccess,
+} = require('@librechat/api');
+const { getEffectivePermissions } = require('~/server/services/PermissionService');
+const { getAppConfig } = require('~/server/services/Config');
+const db = require('~/models');
+
+const apiKeyMiddleware = createRequireApiKeyAuth({
+ validateAgentApiKey: db.validateAgentApiKey,
+ findUser: db.findUser,
+});
+
+const requireRemoteAgentAuth = createRemoteAgentAuth({
+ apiKeyMiddleware,
+ findUser: db.findUser,
+ updateUser: db.updateUser,
+ getAppConfig,
+});
+
+const checkRemoteAgentsFeature = generateCheckAccess({
+ permissionType: PermissionTypes.REMOTE_AGENTS,
+ permissions: [Permissions.USE],
+ getRoleByName: db.getRoleByName,
+});
+
+const checkAgentPermission = createCheckRemoteAgentAccess({
+ getAgent: db.getAgent,
+ getEffectivePermissions,
+});
+
+module.exports = {
+ checkAgentPermission,
+ preAuthTenantMiddleware,
+ requireRemoteAgentAuth,
+ checkRemoteAgentsFeature,
+};
diff --git a/api/server/routes/agents/openai.js b/api/server/routes/agents/openai.js
index 72e3da6c5a0e..fa7f9b26c811 100644
--- a/api/server/routes/agents/openai.js
+++ b/api/server/routes/agents/openai.js
@@ -17,40 +17,23 @@
* }
*/
const express = require('express');
-const { PermissionTypes, Permissions } = require('librechat-data-provider');
-const {
- generateCheckAccess,
- createRequireApiKeyAuth,
- createCheckRemoteAgentAccess,
-} = require('@librechat/api');
const {
OpenAIChatCompletionController,
ListModelsController,
GetModelController,
} = require('~/server/controllers/agents/openai');
-const { getEffectivePermissions } = require('~/server/services/PermissionService');
const { configMiddleware } = require('~/server/middleware');
-const db = require('~/models');
+const {
+ checkAgentPermission,
+ preAuthTenantMiddleware,
+ requireRemoteAgentAuth,
+ checkRemoteAgentsFeature,
+} = require('./middleware');
const router = express.Router();
-const requireApiKeyAuth = createRequireApiKeyAuth({
- validateAgentApiKey: db.validateAgentApiKey,
- findUser: db.findUser,
-});
-
-const checkRemoteAgentsFeature = generateCheckAccess({
- permissionType: PermissionTypes.REMOTE_AGENTS,
- permissions: [Permissions.USE],
- getRoleByName: db.getRoleByName,
-});
-
-const checkAgentPermission = createCheckRemoteAgentAccess({
- getAgent: db.getAgent,
- getEffectivePermissions,
-});
-
-router.use(requireApiKeyAuth);
+router.use(preAuthTenantMiddleware);
+router.use(requireRemoteAgentAuth);
router.use(configMiddleware);
router.use(checkRemoteAgentsFeature);
diff --git a/api/server/routes/agents/responses.js b/api/server/routes/agents/responses.js
index 2c118e059712..401025bfd62a 100644
--- a/api/server/routes/agents/responses.js
+++ b/api/server/routes/agents/responses.js
@@ -20,40 +20,23 @@
* @see https://openresponses.org/specification
*/
const express = require('express');
-const { PermissionTypes, Permissions } = require('librechat-data-provider');
-const {
- generateCheckAccess,
- createRequireApiKeyAuth,
- createCheckRemoteAgentAccess,
-} = require('@librechat/api');
const {
createResponse,
getResponse,
listModels,
} = require('~/server/controllers/agents/responses');
-const { getEffectivePermissions } = require('~/server/services/PermissionService');
const { configMiddleware } = require('~/server/middleware');
-const db = require('~/models');
+const {
+ checkAgentPermission,
+ preAuthTenantMiddleware,
+ requireRemoteAgentAuth,
+ checkRemoteAgentsFeature,
+} = require('./middleware');
const router = express.Router();
-const requireApiKeyAuth = createRequireApiKeyAuth({
- validateAgentApiKey: db.validateAgentApiKey,
- findUser: db.findUser,
-});
-
-const checkRemoteAgentsFeature = generateCheckAccess({
- permissionType: PermissionTypes.REMOTE_AGENTS,
- permissions: [Permissions.USE],
- getRoleByName: db.getRoleByName,
-});
-
-const checkAgentPermission = createCheckRemoteAgentAccess({
- getAgent: db.getAgent,
- getEffectivePermissions,
-});
-
-router.use(requireApiKeyAuth);
+router.use(preAuthTenantMiddleware);
+router.use(requireRemoteAgentAuth);
router.use(configMiddleware);
router.use(checkRemoteAgentsFeature);
diff --git a/api/server/routes/files/files.js b/api/server/routes/files/files.js
index 5c26f65b81c9..285f78390971 100644
--- a/api/server/routes/files/files.js
+++ b/api/server/routes/files/files.js
@@ -295,6 +295,86 @@ router.get('/code/download/:session_id/:fileId', async (req, res) => {
}
});
+/* Lazy-sweep cutoff: pending records older than this are marked failed
+ * on the next poll. 2min is well past the 60s render ceiling, so any
+ * `pending` past it is definitively orphaned. Tighter than the boot
+ * sweep (5min) since this runs per-request, not per-instance. */
+const PREVIEW_LAZY_SWEEP_CUTOFF_MS = 2 * 60 * 1000;
+
+/**
+ * Poll the lifecycle status of a code-execution file's inline preview.
+ *
+ * Deferred-preview flow: the immediate persist step writes the file
+ * record at `status: 'pending'`; the background render transitions
+ * it to `'ready'` (with `text` + `textFormat`) or `'failed'` (with
+ * `previewError`). The frontend's `useFilePreview` React Query hook
+ * polls this endpoint at ~2.5s intervals while `status === 'pending'`,
+ * then auto-stops on terminal status.
+ *
+ * Returns the smallest viable shape:
+ * - `status` always present (defaults to `'ready'` for legacy records
+ * that never had the field — clients treat absent as ready).
+ * - `text` and `textFormat` only when status is 'ready' AND text
+ * is non-null (preserves the security contract from PR #12934 —
+ * office bucket files MUST NOT receive plain-text fallbacks).
+ * - `previewError` only when status is 'failed'.
+ *
+ * Lazy-sweeps stale `pending` records on the spot — see
+ * `PREVIEW_LAZY_SWEEP_CUTOFF_MS` for the rationale.
+ *
+ * Reuses the `fileAccess` middleware so ACL is identical to download.
+ *
+ * @route GET /files/:file_id/preview
+ */
+router.get('/:file_id/preview', fileAccess, async (req, res) => {
+ try {
+ const { file_id } = req.params;
+ /* `fileAccess` already fetched the record (sans `text`, the default
+ * projection drops it). Reuse for the lifecycle check; only re-fetch
+ * with `text` on a terminal ready response — the typical lifecycle
+ * is N pending polls + 1 ready, so this avoids ~N redundant text
+ * reads per file. */
+ let file = req.fileAccess.file;
+ /* Lazy sweep: if stuck `pending` past the cutoff, mark `failed`
+ * conditional on the observed `updatedAt` (concurrent legitimate
+ * updates win). */
+ if (file.status === 'pending' && file.updatedAt instanceof Date) {
+ const ageMs = Date.now() - file.updatedAt.getTime();
+ if (ageMs > PREVIEW_LAZY_SWEEP_CUTOFF_MS) {
+ const swept = await db.updateFile(
+ { file_id, status: 'failed', previewError: 'orphaned' },
+ { status: 'pending', updatedAt: file.updatedAt },
+ );
+ if (swept) {
+ file = swept;
+ logger.info(
+ `[/files/:file_id/preview] Lazy-swept orphaned pending record ${file_id} (age ${Math.round(ageMs / 1000)}s)`,
+ );
+ }
+ }
+ }
+ /* Default to 'ready' for back-compat: legacy records pre-date the
+ * field, and non-office files never get a status set on persist. */
+ const status = file.status ?? 'ready';
+ const payload = { file_id, status };
+ if (status === 'ready') {
+ const withText = await db.findFileById(file_id);
+ if (withText?.text != null) {
+ payload.text = withText.text;
+ payload.textFormat = withText.textFormat ?? null;
+ }
+ } else if (status === 'failed' && file.previewError) {
+ payload.previewError = file.previewError;
+ }
+ return res.status(200).json(payload);
+ } catch (error) {
+ logger.error('[/files/:file_id/preview] Error fetching preview status:', error);
+ return res
+ .status(500)
+ .json({ error: 'Internal Server Error', message: 'Failed to fetch preview status' });
+ }
+});
+
router.get('/download/:userId/:file_id', fileAccess, async (req, res) => {
try {
const { userId, file_id } = req.params;
diff --git a/api/server/routes/files/preview.spec.js b/api/server/routes/files/preview.spec.js
new file mode 100644
index 000000000000..f86c128d4bff
--- /dev/null
+++ b/api/server/routes/files/preview.spec.js
@@ -0,0 +1,372 @@
+/**
+ * Coverage for the new GET /files/:file_id/preview endpoint.
+ *
+ * Deferred-preview code-execution flow: the immediate persist step
+ * emits a file record at `status: 'pending'`; the background render
+ * transitions it to `'ready'` (with text) or `'failed'` (with
+ * previewError). The frontend polls this endpoint until status is
+ * terminal. This suite asserts the response shape across all four
+ * states (pending, ready, failed, legacy/back-compat) and the auth
+ * boundary (404 vs 403).
+ */
+
+jest.mock('@librechat/data-schemas', () => ({
+ logger: { warn: jest.fn(), debug: jest.fn(), error: jest.fn(), info: jest.fn() },
+ SystemCapabilities: {},
+}));
+
+jest.mock('@librechat/api', () => ({
+ refreshS3FileUrls: jest.fn(),
+ resolveUploadErrorMessage: jest.fn(),
+ verifyAgentUploadPermission: jest.fn(),
+}));
+
+const mockFindFileById = jest.fn();
+const mockGetFiles = jest.fn();
+const mockUpdateFile = jest.fn();
+jest.mock('~/models', () => ({
+ findFileById: (...args) => mockFindFileById(...args),
+ getFiles: (...args) => mockGetFiles(...args),
+ updateFile: (...args) => mockUpdateFile(...args),
+ getAgents: jest.fn().mockResolvedValue([]),
+ batchUpdateFiles: jest.fn(),
+}));
+
+jest.mock('~/server/services/Files/process', () => ({
+ filterFile: jest.fn(),
+ processFileUpload: jest.fn(),
+ processDeleteRequest: jest.fn(),
+ processAgentFileUpload: jest.fn(),
+}));
+
+jest.mock('~/server/services/Files/strategies', () => ({
+ getStrategyFunctions: jest.fn(() => ({})),
+}));
+
+jest.mock('~/server/controllers/assistants/helpers', () => ({
+ getOpenAIClient: jest.fn(),
+}));
+
+jest.mock('~/server/middleware/roles/capabilities', () => ({
+ hasCapability: jest.fn(() => (_req, _res, next) => next()),
+}));
+
+jest.mock('~/server/services/PermissionService', () => ({
+ checkPermission: jest.fn(() => (_req, _res, next) => next()),
+ getEffectivePermissions: jest.fn().mockResolvedValue(0),
+}));
+
+jest.mock('~/server/services/Files', () => ({
+ hasAccessToFilesViaAgent: jest.fn(),
+}));
+
+jest.mock('~/server/utils/files', () => ({
+ cleanFileName: (name) => name,
+}));
+
+jest.mock('~/cache', () => ({
+ getLogStores: jest.fn(() => ({ get: jest.fn(), set: jest.fn() })),
+}));
+
+const express = require('express');
+const request = require('supertest');
+const filesRouter = require('./files');
+
+/**
+ * Mount the router with a per-request user injector so we can simulate
+ * a logged-in user without spinning up the full auth stack.
+ */
+function buildApp({ user = { id: 'user-123', role: 'user' } } = {}) {
+ const app = express();
+ app.use(express.json());
+ app.use((req, _res, next) => {
+ req.user = user;
+ req.config = { fileStrategy: 'local' };
+ next();
+ });
+ app.use('/files', filesRouter);
+ return app;
+}
+
+const OWNER_USER_ID = 'user-123';
+
+describe('GET /files/:file_id/preview', () => {
+ beforeEach(() => {
+ mockFindFileById.mockReset();
+ mockGetFiles.mockReset();
+ mockUpdateFile.mockReset();
+ });
+
+ it('returns 404 when the file does not exist (auth check fails first via fileAccess)', async () => {
+ /* `fileAccess` middleware does its own getFiles lookup and returns
+ * 404 before our handler ever runs. This test asserts the boundary
+ * lives there, not that the handler duplicates the check. */
+ mockGetFiles.mockResolvedValueOnce([]);
+ const res = await request(buildApp()).get('/files/missing-id/preview');
+ expect(res.status).toBe(404);
+ expect(res.body).toMatchObject({ error: 'Not Found' });
+ expect(mockFindFileById).not.toHaveBeenCalled();
+ });
+
+ it('returns 403 when the requester does not own the file and has no agent-based access', async () => {
+ /* fileAccess returns 403 — the file exists but belongs to someone
+ * else and no agent grants access. The preview handler should
+ * never run. */
+ mockGetFiles.mockResolvedValueOnce([
+ { file_id: 'someone-elses', user: 'other-user', filename: 'x.xlsx' },
+ ]);
+ const res = await request(buildApp()).get('/files/someone-elses/preview');
+ expect(res.status).toBe(403);
+ expect(mockFindFileById).not.toHaveBeenCalled();
+ });
+
+ it('returns status:pending without text/textFormat while the deferred render is in flight', async () => {
+ mockGetFiles.mockResolvedValueOnce([
+ {
+ file_id: 'fid-pending',
+ user: OWNER_USER_ID,
+ filename: 'data.xlsx',
+ status: 'pending',
+ },
+ ]);
+ const res = await request(buildApp()).get('/files/fid-pending/preview');
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual({ file_id: 'fid-pending', status: 'pending' });
+ /* Pending must NOT leak `text` and must NOT trigger the text re-fetch. */
+ expect(res.body).not.toHaveProperty('text');
+ expect(mockFindFileById).not.toHaveBeenCalled();
+ });
+
+ it('returns status:ready with text + textFormat when the deferred render succeeded', async () => {
+ mockGetFiles.mockResolvedValueOnce([
+ { file_id: 'fid-ready', user: OWNER_USER_ID, filename: 'data.xlsx', status: 'ready' },
+ ]);
+ /* Text is fetched only on the terminal ready response. */
+ mockFindFileById.mockResolvedValueOnce({
+ file_id: 'fid-ready',
+ text: '',
+ textFormat: 'html',
+ });
+ const res = await request(buildApp()).get('/files/fid-ready/preview');
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual({
+ file_id: 'fid-ready',
+ status: 'ready',
+ text: '',
+ textFormat: 'html',
+ });
+ });
+
+ it('returns status:failed with previewError when the deferred render errored', async () => {
+ mockGetFiles.mockResolvedValueOnce([
+ {
+ file_id: 'fid-failed',
+ user: OWNER_USER_ID,
+ filename: 'data.xlsx',
+ status: 'failed',
+ previewError: 'parser-error',
+ },
+ ]);
+ const res = await request(buildApp()).get('/files/fid-failed/preview');
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual({
+ file_id: 'fid-failed',
+ status: 'failed',
+ previewError: 'parser-error',
+ });
+ expect(mockFindFileById).not.toHaveBeenCalled();
+ });
+
+ it('defaults to status:ready for legacy records with no status field (back-compat)', async () => {
+ mockGetFiles.mockResolvedValueOnce([
+ {
+ file_id: 'fid-legacy',
+ user: OWNER_USER_ID,
+ filename: 'old.csv',
+ // status intentionally absent
+ },
+ ]);
+ mockFindFileById.mockResolvedValueOnce({
+ file_id: 'fid-legacy',
+ text: 'csv,header\n1,2',
+ textFormat: 'text',
+ });
+ const res = await request(buildApp()).get('/files/fid-legacy/preview');
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual({
+ file_id: 'fid-legacy',
+ status: 'ready',
+ text: 'csv,header\n1,2',
+ textFormat: 'text',
+ });
+ });
+
+ it('returns status:ready with no text when the record is ready but text is null (binary/oversized)', async () => {
+ mockGetFiles.mockResolvedValueOnce([
+ { file_id: 'fid-binary', user: OWNER_USER_ID, filename: 'image.bin' },
+ ]);
+ mockFindFileById.mockResolvedValueOnce({
+ file_id: 'fid-binary',
+ text: null,
+ textFormat: null,
+ });
+ const res = await request(buildApp()).get('/files/fid-binary/preview');
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual({ file_id: 'fid-binary', status: 'ready' });
+ });
+
+ it('returns ready with no text when ready record was deleted between fileAccess and text fetch', async () => {
+ /* `fileAccess` saw the record but the concurrent delete removed it
+ * before the text fetch. Surface ready-without-text rather than
+ * 500 — the client routes to download-only and stops polling. */
+ mockGetFiles.mockResolvedValueOnce([
+ { file_id: 'fid-race', user: OWNER_USER_ID, filename: 'data.xlsx', status: 'ready' },
+ ]);
+ mockFindFileById.mockResolvedValueOnce(null);
+ const res = await request(buildApp()).get('/files/fid-race/preview');
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual({ file_id: 'fid-race', status: 'ready' });
+ });
+
+ it('returns 500 with a stable shape if the text fetch throws unexpectedly', async () => {
+ mockGetFiles.mockResolvedValueOnce([
+ { file_id: 'fid-boom', user: OWNER_USER_ID, filename: 'data.xlsx', status: 'ready' },
+ ]);
+ mockFindFileById.mockRejectedValueOnce(new Error('mongo down'));
+ const res = await request(buildApp()).get('/files/fid-boom/preview');
+ expect(res.status).toBe(500);
+ expect(res.body).toMatchObject({ error: 'Internal Server Error' });
+ });
+
+ describe('lazy sweep for stale pending records', () => {
+ /* The boot-time `sweepOrphanedPreviews` only runs once at startup
+ * with a 5-min cutoff. A backend crash + quick restart can leave
+ * `pending` records younger than 5 min that never get touched
+ * again. This endpoint sweeps them on the spot whenever a polling
+ * request lands on one — the user is exactly the consumer who
+ * cares, so on-demand sweep is the right shape. (Codex P2 review
+ * on PR #12957.) */
+ const STALE_MS = 6 * 60 * 1000;
+ const FRESH_MS = 30 * 1000;
+
+ it('marks a stale pending record as failed:orphaned and returns the swept state', async () => {
+ const updatedAt = new Date(Date.now() - STALE_MS);
+ mockGetFiles.mockResolvedValueOnce([
+ {
+ file_id: 'fid-stale',
+ user: OWNER_USER_ID,
+ filename: 'data.xlsx',
+ status: 'pending',
+ updatedAt,
+ },
+ ]);
+ mockUpdateFile.mockResolvedValueOnce({
+ file_id: 'fid-stale',
+ status: 'failed',
+ previewError: 'orphaned',
+ });
+
+ const res = await request(buildApp()).get('/files/fid-stale/preview');
+
+ expect(mockUpdateFile).toHaveBeenCalledWith(
+ { file_id: 'fid-stale', status: 'failed', previewError: 'orphaned' },
+ { status: 'pending', updatedAt },
+ );
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual({
+ file_id: 'fid-stale',
+ status: 'failed',
+ previewError: 'orphaned',
+ });
+ });
+
+ it('does NOT sweep a fresh pending record (within the cutoff window)', async () => {
+ mockGetFiles.mockResolvedValueOnce([
+ {
+ file_id: 'fid-fresh',
+ user: OWNER_USER_ID,
+ filename: 'data.xlsx',
+ status: 'pending',
+ updatedAt: new Date(Date.now() - FRESH_MS),
+ },
+ ]);
+
+ const res = await request(buildApp()).get('/files/fid-fresh/preview');
+
+ expect(mockUpdateFile).not.toHaveBeenCalled();
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual({ file_id: 'fid-fresh', status: 'pending' });
+ });
+
+ it('sweeps a record past the 2min cutoff but below the 5min boot-sweep threshold', async () => {
+ /* Pins the cutoff change from 5min to 2min — without this, a
+ * future revert wouldn't fail the suite. */
+ const updatedAt = new Date(Date.now() - 3 * 60 * 1000);
+ mockGetFiles.mockResolvedValueOnce([
+ {
+ file_id: 'fid-mid',
+ user: OWNER_USER_ID,
+ filename: 'data.xlsx',
+ status: 'pending',
+ updatedAt,
+ },
+ ]);
+ mockUpdateFile.mockResolvedValueOnce({
+ file_id: 'fid-mid',
+ status: 'failed',
+ previewError: 'orphaned',
+ });
+
+ const res = await request(buildApp()).get('/files/fid-mid/preview');
+
+ expect(mockUpdateFile).toHaveBeenCalled();
+ expect(res.body).toEqual({
+ file_id: 'fid-mid',
+ status: 'failed',
+ previewError: 'orphaned',
+ });
+ });
+
+ it('does NOT sweep a stale ready record (only pending qualifies)', async () => {
+ mockGetFiles.mockResolvedValueOnce([
+ {
+ file_id: 'fid-ready',
+ user: OWNER_USER_ID,
+ filename: 'data.xlsx',
+ status: 'ready',
+ updatedAt: new Date(Date.now() - STALE_MS),
+ },
+ ]);
+ mockFindFileById.mockResolvedValueOnce({
+ file_id: 'fid-ready',
+ text: 'final',
+ textFormat: 'html',
+ });
+
+ const res = await request(buildApp()).get('/files/fid-ready/preview');
+
+ expect(mockUpdateFile).not.toHaveBeenCalled();
+ expect(res.body).toMatchObject({ status: 'ready', text: 'final' });
+ });
+
+ it('falls through to the original pending payload if the conditional sweep loses the race', async () => {
+ const updatedAt = new Date(Date.now() - STALE_MS);
+ mockGetFiles.mockResolvedValueOnce([
+ {
+ file_id: 'fid-race',
+ user: OWNER_USER_ID,
+ filename: 'data.xlsx',
+ status: 'pending',
+ updatedAt,
+ },
+ ]);
+ mockUpdateFile.mockResolvedValueOnce(null);
+
+ const res = await request(buildApp()).get('/files/fid-race/preview');
+
+ expect(mockUpdateFile).toHaveBeenCalled();
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual({ file_id: 'fid-race', status: 'pending' });
+ });
+ });
+});
diff --git a/api/server/services/AuthService.js b/api/server/services/AuthService.js
index 816a0eac5b10..40b3c1a72526 100644
--- a/api/server/services/AuthService.js
+++ b/api/server/services/AuthService.js
@@ -11,6 +11,7 @@ const {
math,
isEnabled,
checkEmailConfig,
+ setCloudFrontCookies,
isEmailDomainAllowed,
shouldUseSecureCookie,
resolveAppConfigForUser,
@@ -440,6 +441,9 @@ const setAuthTokens = async (userId, res, _session = null) => {
secure: shouldUseSecureCookie(),
sameSite: 'strict',
});
+
+ setCloudFrontCookies(res);
+
return token;
} catch (error) {
logger.error('[setAuthTokens] Error in setting authentication tokens:', error);
@@ -557,6 +561,9 @@ const setOpenIDAuthTokens = (tokenset, req, res, userId, existingRefreshToken) =
sameSite: 'strict',
});
}
+
+ setCloudFrontCookies(res);
+
return appAuthToken;
} catch (error) {
logger.error('[setOpenIDAuthTokens] Error in setting authentication tokens:', error);
diff --git a/api/server/services/AuthService.spec.js b/api/server/services/AuthService.spec.js
index c8abafdbe59f..df89f3f2c9c8 100644
--- a/api/server/services/AuthService.spec.js
+++ b/api/server/services/AuthService.spec.js
@@ -15,6 +15,7 @@ jest.mock('@librechat/api', () => ({
math: jest.fn((val, fallback) => (val ? Number(val) : fallback)),
shouldUseSecureCookie: jest.fn(() => false),
resolveAppConfigForUser: jest.fn(async (_getAppConfig, _user) => ({})),
+ setCloudFrontCookies: jest.fn(() => true),
}));
jest.mock('~/models', () => ({
findUser: jest.fn(),
@@ -40,10 +41,17 @@ const {
shouldUseSecureCookie,
isEmailDomainAllowed,
resolveAppConfigForUser,
+ setCloudFrontCookies,
} = require('@librechat/api');
-const { findUser } = require('~/models');
+const {
+ findUser,
+ getUserById,
+ generateToken,
+ generateRefreshToken,
+ createSession,
+} = require('~/models');
const { getAppConfig } = require('~/server/services/Config');
-const { setOpenIDAuthTokens, requestPasswordReset } = require('./AuthService');
+const { setOpenIDAuthTokens, requestPasswordReset, setAuthTokens } = require('./AuthService');
/** Helper to build a mock Express response */
function mockResponse() {
@@ -339,3 +347,67 @@ describe('requestPasswordReset', () => {
expect(result.message).toContain('If an account with that email exists');
});
});
+
+describe('CloudFront cookie integration', () => {
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ describe('setOpenIDAuthTokens', () => {
+ const validTokenset = {
+ id_token: 'the-id-token',
+ access_token: 'the-access-token',
+ refresh_token: 'the-refresh-token',
+ };
+
+ it('calls setCloudFrontCookies with response object', () => {
+ const req = mockRequest();
+ const res = mockResponse();
+
+ setOpenIDAuthTokens(validTokenset, req, res, 'user-123');
+
+ expect(setCloudFrontCookies).toHaveBeenCalledWith(res);
+ });
+
+ it('succeeds even when setCloudFrontCookies returns false', () => {
+ setCloudFrontCookies.mockReturnValue(false);
+
+ const req = mockRequest();
+ const res = mockResponse();
+
+ const result = setOpenIDAuthTokens(validTokenset, req, res, 'user-123');
+
+ expect(result).toBe('the-id-token');
+ });
+ });
+
+ describe('setAuthTokens', () => {
+ beforeEach(() => {
+ getUserById.mockResolvedValue({ _id: 'user-123' });
+ generateToken.mockResolvedValue('mock-access-token');
+ generateRefreshToken.mockReturnValue('mock-refresh-token');
+ createSession.mockResolvedValue({
+ session: { expiration: new Date(Date.now() + 604800000) },
+ refreshToken: 'mock-refresh-token',
+ });
+ });
+
+ it('calls setCloudFrontCookies with response object', async () => {
+ const res = mockResponse();
+
+ await setAuthTokens('user-123', res);
+
+ expect(setCloudFrontCookies).toHaveBeenCalledWith(res);
+ });
+
+ it('succeeds even when setCloudFrontCookies returns false', async () => {
+ setCloudFrontCookies.mockReturnValue(false);
+
+ const res = mockResponse();
+
+ const result = await setAuthTokens('user-123', res);
+
+ expect(result).toBe('mock-access-token');
+ });
+ });
+});
diff --git a/api/server/services/Files/Code/__tests__/process-traversal.spec.js b/api/server/services/Files/Code/__tests__/process-traversal.spec.js
index 016f3a71c636..b6fcc2636f8c 100644
--- a/api/server/services/Files/Code/__tests__/process-traversal.spec.js
+++ b/api/server/services/Files/Code/__tests__/process-traversal.spec.js
@@ -27,6 +27,18 @@ jest.mock('@librechat/api', () => {
createAxiosInstance: jest.fn(() => mockAxios),
classifyCodeArtifact: jest.fn(() => 'other'),
extractCodeArtifactText: jest.fn(async () => null),
+ /* `processCodeOutput` calls this to derive the trust flag persisted
+ * on `IMongoFile.textFormat` — Codex P1 review on PR #12934. The
+ * mock returns null in lockstep with the null `text` above so
+ * downstream consumers don't see a phantom format. */
+ getExtractedTextFormat: jest.fn(() => null),
+ /* Pass-through `withTimeout`: this suite asserts traversal sanitization,
+ * not deferred preview timing. */
+ withTimeout: async (promise) => promise,
+ /* These traversal cases all use non-office filenames — keep the
+ * inline (non-finalize) path so existing assertions on a single
+ * createFile call hold. */
+ hasOfficeHtmlPath: jest.fn(() => false),
codeServerHttpAgent: new http.Agent({ keepAlive: false }),
codeServerHttpsAgent: new https.Agent({ keepAlive: false }),
};
diff --git a/api/server/services/Files/Code/process.js b/api/server/services/Files/Code/process.js
index 5ae78a7e9a9d..4efc938223ee 100644
--- a/api/server/services/Files/Code/process.js
+++ b/api/server/services/Files/Code/process.js
@@ -3,8 +3,10 @@ const { v4 } = require('uuid');
const { logger } = require('@librechat/data-schemas');
const { getCodeBaseURL } = require('@librechat/agents');
const {
+ withTimeout,
getBasePath,
logAxiosError,
+ hasOfficeHtmlPath,
sanitizeArtifactPath,
flattenArtifactPath,
createAxiosInstance,
@@ -12,6 +14,7 @@ const {
codeServerHttpAgent,
codeServerHttpsAgent,
extractCodeArtifactText,
+ getExtractedTextFormat,
} = require('@librechat/api');
const {
Tools,
@@ -68,8 +71,235 @@ const createDownloadFallback = ({
};
/**
- * Process code execution output files - downloads and saves both images and non-image files.
- * All files are saved to local storage with fileIdentifier metadata for code env re-upload.
+ * Hard ceiling on the deferred preview rendering (HTML extraction + DB
+ * update). The inner office-render path already has its own 12s timeout
+ * and a concurrency-limited queue; this is the outer guard that catches
+ * pathological cases where queue wait + render + DB write would
+ * otherwise hang the file in `status: 'pending'` indefinitely.
+ *
+ * If the timeout fires the record is updated to `status: 'failed'`
+ * with `previewError: 'timeout'` and the UI shows download-only.
+ */
+const PREVIEW_FINALIZE_TIMEOUT_MS = 60_000;
+
+/**
+ * Render the inline HTML preview for a code-execution file (or plain
+ * text for non-office buckets that still benefit from caching), then
+ * atomically transition the DB record to `status: 'ready'` (with
+ * `text`/`textFormat`) or `status: 'failed'` (with `previewError`).
+ *
+ * Decoupled from `processCodeOutput` so the agent's final response is
+ * not blocked on potentially slow office rendering. The caller fires
+ * this without awaiting; promises continue running after the HTTP
+ * response closes (Node doesn't kill them) and the frontend learns of
+ * completion via the `attachment` update SSE event (if the stream is
+ * still open) or via React Query polling otherwise. Process restart
+ * is the only thing that can lose progress — covered by the boot-time
+ * orphan sweep.
+ *
+ * @param {object} params
+ * @param {Buffer} params.buffer - The full downloaded file contents,
+ * bounded by the server's `fileSizeLimit` config (defaults far above
+ * the 1MB extractor cap). The buffer is captured by the closure
+ * returned in `{ finalize }`, so when many office files queue behind
+ * the inner concurrency limiter (cap 2), all queued buffers stay
+ * resident until each one's slot frees. For a tool result emitting
+ * N office files, peak heap usage from this path is up to
+ * `N * fileSizeLimit`. Acceptable for typical agent runs (a handful
+ * of files at a few hundred KB each); pathological cases are bounded
+ * by the inner per-file 12s timeout and the outer 60s render cap.
+ * @param {string} params.leafName - Basename for classification.
+ * @param {string} params.mimeType - Detected/inferred MIME.
+ * @param {string} params.category - Classifier output.
+ * @param {string} params.file_id - The DB record key for the update.
+ * @param {string} [params.previewRevision] - Generation marker stamped
+ * by the immediate persist step. The DB commit is conditional on
+ * this — if a newer emit (cross-turn filename reuse) has rotated
+ * the revision before this render finishes, `updateFile` returns
+ * null and the stale render is silently discarded rather than
+ * overwriting the newer record.
+ * @returns {Promise<IMongoFile | null>} The post-update record on
+ * success; `null` if the DB update itself failed (extraction failure
+ * is reflected as `status: 'failed'`, not a thrown error) or if the
+ * `previewRevision` guard rejected the write.
+ */
+const finalizePreview = async ({
+ buffer,
+ leafName,
+ mimeType,
+ category,
+ file_id,
+ previewRevision,
+}) => {
+ let text = null;
+ let previewError;
+ try {
+ text = await withTimeout(
+ extractCodeArtifactText(buffer, leafName, mimeType, category),
+ PREVIEW_FINALIZE_TIMEOUT_MS,
+ `Preview extraction exceeded ${PREVIEW_FINALIZE_TIMEOUT_MS}ms`,
+ );
+ } catch (_error) {
+ /* `extractCodeArtifactText` swallows its own errors and returns null,
+ * so the only way to reach here is a `withTimeout` rejection — i.e.
+ * the queue + render combined exceeded the outer 60s ceiling. */
+ previewError = 'timeout';
+ logger.warn(
+ `[finalizePreview] ${file_id}: extraction timed out after ${PREVIEW_FINALIZE_TIMEOUT_MS}ms`,
+ );
+ }
+ /* HTML-or-null contract (PR #12934): null result on an office file
+ * must NOT fall back to plain text — surface as failed. Caller gates
+ * on `hasOfficeHtmlPath`, so reaching here always means office. */
+ const textFormat = getExtractedTextFormat(leafName, mimeType, text);
+ const failed = text == null;
+ const status = failed ? 'failed' : 'ready';
+ if (failed && !previewError) {
+ previewError = 'parser-error';
+ }
+ try {
+ /* Conditional update: commit only if `previewRevision` still
+ * matches what the immediate persist step stamped. If a newer
+ * emit has rotated the revision (cross-turn filename reuse),
+ * `updateFile` returns null and the stale render is silently
+ * discarded. (Codex P1 review on PR #12957.) */
+ const updated = await updateFile(
+ {
+ file_id,
+ text,
+ textFormat,
+ status,
+ previewError: failed ? previewError : null,
+ },
+ previewRevision ? { previewRevision } : undefined,
+ );
+ if (!updated && previewRevision) {
+ logger.debug(
+ `[finalizePreview] ${file_id}: stale render skipped — newer emit has superseded revision ${previewRevision}`,
+ );
+ }
+ return updated;
+ } catch (error) {
+ logger.error(
+ `[finalizePreview] ${file_id}: failed to persist preview result: ${error?.message ?? error}`,
+ );
+ return null;
+ }
+};
+
+/**
+ * Run the background `finalize` thunk returned by `processCodeOutput`
+ * and route the resolved record to the caller's emit logic. Shared
+ * between `callbacks.js` (chat-completions + Open Responses) and
+ * `tools.js` (direct tool endpoint) so the fire-and-forget pattern
+ * doesn't drift across callsites.
+ *
+ * `onResolved` receives the post-update DB record and is the only piece
+ * that varies — chat-completions writes the legacy `attachment` SSE
+ * event, Open Responses writes the spec-shaped `librechat:attachment`
+ * event with a sequence number, and the direct tool endpoint has no
+ * stream to write to (caller passes a no-op).
+ *
+ * The catch path is the safety net for unexpected programming errors
+ * inside `finalizePreview` ONLY. The function is designed to never
+ * throw (extraction and DB failures are translated to `status: 'failed'`
+ * inside it), but a ref error or future regression would otherwise
+ * leave the DB record stuck at `'pending'` until the boot-time orphan
+ * sweep — potentially hours away on a stable server. We attempt a
+ * best-effort `updateFile` to mark the record `'failed'` with
+ * `previewError: 'unexpected'` so the UI stops polling and the
+ * next-turn LLM context surfaces the failure.
+ *
+ * `onResolved` errors are deliberately isolated in their own try/catch.
+ * Without that isolation, a transient transport-side failure (SSE write
+ * race after the stream closed, an emitter listener throwing) would
+ * propagate into the finalize catch and downgrade an *already-resolved*
+ * record to `failed` with `previewError: 'unexpected'` — surfacing
+ * "preview unavailable" in the UI even though extraction succeeded
+ * and the file is on disk. The emit failure is logged but the DB
+ * record stays at whatever `finalizePreview` wrote (typically
+ * `'ready'`), so the polling layer / next page load still sees the
+ * resolved preview.
+ *
+ * @param {object} params
+ * @param {(() => Promise