diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx index 87248a6a8ba..edb7cb6e402 100644 --- a/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx +++ b/packages/opencode/src/cli/cmd/tui/routes/session/dialog-timeline.tsx @@ -1,37 +1,183 @@ import { createMemo, onMount } from "solid-js" import { useSync } from "@tui/context/sync" import { DialogSelect, type DialogSelectOption } from "@tui/ui/dialog-select" -import type { TextPart } from "@opencode-ai/sdk/v2" +import type { Part, Message, AssistantMessage, ToolPart, FilePart } from "@opencode-ai/sdk/v2" import { Locale } from "@/util/locale" import { DialogMessage } from "./dialog-message" import { useDialog } from "../../ui/dialog" import type { PromptInfo } from "../../component/prompt/history" +import { Token } from "@/util/token" +import { useTheme } from "@tui/context/theme" +import { useSDK } from "@tui/context/sdk" +import fs from "fs" +import path from "path" +import { produce } from "solid-js/store" +import { Binary } from "@opencode-ai/util/binary" +import { Global } from "@/global" + +function formatTokenCount(tokens: number): string { + return tokens.toString().padStart(7) +} + +function getMessageTokens(message: Message, parts: Part[], isCompaction: boolean = false): number { + if (message.role === "assistant") { + const assistantMsg = message as AssistantMessage + let total = 0 + + // Calculate tokens for this message turn only (not cumulative) + if (assistantMsg.tokens) { + const input = assistantMsg.tokens.input || 0 + const output = assistantMsg.tokens.output || 0 + const cacheWrite = assistantMsg.tokens.cache?.write || 0 + const reasoning = assistantMsg.tokens.reasoning || 0 + + // Exclude cacheRead as it represents cumulative context, not this message's cost + total = input + output + cacheWrite + reasoning + } else { + // Fall back to aggregating from step-finish parts + for 
(const part of parts) { + if (part.type === "step-finish" && (part as any).tokens) { + const tokens = (part as any).tokens + total += tokens.input + tokens.output + (tokens.reasoning || 0) + } + } + } + + // Add tool output tokens (not included in message.tokens) + for (const part of parts) { + if (part.type === "tool") { + const toolPart = part as ToolPart + const state = toolPart.state as any + if (state?.output) { + const output = typeof state.output === "string" ? state.output : JSON.stringify(state.output) + total += Token.estimate(output) + } + } + } + + return total + } + + // User message - estimate from parts + let estimate = 0 + for (const part of parts) { + if (part.type === "text" && !part.synthetic && !part.ignored) { + estimate += Token.estimate(part.text) + } + if (part.type === "file") { + const filePart = part as FilePart + if (filePart.source?.text?.value) { + estimate += Token.estimate(filePart.source.text.value) + } else if (filePart.mime.startsWith("image/")) { + estimate += Token.estimateImage(filePart.url) + } + } + } + return estimate +} + +function getMessageSummary(parts: Part[]): string { + const textPart = parts.find((x) => x.type === "text" && !x.synthetic && !x.ignored) + if (textPart && textPart.type === "text") { + return textPart.text.replace(/\n/g, " ") + } + + const toolParts = parts.filter((x) => x.type === "tool") as ToolPart[] + if (toolParts.length > 0) { + const tools = toolParts.map((p) => p.tool).join(", ") + return `[${tools}]` + } + + const fileParts = parts.filter((x) => x.type === "file") as FilePart[] + if (fileParts.length > 0) { + const files = fileParts.map((p) => p.filename || "file").join(", ") + return `[files: ${files}]` + } + + return "[no content]" +} export function DialogTimeline(props: { sessionID: string onMove: (messageID: string) => void setPrompt?: (prompt: PromptInfo) => void }) { - const sync = useSync() + const syncCtx = useSync() + const sync = syncCtx.data + const setStore = syncCtx.set const 
dialog = useDialog() + const { theme } = useTheme() + const sdk = useSDK() onMount(() => { dialog.setSize("large") }) const options = createMemo((): DialogSelectOption[] => { - const messages = sync.data.message[props.sessionID] ?? [] + const messages = sync.message[props.sessionID] ?? [] const result = [] as DialogSelectOption[] + for (const message of messages) { - if (message.role !== "user") continue - const part = (sync.data.part[message.id] ?? []).find( - (x) => x.type === "text" && !x.synthetic && !x.ignored, - ) as TextPart - if (!part) continue + const parts = sync.part[message.id] ?? [] + + // Check if this is a compaction summary message + const isCompactionSummary = message.role === "assistant" && (message as AssistantMessage).summary === true + + // Get the token count for this specific message (delta only, not cumulative) + const messageTokens = getMessageTokens(message, parts, isCompactionSummary) + + // Display the tokens directly (no cumulative calculation needed) + const delta = messageTokens + + const formatted = formatTokenCount(delta) + + // Token count color based on thresholds (cold to hot gradient) + // Using delta for color coding + let tokenColor = theme.textMuted // grey < 1k + if (delta >= 20000) { + tokenColor = theme.error // red 20k+ + } else if (delta >= 10000) { + tokenColor = theme.warning // orange 10k+ + } else if (delta >= 5000) { + tokenColor = theme.accent // purple 5k+ + } else if (delta >= 2000) { + tokenColor = theme.secondary // blue 2k+ + } else if (delta >= 1000) { + tokenColor = theme.info // cyan 1k+ + } + + const summary = getMessageSummary(parts) + + // Debug: Extract token breakdown for assistant messages + let tokenDebug = "" + if (message.role === "assistant") { + const assistantMsg = message as AssistantMessage + if (assistantMsg.tokens) { + const input = assistantMsg.tokens.input || 0 + const output = assistantMsg.tokens.output || 0 + const cacheRead = assistantMsg.tokens.cache?.read || 0 + const cacheWrite = 
assistantMsg.tokens.cache?.write || 0
+          const reasoning = assistantMsg.tokens.reasoning || 0
+          tokenDebug = `(${input}/${output}/${cacheRead}/${cacheWrite}/${reasoning}) `
+        }
+      }
+
+      const prefix = isCompactionSummary ? "[compaction] " : message.role === "assistant" ? "agent: " : ""
+      const title = tokenDebug + prefix + summary
+
+      const gutter = [{formatted}]
+
+      // Normal assistant messages use textMuted for title
+      const isAssistant = message.role === "assistant" && !isCompactionSummary
+
       result.push({
-        title: part.text.replace(/\n/g, " "),
+        title,
+        gutter: isCompactionSummary ? [{formatted}] : gutter,
         value: message.id,
         footer: Locale.time(message.time.created),
+        titleColor: isCompactionSummary ? theme.success : isAssistant ? theme.textMuted : undefined,
+        footerColor: isCompactionSummary ? theme.success : undefined,
+        bg: isCompactionSummary ? theme.success : undefined,
         onSelect: (dialog) => {
           dialog.replace(() => (
@@ -39,9 +185,84 @@ export function DialogTimeline(props: {
         },
       })
     }
+    result.reverse()
     return result
   })
-  return props.onMove(option.value)} title="Timeline" options={options()} />
+  const handleDelete = async (messageID: string) => {
+    try {
+      const storageBase = path.join(Global.Path.data, "storage")
+
+      // Delete message file
+      const messagePath = path.join(storageBase, "message", props.sessionID, `${messageID}.json`)
+      if (fs.existsSync(messagePath)) {
+        fs.unlinkSync(messagePath)
+      }
+
+      // Delete all part files
+      const partsDir = path.join(storageBase, "part", messageID)
+      if (fs.existsSync(partsDir)) {
+        const partFiles = fs.readdirSync(partsDir)
+        for (const file of partFiles) {
+          fs.unlinkSync(path.join(partsDir, file))
+        }
+        fs.rmdirSync(partsDir)
+      }
+
+      // Invalidate session cache by setting the flag in storage
+      const sessionPath = path.join(
+        storageBase,
+        "session",
+        "project_" + (sync.session.find((s) => s.id === props.sessionID)?.projectID ?? ""),
+        `${props.sessionID}.json`,
+      )
+      if (fs.existsSync(sessionPath)) {
+        const
sessionData = JSON.parse(fs.readFileSync(sessionPath, "utf-8")) + sessionData.cacheInvalidated = true + fs.writeFileSync(sessionPath, JSON.stringify(sessionData, null, 2)) + } + + // Update the UI store to remove the message + const messages = sync.message[props.sessionID] + const result = Binary.search(messages, messageID, (m) => m.id) + if (result.found) { + setStore( + "message", + props.sessionID, + produce((draft) => { + draft.splice(result.index, 1) + }), + ) + } + + // Also remove parts from UI + setStore("part", messageID, []) + + // Update session in UI store to reflect cache invalidation + const sessionIndex = sync.session.findIndex((s) => s.id === props.sessionID) + if (sessionIndex >= 0) { + setStore("session", sessionIndex, "cacheInvalidated", true) + } + } catch (error) { + // Silent fail + } + } + + return ( + props.onMove(option.value)} + title="Timeline" + options={options()} + keybind={[ + { + keybind: { name: "delete", ctrl: false, meta: false, shift: false, leader: false }, + title: "Delete", + onTrigger: (option) => { + handleDelete(option.value) + }, + }, + ]} + /> + ) } diff --git a/packages/opencode/src/cli/cmd/tui/ui/dialog-select.tsx b/packages/opencode/src/cli/cmd/tui/ui/dialog-select.tsx index 5c37a493dfa..6100cec6961 100644 --- a/packages/opencode/src/cli/cmd/tui/ui/dialog-select.tsx +++ b/packages/opencode/src/cli/cmd/tui/ui/dialog-select.tsx @@ -38,7 +38,9 @@ export interface DialogSelectOption { disabled?: boolean bg?: RGBA gutter?: JSX.Element - onSelect?: (ctx: DialogContext) => void + titleColor?: RGBA + footerColor?: RGBA + onSelect?: (ctx: DialogContext, trigger?: "prompt") => void } export type DialogSelectRef = { @@ -280,6 +282,8 @@ export function DialogSelect(props: DialogSelectProps) { active={active()} current={current()} gutter={option.gutter} + titleColor={option.titleColor} + footerColor={option.footerColor} /> ) @@ -315,6 +319,8 @@ function Option(props: { current?: boolean footer?: JSX.Element | string gutter?: 
JSX.Element + titleColor?: RGBA + footerColor?: RGBA onMouseOver?: () => void }) { const { theme } = useTheme() @@ -334,19 +340,19 @@ function Option(props: { - {Locale.truncate(props.title, 61)} + {Locale.truncate(props.title, 60)} {props.description} - {props.footer} + {props.footer} diff --git a/packages/opencode/src/cli/cmd/tui/ui/dialog.tsx b/packages/opencode/src/cli/cmd/tui/ui/dialog.tsx index 57375ba09db..809f98d101e 100644 --- a/packages/opencode/src/cli/cmd/tui/ui/dialog.tsx +++ b/packages/opencode/src/cli/cmd/tui/ui/dialog.tsx @@ -36,7 +36,7 @@ export function Dialog( if (renderer.getSelection()) return e.stopPropagation() }} - width={props.size === "large" ? 80 : 60} + width={props.size === "large" ? 90 : 60} maxWidth={dimensions().width - 2} backgroundColor={theme.backgroundPanel} paddingTop={1} diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index f6b7ec8cbcc..9b68beb5c62 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -43,6 +43,35 @@ export namespace ProviderTransform { model: Provider.Model, options: Record, ): ModelMessage[] { + // Strip openai itemId metadata following what codex does + if (model.api.npm === "@ai-sdk/openai" || options.store === false) { + msgs = msgs.map((msg) => { + if (msg.providerOptions) { + for (const options of Object.values(msg.providerOptions)) { + if (options && typeof options === "object") { + delete options["itemId"] + delete options["reasoningEncryptedContent"] + } + } + } + if (!Array.isArray(msg.content)) { + return msg + } + const content = msg.content.map((part) => { + if (part.providerOptions) { + for (const options of Object.values(part.providerOptions)) { + if (options && typeof options === "object") { + delete options["itemId"] + delete options["reasoningEncryptedContent"] + } + } + } + return part + }) + return { ...msg, content } as typeof msg + }) + } + // Anthropic rejects messages with 
empty content - filter out empty string messages // and remove empty text/reasoning parts from array content if (model.api.npm === "@ai-sdk/anthropic") { @@ -164,7 +193,20 @@ export namespace ProviderTransform { return msgs } - function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] { + async function applyCaching(msgs: ModelMessage[], providerID: string, sessionID?: string): Promise { + // Skip caching if session cache was invalidated (e.g., message deletion) + if (sessionID) { + const { Session } = await import("../session") + const session = await Session.get(sessionID).catch(() => null) + if (session?.cacheInvalidated) { + // Clear flag and return without cache control markers + await Session.update(sessionID, (draft) => { + delete draft.cacheInvalidated + }).catch(() => {}) + return msgs + } + } + const system = msgs.filter((msg) => msg.role === "system").slice(0, 2) const final = msgs.filter((msg) => msg.role !== "system").slice(-2) @@ -244,7 +286,12 @@ export namespace ProviderTransform { }) } - export function message(msgs: ModelMessage[], model: Provider.Model, options: Record) { + export async function message( + msgs: ModelMessage[], + model: Provider.Model, + options: Record = {}, + sessionID?: string, + ) { msgs = unsupportedParts(msgs, model) msgs = normalizeMessages(msgs, model, options) if ( @@ -253,7 +300,7 @@ export namespace ProviderTransform { model.api.id.includes("claude") || model.api.npm === "@ai-sdk/anthropic" ) { - msgs = applyCaching(msgs, model.providerID) + msgs = await applyCaching(msgs, model.providerID, sessionID) } // Remap providerOptions keys from stored providerID to expected SDK key diff --git a/packages/opencode/src/session/index.ts b/packages/opencode/src/session/index.ts index 3fcdab5238c..772b3dc94af 100644 --- a/packages/opencode/src/session/index.ts +++ b/packages/opencode/src/session/index.ts @@ -76,6 +76,7 @@ export namespace Session { diff: z.string().optional(), }) .optional(), + 
cacheInvalidated: z.boolean().optional(),
     })
     .meta({
       ref: "Session",
diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
index 1029b45ea0d..142415a135b 100644
--- a/packages/opencode/src/session/llm.ts
+++ b/packages/opencode/src/session/llm.ts
@@ -243,7 +243,7 @@ export namespace LLM {
       async transformParams(args) {
         if (args.type === "stream") {
           // @ts-expect-error
-          args.params.prompt = ProviderTransform.message(args.params.prompt, input.model, options)
+          args.params.prompt = await ProviderTransform.message(args.params.prompt, input.model, options, input.sessionID)
         }
         return args.params
       },
diff --git a/packages/opencode/src/util/token.ts b/packages/opencode/src/util/token.ts
index cee5adc3771..dcd8c4c97cc 100644
--- a/packages/opencode/src/util/token.ts
+++ b/packages/opencode/src/util/token.ts
@@ -4,4 +4,10 @@ export namespace Token {
   export function estimate(input: string) {
     return Math.max(0, Math.round((input || "").length / CHARS_PER_TOKEN))
   }
+
+  export function estimateImage(urlOrData: string): number {
+    // Estimate tokens for image data/URLs since providers don't return image token counts
+    // Uses string length as proxy: data URLs contain base64 image data, file paths are small
+    return Math.max(100, Math.round(urlOrData.length / 170))
+  }
 }
diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts
index 2b8f1872f56..1f7de5138f4 100644
--- a/packages/opencode/test/provider/transform.test.ts
+++ b/packages/opencode/test/provider/transform.test.ts
@@ -222,7 +222,7 @@ })
 describe("ProviderTransform.message - DeepSeek reasoning content", () => {
-  test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
+  test("DeepSeek with tool calls includes reasoning_content in providerOptions", async () => {
     const msgs = [
       {
         role: "assistant",
@@ -238,7 +238,7 @@
describe("ProviderTransform.message - DeepSeek reasoning content", () => { }, ] as any[] - const result = ProviderTransform.message( + const result = await ProviderTransform.message( msgs, { id: "deepseek/deepseek-chat", @@ -289,7 +289,7 @@ describe("ProviderTransform.message - DeepSeek reasoning content", () => { expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...") }) - test("Non-DeepSeek providers leave reasoning content unchanged", () => { + test("Non-DeepSeek providers leave reasoning content unchanged", async () => { const msgs = [ { role: "assistant", @@ -300,7 +300,7 @@ describe("ProviderTransform.message - DeepSeek reasoning content", () => { }, ] as any[] - const result = ProviderTransform.message( + const result = await ProviderTransform.message( msgs, { id: "openai/gpt-4", @@ -378,7 +378,7 @@ describe("ProviderTransform.message - empty image handling", () => { headers: {}, } as any - test("should replace empty base64 image with error text", () => { + test("should replace empty base64 image with error text", async () => { const msgs = [ { role: "user", @@ -389,7 +389,7 @@ describe("ProviderTransform.message - empty image handling", () => { }, ] as any[] - const result = ProviderTransform.message(msgs, mockModel, {}) + const result = await ProviderTransform.message(msgs, mockModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(2) @@ -400,7 +400,7 @@ describe("ProviderTransform.message - empty image handling", () => { }) }) - test("should keep valid base64 images unchanged", () => { + test("should keep valid base64 images unchanged", async () => { const validBase64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==" const msgs = [ @@ -413,7 +413,7 @@ describe("ProviderTransform.message - empty image handling", () => { }, ] as any[] - const result = ProviderTransform.message(msgs, mockModel, {}) + const result = await 
ProviderTransform.message(msgs, mockModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(2) @@ -421,7 +421,7 @@ describe("ProviderTransform.message - empty image handling", () => { expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` }) }) - test("should handle mixed valid and empty images", () => { + test("should handle mixed valid and empty images", async () => { const validBase64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==" const msgs = [ @@ -435,7 +435,7 @@ describe("ProviderTransform.message - empty image handling", () => { }, ] as any[] - const result = ProviderTransform.message(msgs, mockModel, {}) + const result = await ProviderTransform.message(msgs, mockModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(3) @@ -481,21 +481,21 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => headers: {}, } as any - test("filters out messages with empty string content", () => { + test("filters out messages with empty string content", async () => { const msgs = [ { role: "user", content: "Hello" }, { role: "assistant", content: "" }, { role: "user", content: "World" }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel, {}) + const result = await ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(2) expect(result[0].content).toBe("Hello") expect(result[1].content).toBe("World") }) - test("filters out empty text parts from array content", () => { + test("filters out empty text parts from array content", async () => { const msgs = [ { role: "assistant", @@ -507,14 +507,14 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel, {}) + const result = await ProviderTransform.message(msgs, anthropicModel, {}) 
expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(1) expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" }) }) - test("filters out empty reasoning parts from array content", () => { + test("filters out empty reasoning parts from array content", async () => { const msgs = [ { role: "assistant", @@ -526,14 +526,14 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel, {}) + const result = await ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(1) expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" }) }) - test("removes entire message when all parts are empty", () => { + test("removes entire message when all parts are empty", async () => { const msgs = [ { role: "user", content: "Hello" }, { @@ -546,14 +546,14 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => { role: "user", content: "World" }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel, {}) + const result = await ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(2) expect(result[0].content).toBe("Hello") expect(result[1].content).toBe("World") }) - test("keeps non-text/reasoning parts even if text parts are empty", () => { + test("keeps non-text/reasoning parts even if text parts are empty", async () => { const msgs = [ { role: "assistant", @@ -564,7 +564,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel, {}) + const result = await ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(1) @@ -576,7 +576,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }) }) - test("keeps messages with 
valid text alongside empty parts", () => { + test("keeps messages with valid text alongside empty parts", async () => { const msgs = [ { role: "assistant", @@ -588,7 +588,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel, {}) + const result = await ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(2) @@ -596,7 +596,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => expect(result[0].content[1]).toEqual({ type: "text", text: "Result" }) }) - test("does not filter for non-anthropic providers", () => { + test("does not filter for non-anthropic providers", async () => { const openaiModel = { ...anthropicModel, providerID: "openai", @@ -615,7 +615,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = ProviderTransform.message(msgs, openaiModel, {}) + const result = await ProviderTransform.message(msgs, openaiModel, {}) expect(result).toHaveLength(2) expect(result[0].content).toBe("") @@ -649,7 +649,7 @@ describe("ProviderTransform.message - strip openai metadata when store=false", ( headers: {}, } as any - test("preserves itemId and reasoningEncryptedContent when store=false", () => { + test("strips itemId and reasoningEncryptedContent when store=false", async () => { const msgs = [ { role: "assistant", @@ -677,14 +677,14 @@ describe("ProviderTransform.message - strip openai metadata when store=false", ( }, ] as any[] - const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[] + const result = (await ProviderTransform.message(msgs, openaiModel, { store: false })) as any[] expect(result).toHaveLength(1) - expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123") - expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456") + 
expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined() + expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined() }) - test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => { + test("strips itemId and reasoningEncryptedContent when store=false even when not openai", async () => { const zenModel = { ...openaiModel, providerID: "zen", @@ -716,14 +716,14 @@ describe("ProviderTransform.message - strip openai metadata when store=false", ( }, ] as any[] - const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[] + const result = (await ProviderTransform.message(msgs, zenModel, { store: false })) as any[] expect(result).toHaveLength(1) - expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123") - expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456") + expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined() + expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined() }) - test("preserves other openai options including itemId", () => { + test("preserves other openai options when stripping itemId", async () => { const msgs = [ { role: "assistant", @@ -742,13 +742,13 @@ describe("ProviderTransform.message - strip openai metadata when store=false", ( }, ] as any[] - const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[] + const result = (await ProviderTransform.message(msgs, openaiModel, { store: false })) as any[] - expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123") + expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined() expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value") }) - test("preserves metadata for openai package when store is true", () => { + test("strips metadata for openai package even when store is true", async () => { const msgs = [ { role: "assistant", @@ -766,13 
+766,13 @@ describe("ProviderTransform.message - strip openai metadata when store=false", ( }, ] as any[] - // openai package preserves itemId regardless of store value - const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[] + // openai package always strips itemId regardless of store value + const result = (await ProviderTransform.message(msgs, openaiModel, { store: true })) as any[] - expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123") + expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined() }) - test("preserves metadata for non-openai packages when store is false", () => { + test("strips metadata for non-openai packages when store is false", async () => { const anthropicModel = { ...openaiModel, providerID: "anthropic", @@ -799,13 +799,13 @@ describe("ProviderTransform.message - strip openai metadata when store=false", ( }, ] as any[] - // store=false preserves metadata for non-openai packages - const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[] + // store=false triggers stripping even for non-openai packages + const result = (await ProviderTransform.message(msgs, anthropicModel, { store: false })) as any[] - expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123") + expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined() }) - test("preserves metadata using providerID key when store is false", () => { + test("strips metadata using providerID key when store is false", async () => { const opencodeModel = { ...openaiModel, providerID: "opencode", @@ -833,13 +833,13 @@ describe("ProviderTransform.message - strip openai metadata when store=false", ( }, ] as any[] - const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[] + const result = (await ProviderTransform.message(msgs, opencodeModel, { store: false })) as any[] - 
expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123") + expect(result[0].content[0].providerOptions?.opencode?.itemId).toBeUndefined() expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value") }) - test("preserves itemId across all providerOptions keys", () => { + test("strips itemId across all providerOptions keys", async () => { const opencodeModel = { ...openaiModel, providerID: "opencode", @@ -871,17 +871,17 @@ describe("ProviderTransform.message - strip openai metadata when store=false", ( }, ] as any[] - const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[] + const result = (await ProviderTransform.message(msgs, opencodeModel, { store: false })) as any[] - expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root") - expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode") - expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra") - expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part") - expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part") - expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part") + expect(result[0].providerOptions?.openai?.itemId).toBeUndefined() + expect(result[0].providerOptions?.opencode?.itemId).toBeUndefined() + expect(result[0].providerOptions?.extra?.itemId).toBeUndefined() + expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined() + expect(result[0].content[0].providerOptions?.opencode?.itemId).toBeUndefined() + expect(result[0].content[0].providerOptions?.extra?.itemId).toBeUndefined() }) - test("does not strip metadata for non-openai packages when store is not false", () => { + test("does not strip metadata for non-openai packages when store is not false", async () => { const anthropicModel = { ...openaiModel, providerID: "anthropic", @@ -908,94 +908,12 @@ describe("ProviderTransform.message - strip openai 
metadata when store=false", ( }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[] + const result = (await ProviderTransform.message(msgs, anthropicModel, {})) as any[] expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123") }) }) -describe("ProviderTransform.message - providerOptions key remapping", () => { - const createModel = (providerID: string, npm: string) => - ({ - id: `${providerID}/test-model`, - providerID, - api: { - id: "test-model", - url: "https://api.test.com", - npm, - }, - name: "Test Model", - capabilities: { - temperature: true, - reasoning: false, - attachment: true, - toolcall: true, - input: { text: true, audio: false, image: true, video: false, pdf: true }, - output: { text: true, audio: false, image: false, video: false, pdf: false }, - interleaved: false, - }, - cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } }, - limit: { context: 128000, output: 8192 }, - status: "active", - options: {}, - headers: {}, - }) as any - - test("azure keeps 'azure' key and does not remap to 'openai'", () => { - const model = createModel("azure", "@ai-sdk/azure") - const msgs = [ - { - role: "user", - content: "Hello", - providerOptions: { - azure: { someOption: "value" }, - }, - }, - ] as any[] - - const result = ProviderTransform.message(msgs, model, {}) - - expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" }) - expect(result[0].providerOptions?.openai).toBeUndefined() - }) - - test("openai with github-copilot npm remaps providerID to 'openai'", () => { - const model = createModel("github-copilot", "@ai-sdk/github-copilot") - const msgs = [ - { - role: "user", - content: "Hello", - providerOptions: { - "github-copilot": { someOption: "value" }, - }, - }, - ] as any[] - - const result = ProviderTransform.message(msgs, model, {}) - - expect(result[0].providerOptions?.openai).toEqual({ someOption: "value" }) - 
expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined() - }) - - test("bedrock remaps providerID to 'bedrock' key", () => { - const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock") - const msgs = [ - { - role: "user", - content: "Hello", - providerOptions: { - "my-bedrock": { someOption: "value" }, - }, - }, - ] as any[] - - const result = ProviderTransform.message(msgs, model, {}) - - expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" }) - expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined() - }) -}) - describe("ProviderTransform.variants", () => { const createMockModel = (overrides: Partial = {}): any => ({ id: "test/test-model", diff --git a/packages/sdk/js/src/v2/gen/types.gen.ts b/packages/sdk/js/src/v2/gen/types.gen.ts index 75540f90724..08cd1c98451 100644 --- a/packages/sdk/js/src/v2/gen/types.gen.ts +++ b/packages/sdk/js/src/v2/gen/types.gen.ts @@ -760,6 +760,7 @@ export type Session = { snapshot?: string diff?: string } + cacheInvalidated?: boolean } export type EventSessionCreated = {