@@ -458,7 +457,7 @@ function groupIntoUserPrompts(data: LlmPromptAndResponse[]): AllPromptsForUserIn
if (currentGroup.length > 0) {
const totalCompletionTokens = currentGroup.reduce((sum, item) => {
const usage = item.usage;
- return sum + usage.promptTokens;
+ return sum + usage.inputTokens;
}, 0);
const cachedCompletionTokens = currentGroup.reduce((sum, item) => {
const usage = item.usage;
@@ -466,7 +465,7 @@ function groupIntoUserPrompts(data: LlmPromptAndResponse[]): AllPromptsForUserIn
}, 0);
const totalOutputTokens = currentGroup.reduce((sum, item) => {
const usage = item.usage;
- return sum + usage.completionTokens;
+ return sum + usage.outputTokens;
}, 0);
const totalChefTokens = currentGroup.reduce((sum, item) => {
return sum + (item.chefTokens || 0);
@@ -593,16 +592,16 @@ export default function DebugAllPromptsForChat({ chatInitialId, onClose, isDebug
const totals = userPromptGroups.reduce(
(acc, group) => {
return {
- promptTokens: acc.promptTokens + group.summary.totalCompletionTokens,
+ inputTokens: acc.inputTokens + group.summary.totalCompletionTokens,
cachedPromptTokens: acc.cachedPromptTokens + group.summary.cachedCompletionTokens,
- completionTokens: acc.completionTokens + group.summary.totalOutputTokens,
+ outputTokens: acc.outputTokens + group.summary.totalOutputTokens,
chefTokens: acc.chefTokens + group.summary.totalChefTokens,
};
},
{
- promptTokens: 0,
+ inputTokens: 0,
cachedPromptTokens: 0,
- completionTokens: 0,
+ outputTokens: 0,
chefTokens: 0,
},
);
@@ -664,14 +663,14 @@ export default function DebugAllPromptsForChat({ chatInitialId, onClose, isDebug
- {formatNumber(totals.promptTokens - totals.cachedPromptTokens)}
+ {formatNumber(totals.inputTokens - totals.cachedPromptTokens)}
{' '}
total prompt tokens
{totals.cachedPromptTokens ? ` (+${formatNumber(totals.cachedPromptTokens)} cached)` : ''}
- {formatNumber(totals.completionTokens)}
+ {formatNumber(totals.outputTokens)}
{' '}
total completion tokens
diff --git a/app/components/chat/AssistantMessage.tsx b/app/components/chat/AssistantMessage.tsx
index 92016a78b..c4d69f836 100644
--- a/app/components/chat/AssistantMessage.tsx
+++ b/app/components/chat/AssistantMessage.tsx
@@ -1,6 +1,6 @@
import { memo, useMemo } from 'react';
import { Markdown } from './Markdown';
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import { ToolCall } from './ToolCall';
import { makePartId, type PartId } from 'chef-agent/partId.js';
import { ExclamationTriangleIcon, DotFilledIcon } from '@radix-ui/react-icons';
@@ -10,7 +10,7 @@ import { calculateChefTokens, usageFromGeneration, type ChefTokenBreakdown } fro
import { captureMessage } from '@sentry/remix';
interface AssistantMessageProps {
- message: Message;
+ message: UIMessage;
}
export const AssistantMessage = memo(function AssistantMessage({ message }: AssistantMessageProps) {
@@ -64,7 +64,7 @@ function AssistantMessagePart({
partId,
parsedAnnotations,
}: {
- part: NonNullable
[number];
+ part: NonNullable[number];
showUsageAnnotations: boolean;
partId: PartId;
parsedAnnotations: ReturnType;
@@ -138,7 +138,7 @@ function displayChefTokenNumber(num: number) {
function displayUsage(usageAnnotation: UsageAnnotation, provider: ProviderType, showUsageAnnotations: boolean) {
const usage: Usage = usageFromGeneration({
usage: usageAnnotation,
- providerMetadata: usageAnnotation.providerMetadata,
+ providerOptions: usageAnnotation.providerOptions,
});
const { chefTokens, breakdown } = calculateChefTokens(usage, provider);
return (
@@ -151,20 +151,20 @@ function displayUsage(usageAnnotation: UsageAnnotation, provider: ProviderType,
function displayBreakdownForSingleAnnotation(breakdown: ChefTokenBreakdown) {
// A single annotation should always have a single provider.
- if (breakdown.completionTokens.anthropic > 0) {
- return `${displayChefTokenNumber(breakdown.promptTokens.anthropic.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.anthropic.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.anthropic)} completion`;
+ if (breakdown.outputTokens.anthropic > 0) {
+ return `${displayChefTokenNumber(breakdown.inputTokens.anthropic.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.anthropic.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.anthropic)} completion`;
}
- if (breakdown.completionTokens.openai > 0) {
- return `${displayChefTokenNumber(breakdown.promptTokens.openai.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.openai.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.openai)} completion`;
+ if (breakdown.outputTokens.openai > 0) {
+ return `${displayChefTokenNumber(breakdown.inputTokens.openai.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.openai.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.openai)} completion`;
}
- if (breakdown.completionTokens.xai > 0) {
- return `${displayChefTokenNumber(breakdown.promptTokens.xai.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.xai.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.xai)} completion`;
+ if (breakdown.outputTokens.xai > 0) {
+ return `${displayChefTokenNumber(breakdown.inputTokens.xai.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.xai.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.xai)} completion`;
}
- if (breakdown.completionTokens.google > 0) {
- return `${displayChefTokenNumber(breakdown.promptTokens.google.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.google.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.google)} completion`;
+ if (breakdown.outputTokens.google > 0) {
+ return `${displayChefTokenNumber(breakdown.inputTokens.google.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.google.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.google)} completion`;
}
- if (breakdown.completionTokens.bedrock > 0) {
- return `${displayChefTokenNumber(breakdown.promptTokens.bedrock.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.bedrock.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.bedrock)} completion`;
+ if (breakdown.outputTokens.bedrock > 0) {
+ return `${displayChefTokenNumber(breakdown.inputTokens.bedrock.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.bedrock.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.bedrock)} completion`;
}
return 'unknown';
}
diff --git a/app/components/chat/BaseChat.client.tsx b/app/components/chat/BaseChat.client.tsx
index 90c020e0a..26cf9715f 100644
--- a/app/components/chat/BaseChat.client.tsx
+++ b/app/components/chat/BaseChat.client.tsx
@@ -1,5 +1,5 @@
import { Sheet } from '@ui/Sheet';
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import React, { type ReactNode, type RefCallback, useCallback, useEffect, useMemo, useState } from 'react';
import Landing from '~/components/landing/Landing';
import { Workbench } from '~/components/workbench/Workbench.client';
@@ -48,7 +48,7 @@ interface BaseChatProps {
streamStatus: 'streaming' | 'submitted' | 'ready' | 'error';
currentError: Error | undefined;
toolStatus: ToolStatus;
- messages: Message[];
+ messages: UIMessage[];
terminalInitializationOptions: TerminalInitializationOptions | undefined;
disableChatMessage: ReactNode | string | null;
diff --git a/app/components/chat/Chat.tsx b/app/components/chat/Chat.tsx
index 9504a6d3f..c491ebcba 100644
--- a/app/components/chat/Chat.tsx
+++ b/app/components/chat/Chat.tsx
@@ -1,5 +1,5 @@
import { useStore } from '@nanostores/react';
-import type { Message, UIMessage } from 'ai';
+import type { UIMessage } from 'ai';
import { useChat } from '@ai-sdk/react';
import { useAnimate } from 'framer-motion';
import { memo, useCallback, useEffect, useMemo, useRef, useState, type ReactNode } from 'react';
@@ -54,12 +54,12 @@ const MAX_RETRIES = 4;
const processSampledMessages = createSampler(
(options: {
- messages: Message[];
- initialMessages: Message[];
- parseMessages: (messages: Message[]) => void;
+ messages: UIMessage[];
+ initialMessages: UIMessage[];
+ parseMessages: (messages: UIMessage[]) => void;
streamStatus: 'streaming' | 'submitted' | 'ready' | 'error';
storeMessageHistory: (
- messages: Message[],
+ messages: UIMessage[],
streamStatus: 'streaming' | 'submitted' | 'ready' | 'error',
) => Promise;
}) => {
@@ -74,10 +74,10 @@ const processSampledMessages = createSampler(
);
interface ChatProps {
- initialMessages: Message[];
+ initialMessages: UIMessage[];
partCache: PartCache;
storeMessageHistory: (
- messages: Message[],
+ messages: UIMessage[],
streamStatus: 'streaming' | 'submitted' | 'ready' | 'error',
) => Promise;
initializeChat: () => Promise;
@@ -283,7 +283,7 @@ export const Chat = memo(
const { messages, status, stop, append, setMessages, reload, error } = useChat({
initialMessages,
api: '/api/chat',
- sendExtraMessageFields: true,
+
experimental_prepareRequestBody: ({ messages }) => {
const chatInitialId = initialIdStore.get();
const deploymentName = convexProjectStore.get()?.deploymentName;
diff --git a/app/components/chat/Messages.client.tsx b/app/components/chat/Messages.client.tsx
index 987fc56cf..51bbbbdc1 100644
--- a/app/components/chat/Messages.client.tsx
+++ b/app/components/chat/Messages.client.tsx
@@ -1,4 +1,4 @@
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import { Fragment, useCallback, useState } from 'react';
import { classNames } from '~/utils/classNames';
import { AssistantMessage } from './AssistantMessage';
@@ -20,7 +20,7 @@ interface MessagesProps {
id?: string;
className?: string;
isStreaming?: boolean;
- messages?: Message[];
+ messages?: UIMessage[];
subchatsLength?: number;
onRewindToMessage?: (subchatIndex?: number, messageIndex?: number) => void;
}
diff --git a/app/components/debug/UsageBreakdownView.tsx b/app/components/debug/UsageBreakdownView.tsx
index 3fd1635ce..186c84586 100644
--- a/app/components/debug/UsageBreakdownView.tsx
+++ b/app/components/debug/UsageBreakdownView.tsx
@@ -1,4 +1,4 @@
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import { useEffect } from 'react';
@@ -89,7 +89,7 @@ export function UsageBreakdownView({
fileContent: Blob | null;
convexSiteUrl: string;
}) {
- const [messages, setMessages] = useState([]);
+ const [messages, setMessages] = useState([]);
const [usageData, setUsageData] = useState(null);
const convex = useConvex();
useEffect(() => {
@@ -238,18 +238,18 @@ function BreakdownView({
startOpen?: boolean;
}) {
const tokensData = {
- 'Prompt - Anthropic (Uncached)': chefBreakdown.promptTokens.anthropic.uncached,
- 'Prompt - Anthropic (Cached)': chefBreakdown.promptTokens.anthropic.cached,
- 'Prompt - OpenAI (Uncached)': chefBreakdown.promptTokens.openai.uncached,
- 'Prompt - OpenAI (Cached)': chefBreakdown.promptTokens.openai.cached,
- 'Prompt - XAI (Uncached)': chefBreakdown.promptTokens.xai.uncached,
- 'Prompt - XAI (Cached)': chefBreakdown.promptTokens.xai.cached,
- 'Prompt - Google (Uncached)': chefBreakdown.promptTokens.google.uncached,
- 'Prompt - Google (Cached)': chefBreakdown.promptTokens.google.cached,
- 'Completion - Anthropic': chefBreakdown.completionTokens.anthropic,
- 'Completion - OpenAI': chefBreakdown.completionTokens.openai,
- 'Completion - XAI': chefBreakdown.completionTokens.xai,
- 'Completion - Google': chefBreakdown.completionTokens.google,
+ 'Prompt - Anthropic (Uncached)': chefBreakdown.inputTokens.anthropic.uncached,
+ 'Prompt - Anthropic (Cached)': chefBreakdown.inputTokens.anthropic.cached,
+ 'Prompt - OpenAI (Uncached)': chefBreakdown.inputTokens.openai.uncached,
+ 'Prompt - OpenAI (Cached)': chefBreakdown.inputTokens.openai.cached,
+ 'Prompt - XAI (Uncached)': chefBreakdown.inputTokens.xai.uncached,
+ 'Prompt - XAI (Cached)': chefBreakdown.inputTokens.xai.cached,
+ 'Prompt - Google (Uncached)': chefBreakdown.inputTokens.google.uncached,
+ 'Prompt - Google (Cached)': chefBreakdown.inputTokens.google.cached,
+ 'Completion - Anthropic': chefBreakdown.outputTokens.anthropic,
+ 'Completion - OpenAI': chefBreakdown.outputTokens.openai,
+ 'Completion - XAI': chefBreakdown.outputTokens.xai,
+ 'Completion - Google': chefBreakdown.outputTokens.google,
};
return (
@@ -258,8 +258,8 @@ function BreakdownView({
-
Completion Tokens: {formatNumber(rawUsage.completionTokens)}
-
Prompt Tokens: {formatNumber(rawUsage.promptTokens)}
+
Completion Tokens: {formatNumber(rawUsage.outputTokens)}
+
Prompt Tokens: {formatNumber(rawUsage.inputTokens)}
Total Tokens: {formatNumber(rawUsage.totalTokens)}
@@ -270,8 +270,8 @@ function BreakdownView({
-
Completion Tokens: {formatNumber(billedUsage.completionTokens)}
-
Prompt Tokens: {formatNumber(billedUsage.promptTokens)}
+
Completion Tokens: {formatNumber(billedUsage.outputTokens)}
+
Prompt Tokens: {formatNumber(billedUsage.inputTokens)}
Total Tokens: {formatNumber(billedUsage.totalTokens)}
@@ -295,10 +295,10 @@ function BreakdownView({
);
}
-async function getUsageBreakdown(messages: Message[]) {
+async function getUsageBreakdown(messages: UIMessage[]) {
const chatTotalRawUsage = {
- completionTokens: 0,
- promptTokens: 0,
+ outputTokens: 0,
+ inputTokens: 0,
totalTokens: 0,
anthropicCacheCreationInputTokens: 0,
anthropicCacheReadInputTokens: 0,
@@ -308,8 +308,8 @@ async function getUsageBreakdown(messages: Message[]) {
googleThoughtsTokenCount: 0,
};
const chatTotalUsageBilledFor = {
- completionTokens: 0,
- promptTokens: 0,
+ outputTokens: 0,
+ inputTokens: 0,
totalTokens: 0,
anthropicCacheCreationInputTokens: 0,
anthropicCacheReadInputTokens: 0,
@@ -320,13 +320,13 @@ async function getUsageBreakdown(messages: Message[]) {
};
let chatTotalChefTokens = 0;
const chatTotalChefBreakdown: ChefBreakdown = {
- completionTokens: {
+ outputTokens: {
anthropic: 0,
openai: 0,
xai: 0,
google: 0,
},
- promptTokens: {
+ inputTokens: {
anthropic: {
uncached: 0,
cached: 0,
@@ -413,7 +413,7 @@ function getPartInfos({
usageAnnotationsForToolCalls,
providerAnnotationsForToolCalls,
}: {
- message: Message;
+ message: UIMessage;
usageAnnotationsForToolCalls: Record
;
providerAnnotationsForToolCalls: Record;
}) {
@@ -441,7 +441,7 @@ function getPartInfos({
const rawUsageForPart = usageAnnotationsForToolCalls[part.toolInvocation.toolCallId]
? usageFromGeneration({
usage: usageAnnotationsForToolCalls[part.toolInvocation.toolCallId]!,
- providerMetadata: usageAnnotationsForToolCalls[part.toolInvocation.toolCallId]?.providerMetadata,
+ providerOptions: usageAnnotationsForToolCalls[part.toolInvocation.toolCallId]?.providerOptions,
})
: initializeUsage();
const billedUsageForPart = rawUsageForPart;
@@ -461,7 +461,7 @@ function getPartInfos({
}
const finalUsage = usageFromGeneration({
usage: usageAnnotationsForToolCalls.final ?? initializeUsage(),
- providerMetadata: usageAnnotationsForToolCalls.final?.providerMetadata ?? undefined,
+ providerOptions: usageAnnotationsForToolCalls.final?.providerOptions ?? undefined,
});
const provider = providerAnnotationsForToolCalls.final?.provider ?? 'Anthropic';
const { chefTokens, breakdown } = calculateChefTokens(finalUsage, provider);
@@ -480,8 +480,8 @@ function getPartInfos({
}
function addUsage(usageA: Usage, update: Usage) {
- usageA.completionTokens += update.completionTokens;
- usageA.promptTokens += update.promptTokens;
+ usageA.outputTokens += update.outputTokens;
+ usageA.inputTokens += update.inputTokens;
usageA.totalTokens += update.totalTokens;
usageA.anthropicCacheCreationInputTokens += update.anthropicCacheCreationInputTokens;
usageA.anthropicCacheReadInputTokens += update.anthropicCacheReadInputTokens;
@@ -490,13 +490,13 @@ function addUsage(usageA: Usage, update: Usage) {
}
type ChefBreakdown = {
- completionTokens: {
+ outputTokens: {
anthropic: number;
openai: number;
xai: number;
google: number;
};
- promptTokens: {
+ inputTokens: {
anthropic: {
uncached: number;
cached: number;
@@ -517,18 +517,18 @@ type ChefBreakdown = {
};
function addBreakdown(breakdownA: ChefBreakdown, update: ChefBreakdown) {
- breakdownA.completionTokens.anthropic += update.completionTokens.anthropic;
- breakdownA.completionTokens.openai += update.completionTokens.openai;
- breakdownA.completionTokens.xai += update.completionTokens.xai;
- breakdownA.completionTokens.google += update.completionTokens.google;
- breakdownA.promptTokens.anthropic.cached += update.promptTokens.anthropic.cached;
- breakdownA.promptTokens.anthropic.uncached += update.promptTokens.anthropic.uncached;
- breakdownA.promptTokens.openai.cached += update.promptTokens.openai.cached;
- breakdownA.promptTokens.openai.uncached += update.promptTokens.openai.uncached;
- breakdownA.promptTokens.xai.cached += update.promptTokens.xai.cached;
- breakdownA.promptTokens.xai.uncached += update.promptTokens.xai.uncached;
- breakdownA.promptTokens.google.cached += update.promptTokens.google.cached;
- breakdownA.promptTokens.google.uncached += update.promptTokens.google.uncached;
+ breakdownA.outputTokens.anthropic += update.outputTokens.anthropic;
+ breakdownA.outputTokens.openai += update.outputTokens.openai;
+ breakdownA.outputTokens.xai += update.outputTokens.xai;
+ breakdownA.outputTokens.google += update.outputTokens.google;
+ breakdownA.inputTokens.anthropic.cached += update.inputTokens.anthropic.cached;
+ breakdownA.inputTokens.anthropic.uncached += update.inputTokens.anthropic.uncached;
+ breakdownA.inputTokens.openai.cached += update.inputTokens.openai.cached;
+ breakdownA.inputTokens.openai.uncached += update.inputTokens.openai.uncached;
+ breakdownA.inputTokens.xai.cached += update.inputTokens.xai.cached;
+ breakdownA.inputTokens.xai.uncached += update.inputTokens.xai.uncached;
+ breakdownA.inputTokens.google.cached += update.inputTokens.google.cached;
+ breakdownA.inputTokens.google.uncached += update.inputTokens.google.uncached;
}
function CollapsibleView({
diff --git a/app/lib/common/annotations.ts b/app/lib/common/annotations.ts
index 662f61eb2..deddf248c 100644
--- a/app/lib/common/annotations.ts
+++ b/app/lib/common/annotations.ts
@@ -1,4 +1,4 @@
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import { z } from 'zod';
// This is added as a message annotation by the server when the agent has
@@ -9,10 +9,10 @@ export const REPEATED_ERROR_REASON = 'repeated-errors';
export const usageAnnotationValidator = z.object({
toolCallId: z.string().optional(),
- completionTokens: z.number(),
- promptTokens: z.number(),
+ outputTokens: z.number(),
+ inputTokens: z.number(),
totalTokens: z.number(),
- providerMetadata: z
+ providerOptions: z
.object({
openai: z
.object({
@@ -72,7 +72,7 @@ export const annotationValidator = z.discriminatedUnion('type', [
}),
]);
-export const failedDueToRepeatedErrors = (annotations: Message['annotations']) => {
+export const failedDueToRepeatedErrors = (annotations: UIMessage['annotations']) => {
if (!annotations) {
return false;
}
@@ -83,7 +83,7 @@ export const failedDueToRepeatedErrors = (annotations: Message['annotations']) =
};
export const parseAnnotations = (
- annotations: Message['annotations'],
+ annotations: UIMessage['annotations'],
): {
failedDueToRepeatedErrors: boolean;
usageForToolCall: Record;
diff --git a/app/lib/common/usage.test.ts b/app/lib/common/usage.test.ts
index acf1ee760..a99a9a920 100644
--- a/app/lib/common/usage.test.ts
+++ b/app/lib/common/usage.test.ts
@@ -4,8 +4,8 @@ import { calculateChefTokens, initializeUsage } from './usage';
test('calculateChefTokensGoogle', () => {
const usage = {
...initializeUsage(),
- completionTokens: 100,
- promptTokens: 200,
+ outputTokens: 100,
+ inputTokens: 200,
totalTokens: 300,
googleCachedContentTokenCount: 50,
};
@@ -18,16 +18,16 @@ test('calculateChefTokensGoogle', () => {
// Total: 14000 + 2700 + 250 = 16950
expect(chefTokens).toBe(16950);
- expect(breakdown.completionTokens.google).toBe(14000);
- expect(breakdown.promptTokens.google.uncached).toBe(2700);
- expect(breakdown.promptTokens.google.cached).toBe(250);
+ expect(breakdown.outputTokens.google).toBe(14000);
+ expect(breakdown.inputTokens.google.uncached).toBe(2700);
+ expect(breakdown.inputTokens.google.cached).toBe(250);
});
test('calculateChefTokensGoogleNoCachedContent', () => {
const usage = {
...initializeUsage(),
- completionTokens: 100,
- promptTokens: 200,
+ outputTokens: 100,
+ inputTokens: 200,
totalTokens: 300,
googleCachedContentTokenCount: 0,
};
@@ -39,16 +39,16 @@ test('calculateChefTokensGoogleNoCachedContent', () => {
// Total: 14000 + 3600 = 17600
expect(chefTokens).toBe(17600);
- expect(breakdown.completionTokens.google).toBe(14000);
- expect(breakdown.promptTokens.google.uncached).toBe(3600);
- expect(breakdown.promptTokens.google.cached).toBe(0);
+ expect(breakdown.outputTokens.google).toBe(14000);
+ expect(breakdown.inputTokens.google.uncached).toBe(3600);
+ expect(breakdown.inputTokens.google.cached).toBe(0);
});
test('calculateChefTokensGoogleWithThoughtTokens', () => {
const usage = {
...initializeUsage(),
- completionTokens: 100,
- promptTokens: 200,
+ outputTokens: 100,
+ inputTokens: 200,
totalTokens: 300,
googleCachedContentTokenCount: 0,
googleThoughtsTokenCount: 50,
@@ -61,7 +61,7 @@ test('calculateChefTokensGoogleWithThoughtTokens', () => {
// Total: 21000 + 3600 = 24600
expect(chefTokens).toBe(24600);
- expect(breakdown.completionTokens.google).toBe(14000);
- expect(breakdown.promptTokens.google.uncached).toBe(3600);
- expect(breakdown.promptTokens.google.cached).toBe(0);
+ expect(breakdown.outputTokens.google).toBe(14000);
+ expect(breakdown.inputTokens.google.uncached).toBe(3600);
+ expect(breakdown.inputTokens.google.cached).toBe(0);
});
diff --git a/app/lib/common/usage.ts b/app/lib/common/usage.ts
index ddc29a68e..5e5ad9275 100644
--- a/app/lib/common/usage.ts
+++ b/app/lib/common/usage.ts
@@ -1,29 +1,29 @@
-import type { LanguageModelUsage, Message, ProviderMetadata } from 'ai';
+import type { LanguageModelUsage, UIMessage, ProviderMetadata } from 'ai';
import { type ProviderType, type Usage, type UsageAnnotation, parseAnnotations } from '~/lib/common/annotations';
import { captureMessage } from '@sentry/remix';
export function usageFromGeneration(generation: {
usage: LanguageModelUsage;
- providerMetadata?: ProviderMetadata;
+ providerOptions?: ProviderMetadata;
}): Usage {
return {
- completionTokens: generation.usage.completionTokens,
- promptTokens: generation.usage.promptTokens,
+ outputTokens: generation.usage.outputTokens,
+ inputTokens: generation.usage.inputTokens,
totalTokens: generation.usage.totalTokens,
- providerMetadata: generation.providerMetadata,
- anthropicCacheCreationInputTokens: Number(generation.providerMetadata?.anthropic?.cacheCreationInputTokens ?? 0),
- anthropicCacheReadInputTokens: Number(generation.providerMetadata?.anthropic?.cacheReadInputTokens ?? 0),
- openaiCachedPromptTokens: Number(generation.providerMetadata?.openai?.cachedPromptTokens ?? 0),
- xaiCachedPromptTokens: Number(generation.providerMetadata?.xai?.cachedPromptTokens ?? 0),
- googleCachedContentTokenCount: Number(generation.providerMetadata?.google?.cachedContentTokenCount ?? 0),
- googleThoughtsTokenCount: Number(generation.providerMetadata?.google?.thoughtsTokenCount ?? 0),
+ providerOptions: generation.providerOptions,
+ anthropicCacheCreationInputTokens: Number(generation.providerOptions?.anthropic?.cacheCreationInputTokens ?? 0),
+ anthropicCacheReadInputTokens: Number(generation.providerOptions?.anthropic?.cacheReadInputTokens ?? 0),
+ openaiCachedPromptTokens: Number(generation.providerOptions?.openai?.cachedPromptTokens ?? 0),
+ xaiCachedPromptTokens: Number(generation.providerOptions?.xai?.cachedPromptTokens ?? 0),
+ googleCachedContentTokenCount: Number(generation.providerOptions?.google?.cachedContentTokenCount ?? 0),
+ googleThoughtsTokenCount: Number(generation.providerOptions?.google?.thoughtsTokenCount ?? 0),
};
}
export function initializeUsage(): Usage {
return {
- completionTokens: 0,
- promptTokens: 0,
+ outputTokens: 0,
+ inputTokens: 0,
totalTokens: 0,
anthropicCacheCreationInputTokens: 0,
anthropicCacheReadInputTokens: 0,
@@ -34,7 +34,7 @@ export function initializeUsage(): Usage {
};
}
-export function getFailedToolCalls(message: Message): Set {
+export function getFailedToolCalls(message: UIMessage): Set {
const failedToolCalls: Set = new Set();
for (const part of message.parts ?? []) {
if (part.type !== 'tool-invocation') {
@@ -68,8 +68,8 @@ export function calculateTotalUsage(args: {
}
export async function calculateTotalBilledUsageForMessage(
- lastMessage: Message | undefined,
- finalGeneration: { usage: LanguageModelUsage; providerMetadata?: ProviderMetadata },
+ lastMessage: UIMessage | undefined,
+ finalGeneration: { usage: LanguageModelUsage; providerOptions?: ProviderMetadata },
): Promise {
const { usageForToolCall } = parseAnnotations(lastMessage?.annotations ?? []);
// If there's an annotation for the final part, start with an empty usage, otherwise, create a
@@ -83,25 +83,25 @@ export async function calculateTotalBilledUsageForMessage(
}
function addUsage(totalUsage: Usage, payload: UsageAnnotation) {
- totalUsage.completionTokens += payload.completionTokens;
- totalUsage.promptTokens += payload.promptTokens;
+ totalUsage.outputTokens += payload.outputTokens;
+ totalUsage.inputTokens += payload.inputTokens;
totalUsage.totalTokens += payload.totalTokens;
- totalUsage.anthropicCacheCreationInputTokens += payload.providerMetadata?.anthropic?.cacheCreationInputTokens ?? 0;
- totalUsage.anthropicCacheReadInputTokens += payload.providerMetadata?.anthropic?.cacheReadInputTokens ?? 0;
- totalUsage.openaiCachedPromptTokens += payload.providerMetadata?.openai?.cachedPromptTokens ?? 0;
- totalUsage.xaiCachedPromptTokens += payload.providerMetadata?.xai?.cachedPromptTokens ?? 0;
- totalUsage.googleCachedContentTokenCount += payload.providerMetadata?.google?.cachedContentTokenCount ?? 0;
+ totalUsage.anthropicCacheCreationInputTokens += payload.providerOptions?.anthropic?.cacheCreationInputTokens ?? 0;
+ totalUsage.anthropicCacheReadInputTokens += payload.providerOptions?.anthropic?.cacheReadInputTokens ?? 0;
+ totalUsage.openaiCachedPromptTokens += payload.providerOptions?.openai?.cachedPromptTokens ?? 0;
+ totalUsage.xaiCachedPromptTokens += payload.providerOptions?.xai?.cachedPromptTokens ?? 0;
+ totalUsage.googleCachedContentTokenCount += payload.providerOptions?.google?.cachedContentTokenCount ?? 0;
}
export type ChefTokenBreakdown = {
- completionTokens: {
+ outputTokens: {
anthropic: number;
openai: number;
xai: number;
google: number;
bedrock: number;
};
- promptTokens: {
+ inputTokens: {
anthropic: { uncached: number; cached: number };
openai: { uncached: number; cached: number };
xai: { uncached: number; cached: number };
@@ -116,14 +116,14 @@ export type ChefTokenBreakdown = {
export function calculateChefTokens(totalUsage: Usage, provider?: ProviderType) {
let chefTokens = 0;
const breakdown = {
- completionTokens: {
+ outputTokens: {
anthropic: 0,
openai: 0,
xai: 0,
google: 0,
bedrock: 0,
},
- promptTokens: {
+ inputTokens: {
anthropic: {
uncached: 0,
cached: 0,
@@ -147,57 +147,57 @@ export function calculateChefTokens(totalUsage: Usage, provider?: ProviderType)
},
};
if (provider === 'Anthropic') {
- const anthropicCompletionTokens = totalUsage.completionTokens * 200;
+ const anthropicCompletionTokens = totalUsage.outputTokens * 200;
chefTokens += anthropicCompletionTokens;
- breakdown.completionTokens.anthropic = anthropicCompletionTokens;
- const anthropicPromptTokens = totalUsage.promptTokens * 40;
+ breakdown.outputTokens.anthropic = anthropicCompletionTokens;
+ const anthropicPromptTokens = totalUsage.inputTokens * 40;
chefTokens += anthropicPromptTokens;
- breakdown.promptTokens.anthropic.uncached = anthropicPromptTokens;
+ breakdown.inputTokens.anthropic.uncached = anthropicPromptTokens;
const cacheCreationInputTokens = totalUsage.anthropicCacheCreationInputTokens * 40;
chefTokens += cacheCreationInputTokens;
- breakdown.promptTokens.anthropic.cached = cacheCreationInputTokens;
+ breakdown.inputTokens.anthropic.cached = cacheCreationInputTokens;
const cacheReadInputTokens = totalUsage.anthropicCacheReadInputTokens * 3;
chefTokens += cacheReadInputTokens;
- breakdown.promptTokens.anthropic.cached += cacheReadInputTokens;
+ breakdown.inputTokens.anthropic.cached += cacheReadInputTokens;
} else if (provider === 'Bedrock') {
- const bedrockCompletionTokens = totalUsage.completionTokens * 200;
+ const bedrockCompletionTokens = totalUsage.outputTokens * 200;
chefTokens += bedrockCompletionTokens;
- breakdown.completionTokens.bedrock = bedrockCompletionTokens;
- const bedrockPromptTokens = totalUsage.promptTokens * 40;
+ breakdown.outputTokens.bedrock = bedrockCompletionTokens;
+ const bedrockPromptTokens = totalUsage.inputTokens * 40;
chefTokens += bedrockPromptTokens;
- breakdown.promptTokens.bedrock.uncached = bedrockPromptTokens;
+ breakdown.inputTokens.bedrock.uncached = bedrockPromptTokens;
} else if (provider === 'OpenAI') {
- const openaiCompletionTokens = totalUsage.completionTokens * 100;
+ const openaiCompletionTokens = totalUsage.outputTokens * 100;
chefTokens += openaiCompletionTokens;
- breakdown.completionTokens.openai = openaiCompletionTokens;
+ breakdown.outputTokens.openai = openaiCompletionTokens;
const openaiCachedPromptTokens = totalUsage.openaiCachedPromptTokens * 5;
chefTokens += openaiCachedPromptTokens;
- breakdown.promptTokens.openai.cached = openaiCachedPromptTokens;
- const openaiUncachedPromptTokens = (totalUsage.promptTokens - totalUsage.openaiCachedPromptTokens) * 26;
+ breakdown.inputTokens.openai.cached = openaiCachedPromptTokens;
+ const openaiUncachedPromptTokens = (totalUsage.inputTokens - totalUsage.openaiCachedPromptTokens) * 26;
chefTokens += openaiUncachedPromptTokens;
- breakdown.promptTokens.openai.uncached = openaiUncachedPromptTokens;
+ breakdown.inputTokens.openai.uncached = openaiUncachedPromptTokens;
} else if (provider === 'XAI') {
// TODO: This is a guess. Billing like anthropic
- const xaiCompletionTokens = totalUsage.completionTokens * 200;
+ const xaiCompletionTokens = totalUsage.outputTokens * 200;
chefTokens += xaiCompletionTokens;
- breakdown.completionTokens.xai = xaiCompletionTokens;
- const xaiPromptTokens = totalUsage.promptTokens * 40;
+ breakdown.outputTokens.xai = xaiCompletionTokens;
+ const xaiPromptTokens = totalUsage.inputTokens * 40;
chefTokens += xaiPromptTokens;
- breakdown.promptTokens.xai.uncached = xaiPromptTokens;
+ breakdown.inputTokens.xai.uncached = xaiPromptTokens;
// TODO - never seen xai set this field to anything but 0, so holding off until we understand.
//chefTokens += totalUsage.xaiCachedPromptTokens * 3;
} else if (provider === 'Google') {
- const googleCompletionTokens = totalUsage.completionTokens * 140;
+ const googleCompletionTokens = totalUsage.outputTokens * 140;
chefTokens += googleCompletionTokens;
const googleThoughtTokens = totalUsage.googleThoughtsTokenCount * 140;
chefTokens += googleThoughtTokens;
- breakdown.completionTokens.google = googleCompletionTokens;
- const googlePromptTokens = (totalUsage.promptTokens - totalUsage.googleCachedContentTokenCount) * 18;
+ breakdown.outputTokens.google = googleCompletionTokens;
+ const googlePromptTokens = (totalUsage.inputTokens - totalUsage.googleCachedContentTokenCount) * 18;
chefTokens += googlePromptTokens;
- breakdown.promptTokens.google.uncached = googlePromptTokens;
+ breakdown.inputTokens.google.uncached = googlePromptTokens;
const googleCachedContentTokens = totalUsage.googleCachedContentTokenCount * 5;
chefTokens += googleCachedContentTokens;
- breakdown.promptTokens.google.cached = googleCachedContentTokens;
+ breakdown.inputTokens.google.cached = googleCachedContentTokens;
} else {
captureMessage('WARNING: Unknown provider. Not recording usage. Giving away for free.', {
level: 'error',
diff --git a/app/lib/hooks/useDebugPrompt.ts b/app/lib/hooks/useDebugPrompt.ts
index e5398daa1..b28cc4762 100644
--- a/app/lib/hooks/useDebugPrompt.ts
+++ b/app/lib/hooks/useDebugPrompt.ts
@@ -1,13 +1,13 @@
import { useConvex, useMutation, useQuery } from 'convex/react';
import { useQueries as useReactQueries } from '@tanstack/react-query';
import { api } from '@convex/_generated/api';
-import type { CoreMessage } from 'ai';
+import type { ModelMessage } from 'ai';
import { decompressWithLz4 } from '~/lib/compression.client';
import { queryClientStore } from '~/lib/stores/reactQueryClient';
import { useEffect, useState } from 'react';
import { getConvexAuthToken } from '~/lib/stores/sessionId';
-async function fetchPromptData(url: string): Promise<CoreMessage[]> {
+async function fetchPromptData(url: string): Promise<ModelMessage[]> {
const response = await fetch(url);
if (!response.ok) {
throw new Error(`Failed to fetch prompt data: ${response.statusText}`);
@@ -17,7 +17,7 @@ async function fetchPromptData(url: string): Promise<CoreMessage[]> {
const decompressedData = decompressWithLz4(new Uint8Array(compressedData));
const textDecoder = new TextDecoder();
const jsonString = textDecoder.decode(decompressedData);
- return JSON.parse(jsonString) as CoreMessage[];
+ return JSON.parse(jsonString) as ModelMessage[];
}
export function useAuthToken() {
diff --git a/app/lib/hooks/useMessageParser.ts b/app/lib/hooks/useMessageParser.ts
index c12abd2f0..65a8d3c87 100644
--- a/app/lib/hooks/useMessageParser.ts
+++ b/app/lib/hooks/useMessageParser.ts
@@ -1,4 +1,4 @@
-import type { Message, UIMessage } from 'ai';
+import type { UIMessage } from 'ai';
import { useCallback, useRef, useState } from 'react';
import { StreamingMessageParser } from 'chef-agent/message-parser';
import { workbenchStore } from '~/lib/stores/workbench.client';
@@ -52,9 +52,9 @@ function isPartMaybeEqual(a: Part, b: Part): boolean {
}
export function processMessage(
- message: Message,
+ message: UIMessage,
previousParts: PartCache,
-): { message: Message; hitRate: [number, number] } {
+): { message: UIMessage; hitRate: [number, number] } {
if (message.role === 'user') {
return { message, hitRate: [0, 0] };
}
@@ -132,13 +132,13 @@ export function processMessage(
type Part = UIMessage['parts'][number];
export function useMessageParser(partCache: PartCache) {
- const [parsedMessages, setParsedMessages] = useState<Message[]>([]);
+ const [parsedMessages, setParsedMessages] = useState<UIMessage[]>([]);
- const previousMessages = useRef<{ original: Message; parsed: Message }[]>([]);
+ const previousMessages = useRef<{ original: UIMessage; parsed: UIMessage }[]>([]);
const previousParts = useRef(partCache);
- const parseMessages = useCallback((messages: Message[]) => {
- const nextPrevMessages: { original: Message; parsed: Message }[] = [];
+ const parseMessages = useCallback((messages: UIMessage[]) => {
+ const nextPrevMessages: { original: UIMessage; parsed: UIMessage }[] = [];
for (let i = 0; i < messages.length; i++) {
const prev = previousMessages.current[i];
diff --git a/app/lib/runtime/action-runner.ts b/app/lib/runtime/action-runner.ts
index 4031cc95b..b6beddedf 100644
--- a/app/lib/runtime/action-runner.ts
+++ b/app/lib/runtime/action-runner.ts
@@ -215,7 +215,7 @@ export class ActionRunner {
try {
switch (action.type) {
case 'file': {
- await this.#runFileAction(action);
+ await this.#runFileAction(action.file);
break;
}
case 'toolUse': {
@@ -309,9 +309,12 @@ export class ActionRunner {
await this.#runFileAction({
type: 'file',
- filePath: historyPath,
- content: JSON.stringify(history),
- changeSource: 'auto-save',
+
+ file: {
+ filePath: historyPath,
+ content: JSON.stringify(history),
+ changeSource: 'auto-save'
+ }
} as any);
}
diff --git a/app/lib/stores/files.ts b/app/lib/stores/files.ts
index 2fc2152eb..a122844fe 100644
--- a/app/lib/stores/files.ts
+++ b/app/lib/stores/files.ts
@@ -117,7 +117,14 @@ export class FilesStore {
}
// we immediately update the file and don't rely on the `change` event coming from the watcher
- this.files.setKey(filePath, { type: 'file', content, isBinary: false });
+ this.files.setKey(filePath, {
+ type: 'file',
+
+ file: {
+ content,
+ isBinary: false
+ }
+ });
this.userWrites.set(filePath, Date.now());
logger.info('File updated');
@@ -166,7 +173,14 @@ export class FilesStore {
if (!isBinary) {
content = this.#decodeFileContent(buffer);
}
- this.files.setKey(getAbsolutePath(absPath), { type: 'file', content, isBinary });
+ this.files.setKey(getAbsolutePath(absPath), {
+ type: 'file',
+
+ file: {
+ content,
+ isBinary
+ }
+ });
};
await Promise.all(absFilePaths.map(loadFile));
}
@@ -215,7 +229,14 @@ export class FilesStore {
content = this.#decodeFileContent(buffer);
}
- this.files.setKey(getAbsolutePath(sanitizedPath), { type: 'file', content, isBinary });
+ this.files.setKey(getAbsolutePath(sanitizedPath), {
+ type: 'file',
+
+ file: {
+ content,
+ isBinary
+ }
+ });
break;
}
diff --git a/app/lib/stores/startup/history.ts b/app/lib/stores/startup/history.ts
index d67922adc..1203b04af 100644
--- a/app/lib/stores/startup/history.ts
+++ b/app/lib/stores/startup/history.ts
@@ -1,4 +1,4 @@
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import { useConvex, useQuery, type ConvexReactClient } from 'convex/react';
import { useConvexSessionIdOrNullOrLoading, waitForConvexSessionId } from '~/lib/stores/sessionId';
import { getFileUpdateCounter, waitForFileUpdateCounterChanged } from '~/lib/stores/fileUpdateCounter';
@@ -25,7 +25,7 @@ const logger = createScopedLogger('history');
const BACKUP_DEBOUNCE_MS = 1000;
-export function useBackupSyncState(chatId: string, loadedSubchatIndex?: number, initialMessages?: Message[]) {
+export function useBackupSyncState(chatId: string, loadedSubchatIndex?: number, initialMessages?: UIMessage[]) {
const convex = useConvex();
const subchatIndex = useStore(subchatIndexStore);
const sessionId = useConvexSessionIdOrNullOrLoading();
diff --git a/app/lib/stores/startup/reloadMessages.ts b/app/lib/stores/startup/reloadMessages.ts
index 77c48c6c8..afa958302 100644
--- a/app/lib/stores/startup/reloadMessages.ts
+++ b/app/lib/stores/startup/reloadMessages.ts
@@ -1,4 +1,4 @@
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import { useEffect, useState } from 'react';
import { makePartId } from 'chef-agent/partId';
import { toast } from 'sonner';
@@ -11,7 +11,7 @@ export type ReloadedMessages = {
partCache: PartCache;
};
-export function useReloadMessages(initialMessages: Message[] | undefined): ReloadedMessages | undefined {
+export function useReloadMessages(initialMessages: UIMessage[] | undefined): ReloadedMessages | undefined {
const [reloadState, setReloadState] = useState<ReloadedMessages | undefined>(undefined);
const subchatIndex = useStore(subchatIndexStore);
useEffect(() => {
diff --git a/app/utils/diff.ts b/app/utils/diff.ts
index 8709e087c..e491da643 100644
--- a/app/utils/diff.ts
+++ b/app/utils/diff.ts
@@ -32,7 +32,9 @@ export function computeFileModifications(files: FileMap, modifiedFiles: Map<string, string>) {
    if (unifiedDiff.length > file.content.length) {
// if there are lots of changes we simply grab the current file content since it's smaller than the diff
- modifications[filePath] = { type: 'file', content: file.content };
+ modifications[filePath] = { type: 'file', file: {
+ content: file.content
+ } };
} else {
// otherwise we use the diff since it's smaller
modifications[filePath] = { type: 'diff', content: unifiedDiff };
diff --git a/app/utils/fileUtils.ts b/app/utils/fileUtils.ts
index f59de1646..54fb17787 100644
--- a/app/utils/fileUtils.ts
+++ b/app/utils/fileUtils.ts
@@ -67,5 +67,12 @@ export async function readPath(
// try it as a file below.
}
const content = await container.fs.readFile(relPath, 'utf-8');
- return { type: 'file', content, isBinary: false };
+ return {
+ type: 'file',
+
+ file: {
+ content,
+ isBinary: false
+ }
+ };
}
diff --git a/chef-agent/ChatContextManager.ts b/chef-agent/ChatContextManager.ts
index bc497f88d..e894dd631 100644
--- a/chef-agent/ChatContextManager.ts
+++ b/chef-agent/ChatContextManager.ts
@@ -141,7 +141,7 @@ export class ChatContextManager {
if (entry.type === 'file') {
const content = renderFile(entry.content);
fileActions.push(`${content}`);
- const size = estimateSize(entry);
+ const size = estimateSize(entry.file);
sizeEstimate += size;
numFiles++;
}
@@ -306,11 +306,11 @@ export class ChatContextManager {
result = part.text.length;
break;
case 'file':
- result += part.data.length;
- result += part.mimeType.length;
+ result += part.file.data.length;
+ result += part.file.mimeType.length;
break;
case 'reasoning':
- result += part.reasoning.length;
+ result += part.reasoningText.length;
break;
case 'tool-invocation':
result += JSON.stringify(part.toolInvocation.args).length;
@@ -319,8 +319,8 @@ export class ChatContextManager {
}
break;
case 'source':
- result += (part.source.title ?? '').length;
- result += part.source.url.length;
+ result += (part.title ?? '').length;
+ result += part.url.length;
break;
case 'step-start':
break;
diff --git a/chef-agent/cleanupAssistantMessages.ts b/chef-agent/cleanupAssistantMessages.ts
index f5f7379a8..39824dfad 100644
--- a/chef-agent/cleanupAssistantMessages.ts
+++ b/chef-agent/cleanupAssistantMessages.ts
@@ -1,8 +1,8 @@
-import { convertToCoreMessages } from 'ai';
-import type { Message } from 'ai';
+import { convertToModelMessages } from 'ai';
+import type { UIMessage } from 'ai';
import { EXCLUDED_FILE_PATHS } from './constants.js';
-export function cleanupAssistantMessages(messages: Message[]) {
+export function cleanupAssistantMessages(messages: UIMessage[]) {
let processedMessages = messages.map((message) => {
if (message.role == 'assistant') {
let content = cleanMessage(message.content);
@@ -24,7 +24,7 @@ export function cleanupAssistantMessages(messages: Message[]) {
(message.parts &&
message.parts.filter((part) => part.type === 'text' || part.type === 'tool-invocation').length > 0),
);
- return convertToCoreMessages(processedMessages).filter((message) => message.content.length > 0);
+ return convertToModelMessages(processedMessages).filter((message) => message.content.length > 0);
}
function cleanMessage(message: string) {
diff --git a/chef-agent/package.json b/chef-agent/package.json
index 8f0df1455..6e8d3cafa 100644
--- a/chef-agent/package.json
+++ b/chef-agent/package.json
@@ -8,11 +8,11 @@
"typecheck": "tsc"
},
"dependencies": {
- "ai": "^4.3.2",
+ "ai": "^5.0.8",
"jose": "^5.9.6",
"path-browserify": "^1.0.1",
"typescript": "^5.4.2",
- "zod": "^3.24.1"
+ "zod": "^3.25.0"
},
"devDependencies": {
"@types/node": "^20.17.30",
diff --git a/chef-agent/tools/addEnvironmentVariables.ts b/chef-agent/tools/addEnvironmentVariables.ts
index c746f377e..b82bbc653 100644
--- a/chef-agent/tools/addEnvironmentVariables.ts
+++ b/chef-agent/tools/addEnvironmentVariables.ts
@@ -8,6 +8,6 @@ export const addEnvironmentVariablesParameters = z.object({
export function addEnvironmentVariablesTool(): Tool {
return {
description: `Add environment variables to the Convex deployment. The user still needs to manually add the values in the Convex dashboard page this tool opens.`,
- parameters: addEnvironmentVariablesParameters,
+ inputSchema: addEnvironmentVariablesParameters,
};
}
diff --git a/chef-agent/tools/deploy.ts b/chef-agent/tools/deploy.ts
index dca1e4d00..88ebcb177 100644
--- a/chef-agent/tools/deploy.ts
+++ b/chef-agent/tools/deploy.ts
@@ -18,7 +18,7 @@ top can only contain actions. They can NEVER contains queries or mutations.
export const deployTool: Tool = {
description: deployToolDescription,
- parameters: z.object({}),
+ inputSchema: z.object({}),
};
export const deployToolParameters = z.object({});
diff --git a/chef-agent/tools/edit.ts b/chef-agent/tools/edit.ts
index d6beb0e82..6fbf92ab7 100644
--- a/chef-agent/tools/edit.ts
+++ b/chef-agent/tools/edit.ts
@@ -20,5 +20,5 @@ export const editToolParameters = z.object({
export const editTool: Tool = {
description: editToolDescription,
- parameters: editToolParameters,
+ inputSchema: editToolParameters,
};
diff --git a/chef-agent/tools/getConvexDeploymentName.ts b/chef-agent/tools/getConvexDeploymentName.ts
index 96a8b0ffa..76973f569 100644
--- a/chef-agent/tools/getConvexDeploymentName.ts
+++ b/chef-agent/tools/getConvexDeploymentName.ts
@@ -13,5 +13,5 @@ export const getConvexDeploymentNameParameters = z.object({});
export const getConvexDeploymentNameTool: Tool = {
description: getConvexDeploymentNameDescription,
- parameters: getConvexDeploymentNameParameters,
+ inputSchema: getConvexDeploymentNameParameters,
};
diff --git a/chef-agent/tools/lookupDocs.ts b/chef-agent/tools/lookupDocs.ts
index be4ca6425..6ef97b8fc 100644
--- a/chef-agent/tools/lookupDocs.ts
+++ b/chef-agent/tools/lookupDocs.ts
@@ -15,7 +15,7 @@ export const lookupDocsParameters = z.object({
export function lookupDocsTool(): Tool {
return {
description: `Lookup documentation for a list of features. Valid features to lookup are: \`proseMirror\` and \`presence\``,
- parameters: lookupDocsParameters,
+ inputSchema: lookupDocsParameters,
};
}
diff --git a/chef-agent/tools/npmInstall.ts b/chef-agent/tools/npmInstall.ts
index bea5a289e..42cf165cd 100644
--- a/chef-agent/tools/npmInstall.ts
+++ b/chef-agent/tools/npmInstall.ts
@@ -23,5 +23,5 @@ export const npmInstallToolParameters = z.object({
export const npmInstallTool: Tool = {
description: npmInstallToolDescription,
- parameters: npmInstallToolParameters,
+ inputSchema: npmInstallToolParameters,
};
diff --git a/chef-agent/tools/view.ts b/chef-agent/tools/view.ts
index f16f7d6b3..e610526b1 100644
--- a/chef-agent/tools/view.ts
+++ b/chef-agent/tools/view.ts
@@ -21,5 +21,5 @@ export const viewParameters = z.object({
export const viewTool: Tool = {
description: viewDescription,
- parameters: viewParameters,
+ inputSchema: viewParameters,
};
diff --git a/chef-agent/utils/chefDebug.ts b/chef-agent/utils/chefDebug.ts
index e02d35906..9f6e12fce 100644
--- a/chef-agent/utils/chefDebug.ts
+++ b/chef-agent/utils/chefDebug.ts
@@ -1,9 +1,9 @@
import type { WebContainer } from '@webcontainer/api';
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
type ChefDebug = {
- messages?: Message[];
- parsedMessages?: Message[];
+ messages?: UIMessage[];
+ parsedMessages?: UIMessage[];
webcontainer?: WebContainer;
setLogLevel?: (level: any) => void;
chatInitialId?: string;
diff --git a/convex/cleanup.test.ts b/convex/cleanup.test.ts
index 4344d4e58..b836025bc 100644
--- a/convex/cleanup.test.ts
+++ b/convex/cleanup.test.ts
@@ -55,8 +55,8 @@ describe("cleanup", () => {
finishReason: "stop",
modelId: "test-model",
usage: {
- completionTokens: 0,
- promptTokens: 0,
+ outputTokens: 0,
+ inputTokens: 0,
cachedPromptTokens: 0,
},
chefTokens: 0,
@@ -655,8 +655,8 @@ describe("file cleanup tests", () => {
finishReason: "stop",
modelId: "test-model",
usage: {
- completionTokens: 0,
- promptTokens: 0,
+ outputTokens: 0,
+ inputTokens: 0,
cachedPromptTokens: 0,
},
chefTokens: 0,
diff --git a/convex/messages.ts b/convex/messages.ts
index 58a34b50f..094428594 100644
--- a/convex/messages.ts
+++ b/convex/messages.ts
@@ -7,7 +7,7 @@ import {
type MutationCtx,
type QueryCtx,
} from "./_generated/server";
-import type { Message as AIMessage } from "ai";
+import type { UIMessage as AIMessage } from "ai";
import { ConvexError, v } from "convex/values";
import type { Infer } from "convex/values";
import { isValidSession } from "./sessions";
diff --git a/convex/schema.ts b/convex/schema.ts
index c2ea9d048..d2a77ca75 100644
--- a/convex/schema.ts
+++ b/convex/schema.ts
@@ -1,7 +1,7 @@
import { defineSchema, defineTable } from "convex/server";
import { v } from "convex/values";
import type { Infer, Validator } from "convex/values";
-import type { CoreMessage } from "ai";
+import type { ModelMessage } from "ai";
export const apiKeyValidator = v.object({
preference: v.union(v.literal("always"), v.literal("quotaExhausted")),
@@ -14,8 +14,8 @@ export const apiKeyValidator = v.object({
// A stable-enough way to store token usage.
export const usageRecordValidator = v.object({
- completionTokens: v.number(),
- promptTokens: v.number(),
+ outputTokens: v.number(),
+ inputTokens: v.number(),
/** Included in promptTokens total! */
cachedPromptTokens: v.number(),
});
@@ -208,7 +208,7 @@ export default defineSchema({
// Such a loose type doesn't feel so bad since this is debugging data, but if we try
// to display older versions of this we need to make any fields added to CoreMessage in
// later versions of the Vercel AI SDK optional on the read path.
- responseCoreMessages: v.array(v.any() as Validator<CoreMessage>),
+ responseCoreMessages: v.array(v.any() as Validator<ModelMessage>),
promptCoreMessagesStorageId: v.id("_storage"),
finishReason: v.string(),
modelId: v.string(),
diff --git a/package.json b/package.json
index 8d3f7fdc0..ea9b7df0f 100644
--- a/package.json
+++ b/package.json
@@ -31,13 +31,13 @@
"node": ">=18.18.0"
},
"dependencies": {
- "@ai-sdk/amazon-bedrock": "^2.2.9",
- "@ai-sdk/anthropic": "^1.2.12",
- "@ai-sdk/google": "^1.2.11",
- "@ai-sdk/google-vertex": "^2.2.24",
- "@ai-sdk/openai": "^1.3.6",
- "@ai-sdk/react": "^1.2.5",
- "@ai-sdk/xai": "^1.2.13",
+ "@ai-sdk/amazon-bedrock": "^2.0.0",
+ "@ai-sdk/anthropic": "^2.0.0",
+ "@ai-sdk/google": "^2.0.0",
+ "@ai-sdk/google-vertex": "^2.0.0",
+ "@ai-sdk/openai": "^2.0.5",
+ "@ai-sdk/react": "^2.0.0",
+ "@ai-sdk/xai": "^2.0.0",
"@auth0/auth0-react": "^2.3.0",
"@aws-sdk/credential-providers": "^3.782.0",
"@aws-sdk/rds-signer": "^3.782.0",
@@ -90,7 +90,7 @@
"@xterm/addon-fit": "^0.10.0",
"@xterm/addon-web-links": "^0.11.0",
"@xterm/xterm": "^5.5.0",
- "ai": "^4.3.2",
+ "ai": "^5.0.8",
"allotment": "^1.20.3",
"chart.js": "^4.4.9",
"chef-agent": "workspace:*",
@@ -188,15 +188,14 @@
"vite-plugin-optimize-css-modules": "^1.1.0",
"vite-tsconfig-paths": "^4.3.2",
"vitest": "^2.1.9",
- "zod": "^3.24.1"
+ "zod": "^3.25.0"
},
"resolutions": {
"@typescript-eslint/utils": "^8.0.0-alpha.30"
},
"pnpm": {
"overrides": {
- "@remix-run/cloudflare": "npm:@remix-run/node@2.15.3",
- "@ai-sdk/google": "npm:@convex-dev/ai-sdk-google@1.2.17"
+ "@remix-run/cloudflare": "npm:@remix-run/node@2.15.3"
}
},
"packageManager": "pnpm@9.5.0"
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index d2ba0bf1b..cde243488 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -7,33 +7,32 @@ settings:
overrides:
'@typescript-eslint/utils': ^8.0.0-alpha.30
'@remix-run/cloudflare': npm:@remix-run/node@2.15.3
- '@ai-sdk/google': npm:@convex-dev/ai-sdk-google@1.2.17
importers:
.:
dependencies:
'@ai-sdk/amazon-bedrock':
- specifier: ^2.2.9
- version: 2.2.9(zod@3.24.1)
+ specifier: ^2.0.0
+ version: 2.2.9(zod@3.25.76)
'@ai-sdk/anthropic':
- specifier: ^1.2.12
- version: 1.2.12(zod@3.24.1)
+ specifier: ^2.0.0
+ version: 2.0.1(zod@3.25.76)
'@ai-sdk/google':
- specifier: npm:@convex-dev/ai-sdk-google@1.2.17
- version: '@convex-dev/ai-sdk-google@1.2.17(zod@3.24.1)'
+ specifier: ^2.0.0
+ version: 2.0.3(zod@3.25.76)
'@ai-sdk/google-vertex':
- specifier: ^2.2.24
- version: 2.2.24(zod@3.24.1)
+ specifier: ^2.0.0
+ version: 2.2.24(zod@3.25.76)
'@ai-sdk/openai':
- specifier: ^1.3.6
- version: 1.3.6(zod@3.24.1)
+ specifier: ^2.0.5
+ version: 2.0.5(zod@3.25.76)
'@ai-sdk/react':
- specifier: ^1.2.5
- version: 1.2.6(react@18.3.1)(zod@3.24.1)
+ specifier: ^2.0.0
+ version: 2.0.8(react@18.3.1)(zod@3.25.76)
'@ai-sdk/xai':
- specifier: ^1.2.13
- version: 1.2.13(zod@3.24.1)
+ specifier: ^2.0.0
+ version: 2.0.2(zod@3.25.76)
'@auth0/auth0-react':
specifier: ^2.3.0
version: 2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
@@ -93,7 +92,7 @@ importers:
version: 6.36.2
'@convex-dev/ai-sdk-google':
specifier: 1.2.17
- version: 1.2.17(zod@3.24.1)
+ version: 1.2.17(zod@3.25.76)
'@convex-dev/design-system':
specifier: 0.1.11
version: 0.1.11(@popperjs/core@2.11.8)(@radix-ui/react-icons@1.3.2(react@18.3.1))(@tailwindcss/forms@0.5.10(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.14.0)(typescript@5.7.3))))(@types/react-dom@18.3.6(@types/react@18.3.20))(@types/react@18.3.20)(react@18.3.1)(tailwind-scrollbar@3.0.3(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.14.0)(typescript@5.7.3))))(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.14.0)(typescript@5.7.3)))
@@ -191,8 +190,8 @@ importers:
specifier: ^5.5.0
version: 5.5.0
ai:
- specifier: ^4.3.2
- version: 4.3.2(react@18.3.1)(zod@3.24.1)
+ specifier: ^5.0.8
+ version: 5.0.8(zod@3.25.76)
allotment:
specifier: ^1.20.3
version: 1.20.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
@@ -216,7 +215,7 @@ importers:
version: 1.25.0-alpha.3(@auth0/auth0-react@2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
convex-helpers:
specifier: ^0.1.79
- version: 0.1.79(convex@1.25.0-alpha.3(@auth0/auth0-react@2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1))(react@18.3.1)(typescript@5.7.3)(zod@3.24.1)
+ version: 0.1.79(convex@1.25.0-alpha.3(@auth0/auth0-react@2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1))(react@18.3.1)(typescript@5.7.3)(zod@3.25.76)
date-fns:
specifier: ^3.6.0
version: 3.6.0
@@ -276,7 +275,7 @@ importers:
version: link:@vercel/functions/oidc
openai:
specifier: ^4.93.0
- version: 4.93.0(ws@8.18.0)(zod@3.24.1)
+ version: 4.93.0(ws@8.18.0)(zod@3.25.76)
posthog-js:
specifier: ^1.235.4
version: 1.235.4
@@ -321,7 +320,7 @@ importers:
version: 0.2.0(@remix-run/react@2.15.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.7.3))(@remix-run/server-runtime@2.15.3(typescript@5.7.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
remix-utils:
specifier: ^7.7.0
- version: 7.7.0(@remix-run/node@2.15.3(typescript@5.7.3))(@remix-run/react@2.15.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.7.3))(@remix-run/router@1.22.0)(react@18.3.1)(zod@3.24.1)
+ version: 7.7.0(@remix-run/node@2.15.3(typescript@5.7.3))(@remix-run/react@2.15.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.7.3))(@remix-run/router@1.22.0)(react@18.3.1)(zod@3.25.76)
shiki:
specifier: ^1.24.0
version: 1.29.2
@@ -480,14 +479,14 @@ importers:
specifier: ^2.1.9
version: 2.1.9(@edge-runtime/vm@5.0.0)(@types/node@22.14.0)(jsdom@26.0.0)(sass-embedded@1.83.4)(sugarss@4.0.1(postcss@8.5.3))
zod:
- specifier: ^3.24.1
- version: 3.24.1
+ specifier: ^3.25.0
+ version: 3.25.76
chef-agent:
dependencies:
ai:
- specifier: ^4.3.2
- version: 4.3.2(react@18.3.1)(zod@3.24.1)
+ specifier: ^5.0.8
+ version: 5.0.8(zod@3.25.76)
jose:
specifier: ^5.9.6
version: 5.9.6
@@ -498,8 +497,8 @@ importers:
specifier: ^5.4.2
version: 5.8.3
zod:
- specifier: ^3.24.1
- version: 3.24.1
+ specifier: ^3.25.0
+ version: 3.25.76
devDependencies:
'@types/node':
specifier: ^20.17.30
@@ -543,22 +542,22 @@ importers:
dependencies:
'@ai-sdk/anthropic':
specifier: ^1.2.4
- version: 1.2.4(zod@3.24.1)
+ version: 1.2.4(zod@3.25.76)
'@ai-sdk/google':
- specifier: npm:@convex-dev/ai-sdk-google@1.2.17
- version: '@convex-dev/ai-sdk-google@1.2.17(zod@3.24.1)'
+ specifier: ^1.2.11
+ version: 1.2.22(zod@3.25.76)
'@ai-sdk/openai':
specifier: ^1.3.6
- version: 1.3.6(zod@3.24.1)
+ version: 1.3.6(zod@3.25.76)
'@ai-sdk/xai':
specifier: ^1.2.13
- version: 1.2.13(zod@3.24.1)
+ version: 1.2.13(zod@3.25.76)
async-mutex:
specifier: ^0.5.0
version: 0.5.0
braintrust:
specifier: ^0.0.199
- version: 0.0.199(@aws-sdk/credential-provider-web-identity@3.782.0)(openai@4.93.0(ws@8.18.0)(zod@3.24.1))(react@18.3.1)(sswr@2.2.0(svelte@5.28.1))(svelte@5.28.1)(vue@3.5.13(typescript@5.8.3))(zod@3.24.1)
+ version: 0.0.199(@aws-sdk/credential-provider-web-identity@3.782.0)(openai@4.93.0(ws@8.18.0)(zod@3.25.76))(react@18.3.1)(sswr@2.2.0(svelte@5.28.1))(svelte@5.28.1)(vue@3.5.13(typescript@5.8.3))(zod@3.25.76)
chef-agent:
specifier: workspace:*
version: link:../chef-agent
@@ -596,24 +595,66 @@ packages:
peerDependencies:
zod: ^3.0.0
+ '@ai-sdk/anthropic@2.0.1':
+ resolution: {integrity: sha512-HtNbpNV9qXQosHu00+CBMEcdTerwZY+kpVMNak0xP/P5TF6XkPf7IyizhLuc7y5zcXMjZCMA7jDGkcEdZCEdkw==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4
+
+ '@ai-sdk/gateway@1.0.4':
+ resolution: {integrity: sha512-1roLdgMbFU3Nr4MC97/te7w6OqxsWBkDUkpbCcvxF3jz/ku91WVaJldn/PKU8feMKNyI5W9wnqhbjb1BqbExOQ==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4
+
'@ai-sdk/google-vertex@2.2.24':
resolution: {integrity: sha512-zi1ZN6jQEBRke/WMbZv0YkeqQ3nOs8ihxjVh/8z1tUn+S1xgRaYXf4+r6+Izh2YqVHIMNwjhUYryQRBGq20cgQ==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
+ '@ai-sdk/google@1.2.19':
+ resolution: {integrity: sha512-Xgl6eftIRQ4srUdCzxM112JuewVMij5q4JLcNmHcB68Bxn9dpr3MVUSPlJwmameuiQuISIA8lMB+iRiRbFsaqA==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.0.0
+
+ '@ai-sdk/google@1.2.22':
+ resolution: {integrity: sha512-Ppxu3DIieF1G9pyQ5O1Z646GYR0gkC57YdBqXJ82qvCdhEhZHu0TWhmnOoeIWe2olSbuDeoOY+MfJrW8dzS3Hw==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.0.0
+
+ '@ai-sdk/google@2.0.3':
+ resolution: {integrity: sha512-rQkY4nQGZnQw6pfftqsGsXXAqkGnl1gYjSbvD8+zyrVg2KAYeJU0NxIaacLWCORNcW+yQ/cqTpK/Uku/SIdEqA==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4
+
'@ai-sdk/openai-compatible@0.2.11':
resolution: {integrity: sha512-56U0uNCcFTygA4h6R/uREv8r5sKA3/pGkpIAnMOpRzs5wiARlTYakWW3LZgxg6D4Gpeswo4gwNJczB7nM0K1Qg==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
+ '@ai-sdk/openai-compatible@1.0.2':
+ resolution: {integrity: sha512-VqTEzo1ueUsS9FGHtyoAHK11nPIgtziwwGGy5qtGOs+JRcZVEdPqcWe1n2+Ichl4edchoAHo/tygAymaiom9mg==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4
+
'@ai-sdk/openai@1.3.6':
resolution: {integrity: sha512-Lyp6W6dg+ERMJru3DI8/pWAjXLB0GbMMlXh4jxA3mVny8CJHlCAjlEJRuAdLg1/CFz4J1UDN2/4qBnIWtLFIqw==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
+ '@ai-sdk/openai@2.0.5':
+ resolution: {integrity: sha512-1oFXNudUNRfl4QXlE2Q0v8GCvGngx8HMwHN6pyOTMBP8SI9VoOcCJzRPVBMLd0SI7dkcAvGVkpSVTnaLaXEtxQ==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4
+
'@ai-sdk/provider-utils@1.0.22':
resolution: {integrity: sha512-YHK2rpj++wnLVc9vPGzGFP3Pjeld2MwhKinetA0zKXOoHAT/Jit5O8kZsxcSlJPu9wvcGT1UGZEjZrtO7PfFOQ==}
engines: {node: '>=18'}
@@ -629,12 +670,6 @@ packages:
peerDependencies:
zod: ^3.23.8
- '@ai-sdk/provider-utils@2.2.4':
- resolution: {integrity: sha512-13sEGBxB6kgaMPGOgCLYibF6r8iv8mgjhuToFrOTU09bBxbFQd8ZoARarCfJN6VomCUbUvMKwjTBLb1vQnN+WA==}
- engines: {node: '>=18'}
- peerDependencies:
- zod: ^3.23.8
-
'@ai-sdk/provider-utils@2.2.7':
resolution: {integrity: sha512-kM0xS3GWg3aMChh9zfeM+80vEZfXzR3JEUBdycZLtbRZ2TRT8xOj3WodGHPb06sUK5yD7pAXC/P7ctsi2fvUGQ==}
engines: {node: '>=18'}
@@ -647,6 +682,12 @@ packages:
peerDependencies:
zod: ^3.23.8
+ '@ai-sdk/provider-utils@3.0.1':
+ resolution: {integrity: sha512-/iP1sKc6UdJgGH98OCly7sWJKv+J9G47PnTjIj40IJMUQKwDrUMyf7zOOfRtPwSuNifYhSoJQ4s1WltI65gJ/g==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4
+
'@ai-sdk/provider@0.0.26':
resolution: {integrity: sha512-dQkfBDs2lTYpKM8389oopPdQgIU007GQyCbuPPrV+K6MtSII3HBfE0stUIMXUb44L+LK1t6GXPP7wjSzjO6uKg==}
engines: {node: '>=18'}
@@ -659,6 +700,10 @@ packages:
resolution: {integrity: sha512-qZMxYJ0qqX/RfnuIaab+zp8UAeJn/ygXXAffR5I4N0n1IrvA6qBsjc8hXLmBiMV2zoXlifkacF7sEFnYnjBcqg==}
engines: {node: '>=18'}
+ '@ai-sdk/provider@2.0.0':
+ resolution: {integrity: sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA==}
+ engines: {node: '>=18'}
+
'@ai-sdk/react@0.0.70':
resolution: {integrity: sha512-GnwbtjW4/4z7MleLiW+TOZC2M29eCg1tOUpuEiYFMmFNZK8mkrqM0PFZMo6UsYeUYMWqEOOcPOU9OQVJMJh7IQ==}
engines: {node: '>=18'}
@@ -671,12 +716,12 @@ packages:
zod:
optional: true
- '@ai-sdk/react@1.2.6':
- resolution: {integrity: sha512-5BFChNbcYtcY9MBStcDev7WZRHf0NpTrk8yfSoedWctB3jfWkFd1HECBvdc8w3mUQshF2MumLHtAhRO7IFtGGQ==}
+ '@ai-sdk/react@2.0.8':
+ resolution: {integrity: sha512-I9CI53lncv0nUy3TPW0lrnoHCZx4NUaAuFCgm6U0bId0PQKeYi61Z9XFrgo3mDByH2SSC3OVsPq6d5Db8kF1FQ==}
engines: {node: '>=18'}
peerDependencies:
react: ^18 || ^19 || ^19.0.0-rc
- zod: ^3.23.8
+ zod: ^3.25.76 || ^4
peerDependenciesMeta:
zod:
optional: true
@@ -708,12 +753,6 @@ packages:
zod:
optional: true
- '@ai-sdk/ui-utils@1.2.5':
- resolution: {integrity: sha512-XDgqnJcaCkDez7qolvk+PDbs/ceJvgkNkxkOlc9uDWqxfDJxtvCZ+14MP/1qr4IBwGIgKVHzMDYDXvqVhSWLzg==}
- engines: {node: '>=18'}
- peerDependencies:
- zod: ^3.23.8
-
'@ai-sdk/vue@0.0.59':
resolution: {integrity: sha512-+ofYlnqdc8c4F6tM0IKF0+7NagZRAiqBJpGDJ+6EYhDW8FHLUP/JFBgu32SjxSxC6IKFZxEnl68ZoP/Z38EMlw==}
engines: {node: '>=18'}
@@ -729,6 +768,12 @@ packages:
peerDependencies:
zod: ^3.0.0
+ '@ai-sdk/xai@2.0.2':
+ resolution: {integrity: sha512-4P10qVSUj1/Ufem3UHNLExW8hHxGO+aYBDs3vQLmOXJyuCWnk2lsefOJz3XNvNpW0Avavl0A4rJyHS9W9gGglQ==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4
+
'@alloc/quick-lru@5.2.0':
resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
engines: {node: '>=10'}
@@ -4003,6 +4048,9 @@ packages:
engines: {node: '>=8.10'}
hasBin: true
+ '@standard-schema/spec@1.0.0':
+ resolution: {integrity: sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==}
+
'@stylistic/eslint-plugin-ts@2.13.0':
resolution: {integrity: sha512-nooe1oTwz60T4wQhZ+5u0/GAu3ygkKF9vPPZeRn/meG71ntQ0EZXVOKEonluAYl/+CV2T+nN0dknHa4evAW13Q==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -4530,15 +4578,11 @@ packages:
zod:
optional: true
- ai@4.3.2:
- resolution: {integrity: sha512-h643SfhKil0Pnxk2tVIazFDL1JevutUghvc3mOpWqJFMcudmgtwQYlvxCkwSfljrrq+qIfne8d6jCihMMhM7pw==}
+ ai@5.0.8:
+ resolution: {integrity: sha512-qbnhj046UvG30V1S5WhjBn+RBGEAmi8PSZWqMhRsE3EPxvO5BcePXTZFA23e9MYyWS9zr4Vm8Mv3wQXwLmtIBw==}
engines: {node: '>=18'}
peerDependencies:
- react: ^18 || ^19 || ^19.0.0-rc
- zod: ^3.23.8
- peerDependenciesMeta:
- react:
- optional: true
+ zod: ^3.25.76 || ^4
ajv@6.12.6:
resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==}
@@ -5807,6 +5851,10 @@ packages:
resolution: {integrity: sha512-v0eOBUbiaFojBu2s2NPBfYUoRR9GjcDNvCXVaqEf5vVfpIAh9f8RCo4vXTP8c63QRKCFwoLpMpTdPwwhEKVgzA==}
engines: {node: '>=14.18'}
+ eventsource-parser@3.0.3:
+ resolution: {integrity: sha512-nVpZkTMM9rF6AQ9gPJpFsNAMt48wIzB5TQgiTLdHiuO8XEDhUgZEhqKlZWXbIzo9VmJ/HvysHqEaVeD5v9TPvA==}
+ engines: {node: '>=20.0.0'}
+
evp_bytestokey@1.0.3:
resolution: {integrity: sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==}
@@ -8702,11 +8750,6 @@ packages:
resolution: {integrity: sha512-iOa9WmfNG95lSOSJdMhdjJ4Afok7IRAQYXpbnxhd5EINnXseG0GVa9j6WPght4eX78XfFez45Fi+uRglGKPV/Q==}
engines: {node: '>=18'}
- swr@2.3.2:
- resolution: {integrity: sha512-RosxFpiabojs75IwQ316DGoDRmOqtiAj0tg8wCcbEu4CiLZBs/a9QNtHV7TUfDXmmlgqij/NqzKq/eLelyv9xA==}
- peerDependencies:
- react: ^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
-
swr@2.3.4:
resolution: {integrity: sha512-bYd2lrhc+VarcpkgWclcUi92wYCpOgMws9Sd1hG1ntAu0NEy+14CbotuFjshBU2kt9rYj9TSmDcybpxpeTU1fg==}
peerDependencies:
@@ -9570,95 +9613,138 @@ packages:
zod@3.22.3:
resolution: {integrity: sha512-EjIevzuJRiRPbVH4mGc8nApb/lVLKVpmUhAaR5R5doKGfAnGJ6Gr3CViAVjP+4FWSxCsybeWQdcgCtbX+7oZug==}
- zod@3.24.1:
- resolution: {integrity: sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A==}
+ zod@3.25.76:
+ resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==}
zwitch@2.0.4:
resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==}
snapshots:
- '@ai-sdk/amazon-bedrock@2.2.9(zod@3.24.1)':
+ '@ai-sdk/amazon-bedrock@2.2.9(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.8(zod@3.24.1)
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
'@smithy/eventstream-codec': 4.0.2
'@smithy/util-utf8': 4.0.0
aws4fetch: 1.0.20
- zod: 3.24.1
+ zod: 3.25.76
- '@ai-sdk/anthropic@1.2.12(zod@3.24.1)':
+ '@ai-sdk/anthropic@1.2.12(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.8(zod@3.24.1)
- zod: 3.24.1
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
+ zod: 3.25.76
- '@ai-sdk/anthropic@1.2.4(zod@3.24.1)':
+ '@ai-sdk/anthropic@1.2.4(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.0
- '@ai-sdk/provider-utils': 2.2.3(zod@3.24.1)
- zod: 3.24.1
+ '@ai-sdk/provider-utils': 2.2.3(zod@3.25.76)
+ zod: 3.25.76
+
+ '@ai-sdk/anthropic@2.0.1(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 2.0.0
+ '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76)
+ zod: 3.25.76
+
+ '@ai-sdk/gateway@1.0.4(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 2.0.0
+ '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76)
+ zod: 3.25.76
- '@ai-sdk/google-vertex@2.2.24(zod@3.24.1)':
+ '@ai-sdk/google-vertex@2.2.24(zod@3.25.76)':
dependencies:
- '@ai-sdk/anthropic': 1.2.12(zod@3.24.1)
- '@ai-sdk/google': '@convex-dev/ai-sdk-google@1.2.17(zod@3.24.1)'
+ '@ai-sdk/anthropic': 1.2.12(zod@3.25.76)
+ '@ai-sdk/google': 1.2.19(zod@3.25.76)
'@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.8(zod@3.24.1)
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
google-auth-library: 9.15.1
- zod: 3.24.1
+ zod: 3.25.76
transitivePeerDependencies:
- encoding
- supports-color
- '@ai-sdk/openai-compatible@0.2.11(zod@3.24.1)':
+ '@ai-sdk/google@1.2.19(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 1.1.3
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
+ zod: 3.25.76
+
+ '@ai-sdk/google@1.2.22(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.7(zod@3.24.1)
- zod: 3.24.1
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
+ zod: 3.25.76
- '@ai-sdk/openai@1.3.6(zod@3.24.1)':
+ '@ai-sdk/google@2.0.3(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 2.0.0
+ '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76)
+ zod: 3.25.76
+
+ '@ai-sdk/openai-compatible@0.2.11(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 1.1.3
+ '@ai-sdk/provider-utils': 2.2.7(zod@3.25.76)
+ zod: 3.25.76
+
+ '@ai-sdk/openai-compatible@1.0.2(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 2.0.0
+ '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76)
+ zod: 3.25.76
+
+ '@ai-sdk/openai@1.3.6(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.0
- '@ai-sdk/provider-utils': 2.2.3(zod@3.24.1)
- zod: 3.24.1
+ '@ai-sdk/provider-utils': 2.2.3(zod@3.25.76)
+ zod: 3.25.76
+
+ '@ai-sdk/openai@2.0.5(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 2.0.0
+ '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76)
+ zod: 3.25.76
- '@ai-sdk/provider-utils@1.0.22(zod@3.24.1)':
+ '@ai-sdk/provider-utils@1.0.22(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 0.0.26
eventsource-parser: 1.1.2
nanoid: 3.3.8
secure-json-parse: 2.7.0
optionalDependencies:
- zod: 3.24.1
+ zod: 3.25.76
- '@ai-sdk/provider-utils@2.2.3(zod@3.24.1)':
+ '@ai-sdk/provider-utils@2.2.3(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.0
nanoid: 3.3.8
secure-json-parse: 2.7.0
- zod: 3.24.1
+ zod: 3.25.76
- '@ai-sdk/provider-utils@2.2.4(zod@3.24.1)':
+ '@ai-sdk/provider-utils@2.2.7(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider': 1.1.0
+ '@ai-sdk/provider': 1.1.3
nanoid: 3.3.8
secure-json-parse: 2.7.0
- zod: 3.24.1
+ zod: 3.25.76
- '@ai-sdk/provider-utils@2.2.7(zod@3.24.1)':
+ '@ai-sdk/provider-utils@2.2.8(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
nanoid: 3.3.8
secure-json-parse: 2.7.0
- zod: 3.24.1
+ zod: 3.25.76
- '@ai-sdk/provider-utils@2.2.8(zod@3.24.1)':
+ '@ai-sdk/provider-utils@3.0.1(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider': 1.1.3
- nanoid: 3.3.8
- secure-json-parse: 2.7.0
- zod: 3.24.1
+ '@ai-sdk/provider': 2.0.0
+ '@standard-schema/spec': 1.0.0
+ eventsource-parser: 3.0.3
+ zod: 3.25.76
+ zod-to-json-schema: 3.24.1(zod@3.25.76)
'@ai-sdk/provider@0.0.26':
dependencies:
@@ -9672,76 +9758,80 @@ snapshots:
dependencies:
json-schema: 0.4.0
- '@ai-sdk/react@0.0.70(react@18.3.1)(zod@3.24.1)':
+ '@ai-sdk/provider@2.0.0':
dependencies:
- '@ai-sdk/provider-utils': 1.0.22(zod@3.24.1)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.24.1)
- swr: 2.3.2(react@18.3.1)
+ json-schema: 0.4.0
+
+ '@ai-sdk/react@0.0.70(react@18.3.1)(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider-utils': 1.0.22(zod@3.25.76)
+ '@ai-sdk/ui-utils': 0.0.50(zod@3.25.76)
+ swr: 2.3.4(react@18.3.1)
throttleit: 2.1.0
optionalDependencies:
react: 18.3.1
- zod: 3.24.1
+ zod: 3.25.76
- '@ai-sdk/react@1.2.6(react@18.3.1)(zod@3.24.1)':
+ '@ai-sdk/react@2.0.8(react@18.3.1)(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider-utils': 2.2.4(zod@3.24.1)
- '@ai-sdk/ui-utils': 1.2.5(zod@3.24.1)
+ '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76)
+ ai: 5.0.8(zod@3.25.76)
react: 18.3.1
swr: 2.3.4(react@18.3.1)
throttleit: 2.1.0
optionalDependencies:
- zod: 3.24.1
+ zod: 3.25.76
- '@ai-sdk/solid@0.0.54(zod@3.24.1)':
+ '@ai-sdk/solid@0.0.54(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider-utils': 1.0.22(zod@3.24.1)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.24.1)
+ '@ai-sdk/provider-utils': 1.0.22(zod@3.25.76)
+ '@ai-sdk/ui-utils': 0.0.50(zod@3.25.76)
transitivePeerDependencies:
- zod
- '@ai-sdk/svelte@0.0.57(svelte@5.28.1)(zod@3.24.1)':
+ '@ai-sdk/svelte@0.0.57(svelte@5.28.1)(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider-utils': 1.0.22(zod@3.24.1)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.24.1)
+ '@ai-sdk/provider-utils': 1.0.22(zod@3.25.76)
+ '@ai-sdk/ui-utils': 0.0.50(zod@3.25.76)
sswr: 2.2.0(svelte@5.28.1)
optionalDependencies:
svelte: 5.28.1
transitivePeerDependencies:
- zod
- '@ai-sdk/ui-utils@0.0.50(zod@3.24.1)':
+ '@ai-sdk/ui-utils@0.0.50(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 0.0.26
- '@ai-sdk/provider-utils': 1.0.22(zod@3.24.1)
+ '@ai-sdk/provider-utils': 1.0.22(zod@3.25.76)
json-schema: 0.4.0
secure-json-parse: 2.7.0
- zod-to-json-schema: 3.24.1(zod@3.24.1)
+ zod-to-json-schema: 3.24.1(zod@3.25.76)
optionalDependencies:
- zod: 3.24.1
-
- '@ai-sdk/ui-utils@1.2.5(zod@3.24.1)':
- dependencies:
- '@ai-sdk/provider': 1.1.0
- '@ai-sdk/provider-utils': 2.2.4(zod@3.24.1)
- zod: 3.24.1
- zod-to-json-schema: 3.24.1(zod@3.24.1)
+ zod: 3.25.76
- '@ai-sdk/vue@0.0.59(vue@3.5.13(typescript@5.8.3))(zod@3.24.1)':
+ '@ai-sdk/vue@0.0.59(vue@3.5.13(typescript@5.8.3))(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider-utils': 1.0.22(zod@3.24.1)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.24.1)
+ '@ai-sdk/provider-utils': 1.0.22(zod@3.25.76)
+ '@ai-sdk/ui-utils': 0.0.50(zod@3.25.76)
swrv: 1.1.0(vue@3.5.13(typescript@5.8.3))
optionalDependencies:
vue: 3.5.13(typescript@5.8.3)
transitivePeerDependencies:
- zod
- '@ai-sdk/xai@1.2.13(zod@3.24.1)':
+ '@ai-sdk/xai@1.2.13(zod@3.25.76)':
dependencies:
- '@ai-sdk/openai-compatible': 0.2.11(zod@3.24.1)
+ '@ai-sdk/openai-compatible': 0.2.11(zod@3.25.76)
'@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.7(zod@3.24.1)
- zod: 3.24.1
+ '@ai-sdk/provider-utils': 2.2.7(zod@3.25.76)
+ zod: 3.25.76
+
+ '@ai-sdk/xai@2.0.2(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/openai-compatible': 1.0.2(zod@3.25.76)
+ '@ai-sdk/provider': 2.0.0
+ '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76)
+ zod: 3.25.76
'@alloc/quick-lru@5.2.0': {}
@@ -9758,10 +9848,10 @@ snapshots:
'@csstools/css-tokenizer': 3.0.3
lru-cache: 10.4.3
- '@asteasolutions/zod-to-openapi@6.4.0(zod@3.24.1)':
+ '@asteasolutions/zod-to-openapi@6.4.0(zod@3.25.76)':
dependencies:
openapi3-ts: 4.4.0
- zod: 3.24.1
+ zod: 3.25.76
'@auth0/auth0-react@2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@@ -10407,9 +10497,9 @@ snapshots:
'@braintrust/core@0.0.84':
dependencies:
- '@asteasolutions/zod-to-openapi': 6.4.0(zod@3.24.1)
+ '@asteasolutions/zod-to-openapi': 6.4.0(zod@3.25.76)
uuid: 9.0.1
- zod: 3.24.1
+ zod: 3.25.76
'@bufbuild/protobuf@2.2.5':
optional: true
@@ -10561,11 +10651,11 @@ snapshots:
style-mod: 4.1.2
w3c-keyname: 2.2.8
- '@convex-dev/ai-sdk-google@1.2.17(zod@3.24.1)':
+ '@convex-dev/ai-sdk-google@1.2.17(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.8(zod@3.24.1)
- zod: 3.24.1
+ '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
+ zod: 3.25.76
'@convex-dev/design-system@0.1.11(@popperjs/core@2.11.8)(@radix-ui/react-icons@1.3.2(react@18.3.1))(@tailwindcss/forms@0.5.10(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.14.0)(typescript@5.7.3))))(@types/react-dom@18.3.6(@types/react@18.3.20))(@types/react@18.3.20)(react@18.3.1)(tailwind-scrollbar@3.0.3(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.14.0)(typescript@5.7.3))))(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.14.0)(typescript@5.7.3)))':
dependencies:
@@ -13203,6 +13293,8 @@ snapshots:
ignore: 5.3.2
p-map: 4.0.0
+ '@standard-schema/spec@1.0.0': {}
+
'@stylistic/eslint-plugin-ts@2.13.0(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3)':
dependencies:
'@typescript-eslint/utils': 8.24.0(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3)
@@ -13926,42 +14018,38 @@ snapshots:
clean-stack: 2.2.0
indent-string: 4.0.0
- ai@3.4.33(openai@4.93.0(ws@8.18.0)(zod@3.24.1))(react@18.3.1)(sswr@2.2.0(svelte@5.28.1))(svelte@5.28.1)(vue@3.5.13(typescript@5.8.3))(zod@3.24.1):
+ ai@3.4.33(openai@4.93.0(ws@8.18.0)(zod@3.25.76))(react@18.3.1)(sswr@2.2.0(svelte@5.28.1))(svelte@5.28.1)(vue@3.5.13(typescript@5.8.3))(zod@3.25.76):
dependencies:
'@ai-sdk/provider': 0.0.26
- '@ai-sdk/provider-utils': 1.0.22(zod@3.24.1)
- '@ai-sdk/react': 0.0.70(react@18.3.1)(zod@3.24.1)
- '@ai-sdk/solid': 0.0.54(zod@3.24.1)
- '@ai-sdk/svelte': 0.0.57(svelte@5.28.1)(zod@3.24.1)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.24.1)
- '@ai-sdk/vue': 0.0.59(vue@3.5.13(typescript@5.8.3))(zod@3.24.1)
+ '@ai-sdk/provider-utils': 1.0.22(zod@3.25.76)
+ '@ai-sdk/react': 0.0.70(react@18.3.1)(zod@3.25.76)
+ '@ai-sdk/solid': 0.0.54(zod@3.25.76)
+ '@ai-sdk/svelte': 0.0.57(svelte@5.28.1)(zod@3.25.76)
+ '@ai-sdk/ui-utils': 0.0.50(zod@3.25.76)
+ '@ai-sdk/vue': 0.0.59(vue@3.5.13(typescript@5.8.3))(zod@3.25.76)
'@opentelemetry/api': 1.9.0
eventsource-parser: 1.1.2
json-schema: 0.4.0
jsondiffpatch: 0.6.0
secure-json-parse: 2.7.0
- zod-to-json-schema: 3.24.1(zod@3.24.1)
+ zod-to-json-schema: 3.24.1(zod@3.25.76)
optionalDependencies:
- openai: 4.93.0(ws@8.18.0)(zod@3.24.1)
+ openai: 4.93.0(ws@8.18.0)(zod@3.25.76)
react: 18.3.1
sswr: 2.2.0(svelte@5.28.1)
svelte: 5.28.1
- zod: 3.24.1
+ zod: 3.25.76
transitivePeerDependencies:
- solid-js
- vue
- ai@4.3.2(react@18.3.1)(zod@3.24.1):
+ ai@5.0.8(zod@3.25.76):
dependencies:
- '@ai-sdk/provider': 1.1.0
- '@ai-sdk/provider-utils': 2.2.4(zod@3.24.1)
- '@ai-sdk/react': 1.2.6(react@18.3.1)(zod@3.24.1)
- '@ai-sdk/ui-utils': 1.2.5(zod@3.24.1)
+ '@ai-sdk/gateway': 1.0.4(zod@3.25.76)
+ '@ai-sdk/provider': 2.0.0
+ '@ai-sdk/provider-utils': 3.0.1(zod@3.25.76)
'@opentelemetry/api': 1.9.0
- jsondiffpatch: 0.6.0
- zod: 3.24.1
- optionalDependencies:
- react: 18.3.1
+ zod: 3.25.76
ajv@6.12.6:
dependencies:
@@ -14202,13 +14290,13 @@ snapshots:
dependencies:
fill-range: 7.1.1
- braintrust@0.0.199(@aws-sdk/credential-provider-web-identity@3.782.0)(openai@4.93.0(ws@8.18.0)(zod@3.24.1))(react@18.3.1)(sswr@2.2.0(svelte@5.28.1))(svelte@5.28.1)(vue@3.5.13(typescript@5.8.3))(zod@3.24.1):
+ braintrust@0.0.199(@aws-sdk/credential-provider-web-identity@3.782.0)(openai@4.93.0(ws@8.18.0)(zod@3.25.76))(react@18.3.1)(sswr@2.2.0(svelte@5.28.1))(svelte@5.28.1)(vue@3.5.13(typescript@5.8.3))(zod@3.25.76):
dependencies:
'@ai-sdk/provider': 1.1.3
'@braintrust/core': 0.0.84
'@next/env': 14.2.28
'@vercel/functions': 1.6.0(@aws-sdk/credential-provider-web-identity@3.782.0)
- ai: 3.4.33(openai@4.93.0(ws@8.18.0)(zod@3.24.1))(react@18.3.1)(sswr@2.2.0(svelte@5.28.1))(svelte@5.28.1)(vue@3.5.13(typescript@5.8.3))(zod@3.24.1)
+ ai: 3.4.33(openai@4.93.0(ws@8.18.0)(zod@3.25.76))(react@18.3.1)(sswr@2.2.0(svelte@5.28.1))(svelte@5.28.1)(vue@3.5.13(typescript@5.8.3))(zod@3.25.76)
argparse: 2.0.1
chalk: 4.1.2
cli-progress: 3.12.0
@@ -14223,8 +14311,8 @@ snapshots:
slugify: 1.6.6
source-map: 0.7.4
uuid: 9.0.1
- zod: 3.24.1
- zod-to-json-schema: 3.24.1(zod@3.24.1)
+ zod: 3.25.76
+ zod-to-json-schema: 3.24.1(zod@3.25.76)
transitivePeerDependencies:
- '@aws-sdk/credential-provider-web-identity'
- openai
@@ -14547,13 +14635,13 @@ snapshots:
convert-source-map@2.0.0: {}
- convex-helpers@0.1.79(convex@1.25.0-alpha.3(@auth0/auth0-react@2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1))(react@18.3.1)(typescript@5.7.3)(zod@3.24.1):
+ convex-helpers@0.1.79(convex@1.25.0-alpha.3(@auth0/auth0-react@2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1))(react@18.3.1)(typescript@5.7.3)(zod@3.25.76):
dependencies:
convex: 1.25.0-alpha.3(@auth0/auth0-react@2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
optionalDependencies:
react: 18.3.1
typescript: 5.7.3
- zod: 3.24.1
+ zod: 3.25.76
convex-test@0.0.36(convex@1.25.0-alpha.3(@auth0/auth0-react@2.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)):
dependencies:
@@ -15489,6 +15577,8 @@ snapshots:
eventsource-parser@1.1.2: {}
+ eventsource-parser@3.0.3: {}
+
evp_bytestokey@1.0.3:
dependencies:
md5.js: 1.3.5
@@ -16516,8 +16606,8 @@ snapshots:
strip-json-comments: 5.0.1
summary: 2.1.0
typescript: 5.7.3
- zod: 3.24.1
- zod-validation-error: 3.4.0(zod@3.24.1)
+ zod: 3.25.76
+ zod-validation-error: 3.4.0(zod@3.25.76)
launchdarkly-js-client-sdk@3.5.0:
dependencies:
@@ -17653,7 +17743,7 @@ snapshots:
regex: 5.1.1
regex-recursion: 5.1.1
- openai@4.93.0(ws@8.18.0)(zod@3.24.1):
+ openai@4.93.0(ws@8.18.0)(zod@3.25.76):
dependencies:
'@types/node': 18.19.86
'@types/node-fetch': 2.6.12
@@ -17664,7 +17754,7 @@ snapshots:
node-fetch: 2.7.0
optionalDependencies:
ws: 8.18.0
- zod: 3.24.1
+ zod: 3.25.76
transitivePeerDependencies:
- encoding
@@ -18419,7 +18509,7 @@ snapshots:
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
- remix-utils@7.7.0(@remix-run/node@2.15.3(typescript@5.7.3))(@remix-run/react@2.15.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.7.3))(@remix-run/router@1.22.0)(react@18.3.1)(zod@3.24.1):
+ remix-utils@7.7.0(@remix-run/node@2.15.3(typescript@5.7.3))(@remix-run/react@2.15.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.7.3))(@remix-run/router@1.22.0)(react@18.3.1)(zod@3.25.76):
dependencies:
type-fest: 4.34.1
optionalDependencies:
@@ -18427,7 +18517,7 @@ snapshots:
'@remix-run/react': 2.15.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.7.3)
'@remix-run/router': 1.22.0
react: 18.3.1
- zod: 3.24.1
+ zod: 3.25.76
require-directory@2.1.1: {}
@@ -19108,12 +19198,6 @@ snapshots:
magic-string: 0.30.17
zimmerframe: 1.1.2
- swr@2.3.2(react@18.3.1):
- dependencies:
- dequal: 2.0.3
- react: 18.3.1
- use-sync-external-store: 1.4.0(react@18.3.1)
-
swr@2.3.4(react@18.3.1):
dependencies:
dequal: 2.0.3
@@ -20116,17 +20200,17 @@ snapshots:
zimmerframe@1.1.2: {}
- zod-to-json-schema@3.24.1(zod@3.24.1):
+ zod-to-json-schema@3.24.1(zod@3.25.76):
dependencies:
- zod: 3.24.1
+ zod: 3.25.76
- zod-validation-error@3.4.0(zod@3.24.1):
+ zod-validation-error@3.4.0(zod@3.25.76):
dependencies:
- zod: 3.24.1
+ zod: 3.25.76
zod@3.22.3:
optional: true
- zod@3.24.1: {}
+ zod@3.25.76: {}
zwitch@2.0.4: {}
diff --git a/test-kitchen/chefTask.ts b/test-kitchen/chefTask.ts
index 471d622be..c71a831c9 100644
--- a/test-kitchen/chefTask.ts
+++ b/test-kitchen/chefTask.ts
@@ -1,4 +1,4 @@
-import { CoreMessage, generateText, LanguageModelUsage } from 'ai';
+import { ModelMessage, generateText, LanguageModelUsage } from 'ai';
import * as walkdir from 'walkdir';
import { path } from 'chef-agent/utils/path';
import { ChefResult, ChefModel } from './types';
@@ -120,8 +120,8 @@ export async function chefTask(model: ChefModel, outputDir: string, userMessage:
let success: boolean;
let lastDeploySuccess = false;
let totalUsage: LanguageModelUsage = {
- promptTokens: 0,
- completionTokens: 0,
+ inputTokens: 0,
+ outputTokens: 0,
totalTokens: 0,
};
while (true) {
@@ -150,19 +150,19 @@ export async function chefTask(model: ChefModel, outputDir: string, userMessage:
logger.info('Generating...');
const response = await invokeGenerateText(model, opts, context);
const partId = makePartId(assistantMessage.id, assistantMessage.parts.length);
- assistantMessage.content += response.text;
- if (response.text) {
+ assistantMessage.content += response.text.text;
+ if (response.text.text) {
assistantMessage.parts.push({
type: 'text',
- text: response.text,
+ text: response.text.text,
});
}
- const parsed = messageParser.parse(partId, response.text);
+ const parsed = messageParser.parse(partId, response.text.text);
logger.info(
`Time taken: ${performance.now() - start}ms\nUsage: ${JSON.stringify(response.usage)}\nMessage: ${parsed}`,
);
- totalUsage.promptTokens += response.usage.promptTokens;
- totalUsage.completionTokens += response.usage.completionTokens;
+ totalUsage.inputTokens += response.usage.inputTokens;
+ totalUsage.outputTokens += response.usage.outputTokens;
totalUsage.totalTokens += response.usage.totalTokens;
if (response.finishReason == 'stop') {
success = lastDeploySuccess;
@@ -391,7 +391,7 @@ const installDependencies = wrapTraced(async function installDependencies(repoDi
async function invokeGenerateText(model: ChefModel, opts: SystemPromptOptions, context: UIMessage[]) {
return traced(
async (span) => {
- const messages: CoreMessage[] = [
+ const messages: ModelMessage[] = [
{
role: 'system',
content: ROLE_SYSTEM_PROMPT,
@@ -415,7 +415,7 @@ async function invokeGenerateText(model: ChefModel, opts: SystemPromptOptions, c
}
const result = await generateText({
model: model.ai,
- maxTokens: model.maxTokens,
+ maxOutputTokens: model.maxOutputTokens,
messages,
tools,
maxSteps: 64,
@@ -423,12 +423,12 @@ async function invokeGenerateText(model: ChefModel, opts: SystemPromptOptions, c
span.log({
input: messages,
output: {
- text: result.text,
+ text: result.text.text,
toolCalls: result.toolCalls,
},
metrics: {
- prompt_tokens: result.usage.promptTokens,
- completion_tokens: result.usage.completionTokens,
+ prompt_tokens: result.usage.inputTokens,
+ completion_tokens: result.usage.outputTokens,
total_tokens: result.usage.totalTokens,
},
metadata: {
diff --git a/test-kitchen/initialGeneration.eval.ts b/test-kitchen/initialGeneration.eval.ts
index fe47224f8..b910f0aab 100644
--- a/test-kitchen/initialGeneration.eval.ts
+++ b/test-kitchen/initialGeneration.eval.ts
@@ -47,13 +47,13 @@ if (process.env.ANTHROPIC_API_KEY) {
name: 'claude-3.5-sonnet',
model_slug: 'claude-3-5-sonnet-20241022',
ai: anthropic('claude-3-5-sonnet-20241022'),
- maxTokens: 8192,
+ maxOutputTokens: 8192,
});
chefEval({
name: 'claude-4-sonnet',
model_slug: 'claude-sonnet-4-20250514',
ai: anthropic('claude-sonnet-4-20250514'),
- maxTokens: 16384,
+ maxOutputTokens: 16384,
});
}
@@ -64,7 +64,7 @@ if (process.env.OPENAI_API_KEY && process.env.USE_OPENAI === 'true') {
name: 'gpt-4.1',
model_slug: 'gpt-4.1',
ai: openai('gpt-4.1'),
- maxTokens: 8192,
+ maxOutputTokens: 8192,
});
}
@@ -73,7 +73,7 @@ if (process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
name: 'gemini-2.5-pro',
model_slug: 'gemini-2.5-pro',
ai: google('gemini-2.5-pro'),
- maxTokens: 20000,
+ maxOutputTokens: 20000,
});
}
@@ -82,6 +82,6 @@ if (process.env.XAI_API_KEY) {
name: 'grok-3-mini',
model_slug: 'grok-3-mini',
ai: xai('grok-3-mini'),
- maxTokens: 8192,
+ maxOutputTokens: 8192,
});
}
diff --git a/test-kitchen/main.ts b/test-kitchen/main.ts
index 5f067ff06..a8ea174f5 100644
--- a/test-kitchen/main.ts
+++ b/test-kitchen/main.ts
@@ -10,7 +10,7 @@ const model: ChefModel = {
name: 'claude-3.5-sonnet',
model_slug: 'claude-3-5-sonnet-20240620',
ai: anthropic('claude-3-5-sonnet-20241022'),
- maxTokens: 8192,
+ maxOutputTokens: 8192,
};
mkdirSync('/tmp/backend', { recursive: true });
const result = await chefTask(model, '/tmp/backend', 'Make me a chat app');
diff --git a/test-kitchen/types.ts b/test-kitchen/types.ts
index 0f86c0850..e82a96508 100644
--- a/test-kitchen/types.ts
+++ b/test-kitchen/types.ts
@@ -4,7 +4,7 @@ export type ChefModel = {
name: string;
model_slug: string;
ai: LanguageModelV1;
- maxTokens: number;
+ maxOutputTokens: number;
};
export type ChefResult = {