37 changes: 18 additions & 19 deletions app/components/DebugPromptView.tsx
@@ -2,7 +2,7 @@
import { useEffect, useCallback, useState } from 'react';
import { JsonView } from 'react-json-view-lite';
import 'react-json-view-lite/dist/index.css';
-import type { CoreMessage, FilePart, ToolCallPart, TextPart } from 'ai';
+import type { ModelMessage, FilePart, ToolCallPart, TextPart } from 'ai';
import { ChevronDownIcon, ChevronRightIcon } from '@heroicons/react/20/solid';
import { ClipboardIcon, ArrowTopRightOnSquareIcon } from '@heroicons/react/24/outline';
import { useDebugPrompt } from '~/lib/hooks/useDebugPrompt';
@@ -107,7 +107,7 @@ function isToolCallPart(part: unknown): part is ToolCallPart {
return typeof part === 'object' && part !== null && 'type' in part && part.type === 'tool-call';
}

-function getMessageCharCount(message: CoreMessage): number {
+function getMessageCharCount(message: ModelMessage): number {
if (typeof message.content === 'string') return message.content.length;
if (Array.isArray(message.content)) {
return message.content.reduce((sum, part) => {
@@ -145,7 +145,7 @@ function getPreviewClass(text: string) {
return `preview-${Math.abs(text.split('').reduce((acc, char) => acc + char.charCodeAt(0), 0))}`;
}

-function findLastAssistantMessage(prompt: CoreMessage[]): string {
+function findLastAssistantMessage(prompt: ModelMessage[]): string {
// The last assistant message in a list of messages is the response.
// It should generally just be the last message, full stop.
for (let i = prompt.length - 1; i >= 0; i--) {
@@ -177,16 +177,16 @@ function LlmPromptAndResponseView({ promptAndResponse }: { promptAndResponse: Ll
const { prompt, completion, finishReason, modelId } = promptAndResponse;

const [isExpanded, setIsExpanded] = useState(true);
-const promptTokensTotal = promptAndResponse.usage.promptTokens;
+const promptTokensTotal = promptAndResponse.usage.inputTokens;
const cachedPromptTokens = promptAndResponse.usage.cachedPromptTokens;
-const outputTokens = promptAndResponse.usage.completionTokens;
+const outputTokens = promptAndResponse.usage.outputTokens;
const chefTokens = promptAndResponse.chefTokens || 0;

// Calculate total characters
const totalInputChars = (prompt || []).reduce((sum, msg) => sum + getMessageCharCount(msg), 0);
const totalOutputChars = (completion || []).reduce((sum, msg) => sum + getMessageCharCount(msg), 0);

-const getTokenEstimate = (message: CoreMessage) => {
+const getTokenEstimate = (message: ModelMessage) => {
const charCount = getMessageCharCount(message);
return estimateTokenCount(charCount, totalInputChars, promptTokensTotal);
};
@@ -262,12 +262,12 @@ function LlmPromptAndResponseView({ promptAndResponse }: { promptAndResponse: Ll
}

type CoreMessageViewProps = {
-message: CoreMessage;
-getTokenEstimate?: (message: CoreMessage) => number;
+message: ModelMessage;
+getTokenEstimate?: (message: ModelMessage) => number;
totalCompletionTokens?: number;
};

-function getMessagePreview(content: CoreMessage['content']): string {
+function getMessagePreview(content: ModelMessage['content']): string {
if (typeof content === 'string') {
return content;
}
@@ -288,7 +288,7 @@ function getMessagePreview(content: CoreMessage['content']): string {
}

type MessageContentViewProps = {
-content: CoreMessage['content'];
+content: ModelMessage['content'];
showRawJson?: boolean;
};

@@ -410,7 +410,6 @@ function CoreMessageView({ message, getTokenEstimate, totalCompletionTokens }: C
className={`flex-1 truncate text-sm text-gray-600 dark:text-gray-300 ${getPreviewClass(preview)} before:block before:truncate`}
/>
</button>
-
<div>
{isExpanded && (
<div className="mt-2">
@@ -458,15 +457,15 @@ function groupIntoUserPrompts(data: LlmPromptAndResponse[]): AllPromptsForUserIn
if (currentGroup.length > 0) {
const totalCompletionTokens = currentGroup.reduce((sum, item) => {
const usage = item.usage;
-return sum + usage.promptTokens;
+return sum + usage.inputTokens;
}, 0);
const cachedCompletionTokens = currentGroup.reduce((sum, item) => {
const usage = item.usage;
return sum + usage.cachedPromptTokens;
}, 0);
const totalOutputTokens = currentGroup.reduce((sum, item) => {
const usage = item.usage;
-return sum + usage.completionTokens;
+return sum + usage.outputTokens;
}, 0);
const totalChefTokens = currentGroup.reduce((sum, item) => {
return sum + (item.chefTokens || 0);
@@ -593,16 +592,16 @@ export default function DebugAllPromptsForChat({ chatInitialId, onClose, isDebug
const totals = userPromptGroups.reduce(
(acc, group) => {
return {
-promptTokens: acc.promptTokens + group.summary.totalCompletionTokens,
+inputTokens: acc.inputTokens + group.summary.totalCompletionTokens,
cachedPromptTokens: acc.cachedPromptTokens + group.summary.cachedCompletionTokens,
-completionTokens: acc.completionTokens + group.summary.totalOutputTokens,
+outputTokens: acc.outputTokens + group.summary.totalOutputTokens,
chefTokens: acc.chefTokens + group.summary.totalChefTokens,
};
},
{
-promptTokens: 0,
+inputTokens: 0,
cachedPromptTokens: 0,
-completionTokens: 0,
+outputTokens: 0,
chefTokens: 0,
},
);
@@ -664,14 +663,14 @@ export default function DebugAllPromptsForChat({ chatInitialId, onClose, isDebug
<div className="mt-2 flex gap-4 text-sm text-gray-500 dark:text-gray-400">
<div>
<span className="font-semibold text-gray-900 dark:text-gray-100">
-{formatNumber(totals.promptTokens - totals.cachedPromptTokens)}
+{formatNumber(totals.inputTokens - totals.cachedPromptTokens)}
</span>{' '}
total prompt tokens
{totals.cachedPromptTokens ? ` (+${formatNumber(totals.cachedPromptTokens)} cached)` : ''}
</div>
<div>
<span className="font-semibold text-gray-900 dark:text-gray-100">
-{formatNumber(totals.completionTokens)}
+{formatNumber(totals.outputTokens)}
</span>{' '}
total completion tokens
</div>
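The changes in this file track two AI SDK v5 renames: the `CoreMessage` type is now `ModelMessage`, and the usage counters moved from `promptTokens`/`completionTokens` to `inputTokens`/`outputTokens`. A minimal sketch of the renamed shape, under the assumption that the usage object looks like what this component reads (`cachedPromptTokens` is this app's own field, not part of the SDK type, and `summarizeUsage` is a hypothetical helper):

```ts
// Usage shape assumed from this diff: AI SDK v5 names plus the
// app-specific `cachedPromptTokens` counter.
type UsageLike = {
  inputTokens: number; // was `promptTokens` in v4
  outputTokens: number; // was `completionTokens` in v4
  cachedPromptTokens: number; // app-specific, unchanged by the upgrade
};

// Hypothetical helper mirroring the totals this component computes.
function summarizeUsage(items: { usage: UsageLike }[]) {
  return items.reduce(
    (acc, { usage }) => ({
      inputTokens: acc.inputTokens + usage.inputTokens,
      cachedPromptTokens: acc.cachedPromptTokens + usage.cachedPromptTokens,
      outputTokens: acc.outputTokens + usage.outputTokens,
    }),
    { inputTokens: 0, cachedPromptTokens: 0, outputTokens: 0 },
  );
}
```

Note that in v5 these counters can be undefined on the SDK's usage type, so production code may need fallbacks; the sketch assumes they are always present.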
28 changes: 14 additions & 14 deletions app/components/chat/AssistantMessage.tsx
@@ -1,6 +1,6 @@
import { memo, useMemo } from 'react';
import { Markdown } from './Markdown';
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import { ToolCall } from './ToolCall';
import { makePartId, type PartId } from 'chef-agent/partId.js';
import { ExclamationTriangleIcon, DotFilledIcon } from '@radix-ui/react-icons';
@@ -10,7 +10,7 @@ import { calculateChefTokens, usageFromGeneration, type ChefTokenBreakdown } fro
import { captureMessage } from '@sentry/remix';

interface AssistantMessageProps {
-message: Message;
+message: UIMessage;
}

export const AssistantMessage = memo(function AssistantMessage({ message }: AssistantMessageProps) {
@@ -64,7 +64,7 @@ function AssistantMessagePart({
partId,
parsedAnnotations,
}: {
-part: NonNullable<Message['parts']>[number];
+part: NonNullable<UIMessage['parts']>[number];
showUsageAnnotations: boolean;
partId: PartId;
parsedAnnotations: ReturnType<typeof parseAnnotations>;
@@ -138,7 +138,7 @@ function displayChefTokenNumber(num: number) {
function displayUsage(usageAnnotation: UsageAnnotation, provider: ProviderType, showUsageAnnotations: boolean) {
const usage: Usage = usageFromGeneration({
usage: usageAnnotation,
-providerMetadata: usageAnnotation.providerMetadata,
+providerOptions: usageAnnotation.providerOptions,
});
const { chefTokens, breakdown } = calculateChefTokens(usage, provider);
return (
@@ -151,20 +151,20 @@ function displayUsage(usageAnnotation: UsageAnnotation, provider: ProviderType,

function displayBreakdownForSingleAnnotation(breakdown: ChefTokenBreakdown) {
// A single annotation should always have a single provider.
-if (breakdown.completionTokens.anthropic > 0) {
-return `${displayChefTokenNumber(breakdown.promptTokens.anthropic.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.anthropic.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.anthropic)} completion`;
+if (breakdown.outputTokens.anthropic > 0) {
+return `${displayChefTokenNumber(breakdown.inputTokens.anthropic.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.anthropic.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.anthropic)} completion`;
}
-if (breakdown.completionTokens.openai > 0) {
-return `${displayChefTokenNumber(breakdown.promptTokens.openai.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.openai.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.openai)} completion`;
+if (breakdown.outputTokens.openai > 0) {
+return `${displayChefTokenNumber(breakdown.inputTokens.openai.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.openai.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.openai)} completion`;
}
-if (breakdown.completionTokens.xai > 0) {
-return `${displayChefTokenNumber(breakdown.promptTokens.xai.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.xai.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.xai)} completion`;
+if (breakdown.outputTokens.xai > 0) {
+return `${displayChefTokenNumber(breakdown.inputTokens.xai.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.xai.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.xai)} completion`;
}
-if (breakdown.completionTokens.google > 0) {
-return `${displayChefTokenNumber(breakdown.promptTokens.google.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.google.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.google)} completion`;
+if (breakdown.outputTokens.google > 0) {
+return `${displayChefTokenNumber(breakdown.inputTokens.google.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.google.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.google)} completion`;
}
-if (breakdown.completionTokens.bedrock > 0) {
-return `${displayChefTokenNumber(breakdown.promptTokens.bedrock.uncached)} uncached, ${displayChefTokenNumber(breakdown.promptTokens.bedrock.cached)} cached, ${displayChefTokenNumber(breakdown.completionTokens.bedrock)} completion`;
+if (breakdown.outputTokens.bedrock > 0) {
+return `${displayChefTokenNumber(breakdown.inputTokens.bedrock.uncached)} uncached, ${displayChefTokenNumber(breakdown.inputTokens.bedrock.cached)} cached, ${displayChefTokenNumber(breakdown.outputTokens.bedrock)} completion`;
}
return 'unknown';
}
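The five provider branches above repeat one template. Not part of this PR, but a possible follow-up is to collapse them into a loop; a sketch under the assumption that `breakdown.inputTokens[provider]` carries `{ uncached, cached }` and `breakdown.outputTokens[provider]` is a number, as the diff suggests, with the formatter passed in rather than reimplementing `displayChefTokenNumber`:

```ts
const PROVIDERS = ['anthropic', 'openai', 'xai', 'google', 'bedrock'] as const;
type Provider = (typeof PROVIDERS)[number];

type BreakdownLike = {
  inputTokens: Record<Provider, { uncached: number; cached: number }>;
  outputTokens: Record<Provider, number>;
};

// Builds the same string as the if-chain, for whichever provider
// actually produced output tokens in this annotation.
function displayBreakdown(breakdown: BreakdownLike, fmt: (n: number) => string): string {
  for (const p of PROVIDERS) {
    if (breakdown.outputTokens[p] > 0) {
      const input = breakdown.inputTokens[p];
      return `${fmt(input.uncached)} uncached, ${fmt(input.cached)} cached, ${fmt(breakdown.outputTokens[p])} completion`;
    }
  }
  return 'unknown';
}
```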
4 changes: 2 additions & 2 deletions app/components/chat/BaseChat.client.tsx
@@ -1,5 +1,5 @@
import { Sheet } from '@ui/Sheet';
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import React, { type ReactNode, type RefCallback, useCallback, useEffect, useMemo, useState } from 'react';
import Landing from '~/components/landing/Landing';
import { Workbench } from '~/components/workbench/Workbench.client';
@@ -48,7 +48,7 @@ interface BaseChatProps {
streamStatus: 'streaming' | 'submitted' | 'ready' | 'error';
currentError: Error | undefined;
toolStatus: ToolStatus;
-messages: Message[];
+messages: UIMessage[];
terminalInitializationOptions: TerminalInitializationOptions | undefined;
disableChatMessage: ReactNode | string | null;

16 changes: 8 additions & 8 deletions app/components/chat/Chat.tsx
@@ -1,5 +1,5 @@
import { useStore } from '@nanostores/react';
-import type { Message, UIMessage } from 'ai';
+import type { UIMessage } from 'ai';
import { useChat } from '@ai-sdk/react';
import { useAnimate } from 'framer-motion';
import { memo, useCallback, useEffect, useMemo, useRef, useState, type ReactNode } from 'react';
@@ -54,12 +54,12 @@ const MAX_RETRIES = 4;

const processSampledMessages = createSampler(
(options: {
-messages: Message[];
-initialMessages: Message[];
-parseMessages: (messages: Message[]) => void;
+messages: UIMessage[];
+initialMessages: UIMessage[];
+parseMessages: (messages: UIMessage[]) => void;
streamStatus: 'streaming' | 'submitted' | 'ready' | 'error';
storeMessageHistory: (
-messages: Message[],
+messages: UIMessage[],
streamStatus: 'streaming' | 'submitted' | 'ready' | 'error',
) => Promise<void>;
}) => {
@@ -74,10 +74,10 @@ const processSampledMessages = createSampler(
);

interface ChatProps {
-initialMessages: Message[];
+initialMessages: UIMessage[];
partCache: PartCache;
storeMessageHistory: (
-messages: Message[],
+messages: UIMessage[],
streamStatus: 'streaming' | 'submitted' | 'ready' | 'error',
) => Promise<void>;
initializeChat: () => Promise<boolean>;
@@ -283,7 +283,7 @@ export const Chat = memo(
const { messages, status, stop, append, setMessages, reload, error } = useChat({
initialMessages,
api: '/api/chat',
-sendExtraMessageFields: true,
+
experimental_prepareRequestBody: ({ messages }) => {
const chatInitialId = initialIdStore.get();
const deploymentName = convexProjectStore.get()?.deploymentName;
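This file moves `useChat` onto the v5 `UIMessage` type and drops `sendExtraMessageFields`, a v4-only flag that opted into sending fields like `id` and `parts` to the server; v5 appears to send full message objects unconditionally, so the flag needs no replacement. For orientation, a minimal sketch of how the two message types in this PR relate, using the SDK's `convertToModelMessages` bridge (the message content is invented for illustration):

```ts
import { convertToModelMessages, type ModelMessage, type UIMessage } from 'ai';

// UIMessage is the client-side shape useChat works with (id plus typed parts);
// ModelMessage is the provider-facing shape used when actually calling a model.
const uiMessages: UIMessage[] = [
  { id: 'msg-1', role: 'user', parts: [{ type: 'text', text: 'Hello!' }] },
];

// A server route would typically convert before calling streamText or generateText.
const modelMessages: ModelMessage[] = convertToModelMessages(uiMessages);
```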
4 changes: 2 additions & 2 deletions app/components/chat/Messages.client.tsx
@@ -1,4 +1,4 @@
-import type { Message } from 'ai';
+import type { UIMessage } from 'ai';
import { Fragment, useCallback, useState } from 'react';
import { classNames } from '~/utils/classNames';
import { AssistantMessage } from './AssistantMessage';
@@ -20,7 +20,7 @@ interface MessagesProps {
id?: string;
className?: string;
isStreaming?: boolean;
-messages?: Message[];
+messages?: UIMessage[];
subchatsLength?: number;
onRewindToMessage?: (subchatIndex?: number, messageIndex?: number) => void;
}