24 changes: 24 additions & 0 deletions src/core/chorus/ModelProviders/simple/ISimpleCompletionProvider.ts
@@ -0,0 +1,24 @@
export enum SimpleCompletionMode {
TITLE_GENERATION = "title_generation",
SUMMARIZER = "summarizer",
}

export type SimpleCompletionParams = {
  /** A well-known mode (resolved to a provider-specific default model) or an explicit model ID. */
  model?: SimpleCompletionMode | string;
  maxTokens: number;
};

/**
* Lightweight interface for simple LLM completions.
 * Used for utility tasks like generating chat titles and summaries.
* Intentionally separate from IProvider to avoid coupling to streaming/tools/attachments.
*/
export interface ISimpleCompletionProvider {
/**
* Performs a simple completion request.
* @param prompt The prompt to send to the model
* @param params Completion parameters including model and maxTokens
* @returns The full response text
*/
complete(prompt: string, params: SimpleCompletionParams): Promise<string>;
}
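
A minimal usage sketch (the call site is hypothetical; provider stands in for any ISimpleCompletionProvider implementation):

const title = await provider.complete(
  "Generate a short title for this conversation.",
  { model: SimpleCompletionMode.TITLE_GENERATION, maxTokens: 100 },
);

Passing a plain string as model skips the mode mapping and selects that model ID directly.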
61 changes: 61 additions & 0 deletions src/core/chorus/ModelProviders/simple/SimpleCompletionProviderAnthropic.ts
@@ -0,0 +1,61 @@
import Anthropic from "@anthropic-ai/sdk";
import {
ISimpleCompletionProvider,
SimpleCompletionParams,
SimpleCompletionMode,
} from "./ISimpleCompletionProvider";

const DEFAULT_TITLE_MODEL = "claude-haiku-4-5";
const DEFAULT_SUMMARIZER_MODEL = "claude-haiku-4-5";

export class SimpleCompletionProviderAnthropic
implements ISimpleCompletionProvider
{
constructor(private apiKey: string) {}

async complete(
prompt: string,
params: SimpleCompletionParams,
): Promise<string> {
const client = new Anthropic({
apiKey: this.apiKey,
dangerouslyAllowBrowser: true,
});

const model = this.getModel(params.model);

const stream = client.messages.stream({
model,
max_tokens: params.maxTokens,
messages: [
{
role: "user",
content: prompt,
},
],
});

let fullResponse = "";

stream.on("text", (text: string) => {
fullResponse += text;
});

await stream.finalMessage();

return fullResponse;
}

  /**
   * Resolves a mode or an explicit model ID to a concrete model name,
   * falling back to the title default when nothing is specified.
   */
  private getModel(model: SimpleCompletionMode | string | undefined): string {
if (model === SimpleCompletionMode.SUMMARIZER) {
return DEFAULT_SUMMARIZER_MODEL;
}
if (model === SimpleCompletionMode.TITLE_GENERATION) {
return DEFAULT_TITLE_MODEL;
}
if (typeof model === "string") {
return model;
}
return DEFAULT_TITLE_MODEL;
}
}
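
Nothing here consumes partial output, so a non-streaming request would return the same result. A minimal alternative sketch against the same @anthropic-ai/sdk client (assuming these short utility completions fit in a single response):

const response = await client.messages.create({
  model,
  max_tokens: params.maxTokens,
  messages: [{ role: "user", content: prompt }],
});
// Concatenate the text blocks from the response content.
let text = "";
for (const block of response.content) {
  if (block.type === "text") text += block.text;
}
return text;

Streaming as written does keep long-running requests alive, which may be the motivation for it here.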
@@ -0,0 +1,67 @@
import { ApiKeys } from "../../Models";
import { canProceedWithProvider } from "@core/utilities/ProxyUtils";
import { ISimpleCompletionProvider } from "./ISimpleCompletionProvider";
import { SimpleCompletionProviderAnthropic } from "./SimpleCompletionProviderAnthropic";
import { SimpleCompletionProviderOpenRouter } from "./SimpleCompletionProviderOpenRouter";
import { SimpleCompletionProviderOpenAI } from "./SimpleCompletionProviderOpenAI";
import { SimpleCompletionProviderGoogle } from "./SimpleCompletionProviderGoogle";

type ProviderConfig = {
name: string;
key: keyof ApiKeys;
create: (apiKey: string) => ISimpleCompletionProvider;
};

const PROVIDER_PRECEDENCE: ProviderConfig[] = [
{
name: "anthropic",
key: "anthropic",
create: (key) => new SimpleCompletionProviderAnthropic(key),
},
{
name: "openai",
key: "openai",
create: (key) => new SimpleCompletionProviderOpenAI(key),
},
{
name: "google",
key: "google",
create: (key) => new SimpleCompletionProviderGoogle(key),
},
{
name: "openrouter",
key: "openrouter",
create: (key) => new SimpleCompletionProviderOpenRouter(key),
},
];

/**
 * Factory function that selects and returns a simple completion provider based
 * on the available API keys, trying providers in a fixed precedence order:
 * Anthropic, then OpenAI, then Google, then OpenRouter.
 *
 * @param apiKeys The API keys object from settings
 * @returns An ISimpleCompletionProvider instance
 * @throws Error if no suitable provider is configured
 */
export function getSimpleCompletionProvider(
apiKeys: ApiKeys,
): ISimpleCompletionProvider {
const reasons: string[] = [];

for (const provider of PROVIDER_PRECEDENCE) {
const check = canProceedWithProvider(provider.name, apiKeys);
const apiKey = apiKeys[provider.key];

if (check.canProceed && apiKey) {
return provider.create(apiKey);
}

if (!check.canProceed && check.reason) {
reasons.push(check.reason);
}
}

throw new Error(
`Please add an Anthropic, OpenAI, Google, or OpenRouter API key in Settings to generate chat titles. ${reasons.join(" ")}`,
);
}
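
A hypothetical call site (settings.apiKeys is a stand-in for wherever the ApiKeys object is actually read from):

const provider = getSimpleCompletionProvider(settings.apiKeys);
const summary = await provider.complete(prompt, {
  model: SimpleCompletionMode.SUMMARIZER,
  maxTokens: 8192,
});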
64 changes: 64 additions & 0 deletions src/core/chorus/ModelProviders/simple/SimpleCompletionProviderGoogle.ts
@@ -0,0 +1,64 @@
import OpenAI from "openai";
import {
ISimpleCompletionProvider,
SimpleCompletionParams,
SimpleCompletionMode,
} from "./ISimpleCompletionProvider";

const DEFAULT_TITLE_MODEL = "gemini-2.5-flash";
const DEFAULT_SUMMARIZER_MODEL = "gemini-2.5-flash";

export class SimpleCompletionProviderGoogle
implements ISimpleCompletionProvider
{
constructor(private apiKey: string) {}

async complete(
prompt: string,
params: SimpleCompletionParams,
): Promise<string> {
const client = new OpenAI({
baseURL: "https://generativelanguage.googleapis.com/v1beta/openai",
apiKey: this.apiKey,
dangerouslyAllowBrowser: true,
});

const model = this.getModel(params.model);

const stream = await client.chat.completions.create({
model,
max_tokens: params.maxTokens,
stream: true,
messages: [
{
role: "user",
content: prompt,
},
],
});

let fullResponse = "";

for await (const chunk of stream) {
const delta = chunk.choices?.[0]?.delta?.content;
if (typeof delta === "string") {
fullResponse += delta;
}
}

return fullResponse;
}

private getModel(model: SimpleCompletionMode | string | undefined): string {
if (model === SimpleCompletionMode.SUMMARIZER) {
return DEFAULT_SUMMARIZER_MODEL;
}
if (model === SimpleCompletionMode.TITLE_GENERATION) {
return DEFAULT_TITLE_MODEL;
}
if (typeof model === "string") {
return model;
}
return DEFAULT_TITLE_MODEL;
}
}
63 changes: 63 additions & 0 deletions src/core/chorus/ModelProviders/simple/SimpleCompletionProviderOpenAI.ts
@@ -0,0 +1,63 @@
import OpenAI from "openai";
import {
ISimpleCompletionProvider,
SimpleCompletionParams,
SimpleCompletionMode,
} from "./ISimpleCompletionProvider";

const DEFAULT_TITLE_MODEL = "gpt-5-mini";
const DEFAULT_SUMMARIZER_MODEL = "gpt-5-mini";

export class SimpleCompletionProviderOpenAI
implements ISimpleCompletionProvider
{
constructor(private apiKey: string) {}

async complete(
prompt: string,
params: SimpleCompletionParams,
): Promise<string> {
const client = new OpenAI({
apiKey: this.apiKey,
dangerouslyAllowBrowser: true,
});

const model = this.getModel(params.model);

const stream = await client.chat.completions.create({
model,
      // gpt-5 family models reject max_tokens; the Chat Completions API expects max_completion_tokens for them
      max_completion_tokens: params.maxTokens,
stream: true,
messages: [
{
role: "user",
content: prompt,
},
],
});

let fullResponse = "";

for await (const chunk of stream) {
const delta = chunk.choices?.[0]?.delta?.content;
if (typeof delta === "string") {
fullResponse += delta;
}
}

return fullResponse;
}

private getModel(model: SimpleCompletionMode | string | undefined): string {
if (model === SimpleCompletionMode.SUMMARIZER) {
return DEFAULT_SUMMARIZER_MODEL;
}
if (model === SimpleCompletionMode.TITLE_GENERATION) {
return DEFAULT_TITLE_MODEL;
}
if (typeof model === "string") {
return model;
}
return DEFAULT_TITLE_MODEL;
}
}
68 changes: 68 additions & 0 deletions src/core/chorus/ModelProviders/simple/SimpleCompletionProviderOpenRouter.ts
@@ -0,0 +1,68 @@
import OpenAI from "openai";
import {
ISimpleCompletionProvider,
SimpleCompletionParams,
SimpleCompletionMode,
} from "./ISimpleCompletionProvider";

const DEFAULT_TITLE_MODEL = "anthropic/claude-haiku-4.5";
const DEFAULT_SUMMARIZER_MODEL = "anthropic/claude-haiku-4.5";

export class SimpleCompletionProviderOpenRouter
implements ISimpleCompletionProvider
{
constructor(private apiKey: string) {}

async complete(
prompt: string,
params: SimpleCompletionParams,
): Promise<string> {
const client = new OpenAI({
baseURL: "https://openrouter.ai/api/v1",
apiKey: this.apiKey,
defaultHeaders: {
"HTTP-Referer": "https://chorus.sh",
"X-Title": "Chorus",
},
dangerouslyAllowBrowser: true,
});

const model = this.getModel(params.model);

const stream = await client.chat.completions.create({
model,
max_tokens: params.maxTokens,
stream: true,
messages: [
{
role: "user",
content: prompt,
},
],
});

let fullResponse = "";

for await (const chunk of stream) {
const delta = chunk.choices?.[0]?.delta?.content;
if (typeof delta === "string") {
fullResponse += delta;
}
}

return fullResponse;
}

private getModel(model: SimpleCompletionMode | string | undefined): string {
if (model === SimpleCompletionMode.SUMMARIZER) {
return DEFAULT_SUMMARIZER_MODEL;
}
if (model === SimpleCompletionMode.TITLE_GENERATION) {
return DEFAULT_TITLE_MODEL;
}
if (typeof model === "string") {
return model;
}
return DEFAULT_TITLE_MODEL;
}
}
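
The Google, OpenAI, and OpenRouter providers repeat the same delta-accumulation loop and getModel fallback verbatim. As a refactoring sketch (not part of this PR; the helper name is hypothetical), the loop could be hoisted into a shared function that each complete() returns:

import OpenAI from "openai";

async function accumulateChatStream(
  stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>,
): Promise<string> {
  let fullResponse = "";
  for await (const chunk of stream) {
    // Each chunk carries at most one incremental content delta.
    const delta = chunk.choices?.[0]?.delta?.content;
    if (typeof delta === "string") {
      fullResponse += delta;
    }
  }
  return fullResponse;
}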
9 changes: 4 additions & 5 deletions src/core/chorus/api/MessageAPI.ts
@@ -21,7 +21,8 @@ import * as Models from "../Models";
import { UpdateQueue } from "../UpdateQueue";
import posthog from "posthog-js";
import { v4 as uuidv4 } from "uuid";
import { simpleLLM, simpleSummarizeLLM } from "../simpleLLM";
import { simpleLLM } from "../simpleLLM";
import { SimpleCompletionMode } from "../ModelProviders/simple/ISimpleCompletionProvider";
import * as Prompts from "../prompts/prompts";
import { useNavigate } from "react-router-dom";
import { ToolsetsManager } from "../ToolsetsManager";
@@ -2164,9 +2165,8 @@ export function useSummarizeChat() {
conversationText,
);

const summary = await simpleSummarizeLLM(prompt, {
// NOTE: If you change this model _provider_, you'll need to update the response handling in simpleSummarizeLLM.ts
model: "gemini-2.5-flash",
const summary = await simpleLLM(prompt, {
model: SimpleCompletionMode.SUMMARIZER,
maxTokens: 8192,
});

@@ -2925,7 +2925,6 @@ If there's no information in the message, just return "Untitled Chat".
${userMessageText}
</message>`,
{
model: "claude-3-5-sonnet-latest",
maxTokens: 100,
},
);