From 3df665e5fdaf7b6d015af9aa5f8943608b25407d Mon Sep 17 00:00:00 2001 From: isaac-scarrott Date: Sun, 27 Apr 2025 10:54:18 +0100 Subject: [PATCH 1/8] Add support for OpenRouter as a new model provider - Introduced `ProviderOpenRouter` in the `models` package. - Added OpenRouter-specific models, including `GPT41`, `GPT41Mini`, `GPT4o`, and others, with their configurations and costs. - Updated `generateSchema` to include OpenRouter as a provider. - Added OpenRouter-specific environment variable handling (`OPENROUTER_API_KEY`) in `config.go`. - Implemented default model settings for OpenRouter agents in `setDefaultModelForAgent`. - Updated `getProviderAPIKey` to retrieve the OpenRouter API key. - Extended `SupportedModels` to include OpenRouter models. - Added OpenRouter client initialization in the `provider` package. - Modified `processGeneration` to handle `FinishReasonUnknown` in addition to `FinishReasonToolUse`. --- cmd/schema/main.go | 1 + internal/config/config.go | 39 ++++++ internal/llm/agent/agent.go | 3 +- internal/llm/models/models.go | 1 + internal/llm/models/openrouter.go | 208 ++++++++++++++++++++++++++++++ internal/llm/provider/provider.go | 8 ++ 6 files changed, 259 insertions(+), 1 deletion(-) create mode 100644 internal/llm/models/openrouter.go diff --git a/cmd/schema/main.go b/cmd/schema/main.go index af9533cf3..cd550d3fe 100644 --- a/cmd/schema/main.go +++ b/cmd/schema/main.go @@ -173,6 +173,7 @@ func generateSchema() map[string]any { string(models.ProviderOpenAI), string(models.ProviderGemini), string(models.ProviderGROQ), + string(models.ProviderOpenRouter), string(models.ProviderBedrock), string(models.ProviderAzure), } diff --git a/internal/config/config.go b/internal/config/config.go index 9aa22bd4e..2fa576c03 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -267,6 +267,15 @@ func setProviderDefaults() { return } + // OpenRouter configuration + if apiKey := os.Getenv("OPENROUTER_API_KEY"); apiKey != "" { + viper.SetDefault("providers.openrouter.apiKey", apiKey) + viper.SetDefault("agents.coder.model", models.GPT41) + viper.SetDefault("agents.task.model", models.GPT41Mini) + viper.SetDefault("agents.title.model", models.GPT41Mini) + return + } + // AWS Bedrock configuration if hasAWSCredentials() { viper.SetDefault("agents.coder.model", models.BedrockClaude37Sonnet) @@ -527,6 +536,8 @@ func getProviderAPIKey(provider models.ModelProvider) string { return os.Getenv("GROQ_API_KEY") case models.ProviderAzure: return os.Getenv("AZURE_OPENAI_API_KEY") + case models.ProviderOpenRouter: + return os.Getenv("OPENROUTER_API_KEY") case models.ProviderBedrock: if hasAWSCredentials() { return "aws-credentials-available" @@ -578,6 +589,34 @@ func setDefaultModelForAgent(agent AgentName) bool { return true } + if apiKey := os.Getenv("OPENROUTER_API_KEY"); apiKey != "" { + var model models.ModelID + maxTokens := int64(5000) + reasoningEffort := "" + + switch agent { + case AgentTitle: + model = models.GPT41Mini + maxTokens = 80 + case AgentTask: + model = models.GPT41Mini + default: + model = models.GPT41 + } + + // Check if model supports reasoning + if modelInfo, ok := models.SupportedModels[model]; ok && modelInfo.CanReason { + reasoningEffort = "medium" + } + + cfg.Agents[agent] = Agent{ + Model: model, + MaxTokens: maxTokens, + ReasoningEffort: reasoningEffort, + } + return true + } + if apiKey := os.Getenv("GEMINI_API_KEY"); apiKey != "" { var model models.ModelID maxTokens := int64(5000) diff --git a/internal/llm/agent/agent.go 
b/internal/llm/agent/agent.go index 80dfeb0fd..41dfd8b14 100644 --- a/internal/llm/agent/agent.go +++ b/internal/llm/agent/agent.go @@ -229,7 +229,8 @@ func (a *agent) processGeneration(ctx context.Context, sessionID, content string return a.err(fmt.Errorf("failed to process events: %w", err)) } logging.Info("Result", "message", agentMessage.FinishReason(), "toolResults", toolResults) - if (agentMessage.FinishReason() == message.FinishReasonToolUse) && toolResults != nil { + finishReason := agentMessage.FinishReason() + if (finishReason == message.FinishReasonToolUse || finishReason == message.FinishReasonUnknown) && toolResults != nil { // We are not done, we need to respond with the tool response msgHistory = append(msgHistory, agentMessage, *toolResults) continue diff --git a/internal/llm/models/models.go b/internal/llm/models/models.go index 5dd28359f..2c5e61d7e 100644 --- a/internal/llm/models/models.go +++ b/internal/llm/models/models.go @@ -86,4 +86,5 @@ func init() { maps.Copy(SupportedModels, GeminiModels) maps.Copy(SupportedModels, GroqModels) maps.Copy(SupportedModels, AzureModels) + maps.Copy(SupportedModels, OpenRouterModels) } diff --git a/internal/llm/models/openrouter.go b/internal/llm/models/openrouter.go new file mode 100644 index 000000000..63bda33bd --- /dev/null +++ b/internal/llm/models/openrouter.go @@ -0,0 +1,208 @@ +package models + +const ( + ProviderOpenRouter ModelProvider = "openrouter" + + // TODO: These models should probably live in deepseek.go. OpenRouter generally shouldn't define models. + DeepSeekChatFree ModelID = "deepseek-chat-free" + DeepSeekR1Free ModelID = "deepseek-r1-free" +) + +var OpenRouterModels = map[ModelID]Model{ + GPT41: { + ID: GPT41, + Name: "GPT 4.1", + Provider: ProviderOpenRouter, + APIModel: "gpt-4.1", + CostPer1MIn: 2.00, + CostPer1MInCached: 0.50, + CostPer1MOutCached: 0.0, + CostPer1MOut: 8.00, + ContextWindow: 1_047_576, + DefaultMaxTokens: 20000, + }, + GPT41Mini: { + ID: GPT41Mini, + Name: "GPT 4.1 mini", + Provider: ProviderOpenRouter, + APIModel: "gpt-4.1", + CostPer1MIn: 0.40, + CostPer1MInCached: 0.10, + CostPer1MOutCached: 0.0, + CostPer1MOut: 1.60, + ContextWindow: 200_000, + DefaultMaxTokens: 20000, + }, + GPT41Nano: { + ID: GPT41Nano, + Name: "GPT 4.1 nano", + Provider: ProviderOpenRouter, + APIModel: "gpt-4.1-nano", + CostPer1MIn: 0.10, + CostPer1MInCached: 0.025, + CostPer1MOutCached: 0.0, + CostPer1MOut: 0.40, + ContextWindow: 1_047_576, + DefaultMaxTokens: 20000, + }, + GPT45Preview: { + ID: GPT45Preview, + Name: "GPT 4.5 preview", + Provider: ProviderOpenRouter, + APIModel: "gpt-4.5-preview", + CostPer1MIn: 75.00, + CostPer1MInCached: 37.50, + CostPer1MOutCached: 0.0, + CostPer1MOut: 150.00, + ContextWindow: 128_000, + DefaultMaxTokens: 15000, + }, + GPT4o: { + ID: GPT4o, + Name: "GPT 4o", + Provider: ProviderOpenRouter, + APIModel: "gpt-4o", + CostPer1MIn: 2.50, + CostPer1MInCached: 1.25, + CostPer1MOutCached: 0.0, + CostPer1MOut: 10.00, + ContextWindow: 128_000, + DefaultMaxTokens: 4096, + }, + GPT4oMini: { + ID: GPT4oMini, + Name: "GPT 4o mini", + Provider: ProviderOpenRouter, + APIModel: "gpt-4o-mini", + CostPer1MIn: 0.15, + CostPer1MInCached: 0.075, + CostPer1MOutCached: 0.0, + CostPer1MOut: 0.60, + ContextWindow: 128_000, + }, + O1: { + ID: O1, + Name: "O1", + Provider: ProviderOpenRouter, + APIModel: "o1", + CostPer1MIn: 15.00, + CostPer1MInCached: 7.50, + CostPer1MOutCached: 0.0, + CostPer1MOut: 60.00, + ContextWindow: 200_000, + DefaultMaxTokens: 50000, + CanReason: true, + }, + O1Pro: { + ID: O1Pro, + 
Name: "o1 pro", + Provider: ProviderOpenRouter, + APIModel: "o1-pro", + CostPer1MIn: 150.00, + CostPer1MInCached: 0.0, + CostPer1MOutCached: 0.0, + CostPer1MOut: 600.00, + ContextWindow: 200_000, + DefaultMaxTokens: 50000, + CanReason: true, + }, + O1Mini: { + ID: O1Mini, + Name: "o1 mini", + Provider: ProviderOpenRouter, + APIModel: "o1-mini", + CostPer1MIn: 1.10, + CostPer1MInCached: 0.55, + CostPer1MOutCached: 0.0, + CostPer1MOut: 4.40, + ContextWindow: 128_000, + DefaultMaxTokens: 50000, + CanReason: true, + }, + O3: { + ID: O3, + Name: "o3", + Provider: ProviderOpenRouter, + APIModel: "o3", + CostPer1MIn: 10.00, + CostPer1MInCached: 2.50, + CostPer1MOutCached: 0.0, + CostPer1MOut: 40.00, + ContextWindow: 200_000, + CanReason: true, + }, + O3Mini: { + ID: O3Mini, + Name: "o3 mini", + Provider: ProviderOpenRouter, + APIModel: "o3-mini", + CostPer1MIn: 1.10, + CostPer1MInCached: 0.55, + CostPer1MOutCached: 0.0, + CostPer1MOut: 4.40, + ContextWindow: 200_000, + DefaultMaxTokens: 50000, + CanReason: true, + }, + O4Mini: { + ID: O4Mini, + Name: "o4 mini", + Provider: ProviderOpenRouter, + APIModel: "o4-mini", + CostPer1MIn: 1.10, + CostPer1MInCached: 0.275, + CostPer1MOutCached: 0.0, + CostPer1MOut: 4.40, + ContextWindow: 128_000, + DefaultMaxTokens: 50000, + CanReason: true, + }, + Gemini25Flash: { + ID: Gemini25Flash, + Name: "Gemini 2.5 Flash", + Provider: ProviderOpenRouter, + APIModel: "google/gemini-2.5-flash-preview-04-17", + CostPer1MIn: 0.15, + CostPer1MInCached: 0, + CostPer1MOutCached: 0, + CostPer1MOut: 0.60, + ContextWindow: 1000000, + DefaultMaxTokens: 50000, + }, + Gemini25: { + ID: Gemini25, + Name: "Gemini 2.5 Pro", + Provider: ProviderOpenRouter, + APIModel: "google/gemini-2.5-pro-preview-03-25", + CostPer1MIn: 1.25, + CostPer1MInCached: 0, + CostPer1MOutCached: 0, + CostPer1MOut: 10, + ContextWindow: 1000000, + DefaultMaxTokens: 50000, + }, + DeepSeekChatFree: { + ID: DeepSeekChatFree, + Name: "DeepSeek Chat (Free)", + Provider: ProviderOpenRouter, + APIModel: "deepseek/deepseek-chat-v3-0324:free", + CostPer1MIn: 0, + CostPer1MInCached: 0, + CostPer1MOutCached: 0, + CostPer1MOut: 0, + ContextWindow: 12800, + DefaultMaxTokens: 50000, + }, + DeepSeekR1Free: { + ID: DeepSeekR1Free, + Name: "DeepSeek R1 (Free)", + Provider: ProviderOpenRouter, + APIModel: "deepseek/deepseek-r1:free", + CostPer1MIn: 0, + CostPer1MInCached: 0, + CostPer1MOutCached: 0, + CostPer1MOut: 0, + ContextWindow: 164000, + DefaultMaxTokens: 50000, + }, +} diff --git a/internal/llm/provider/provider.go b/internal/llm/provider/provider.go index 737b6fb00..a442ba09a 100644 --- a/internal/llm/provider/provider.go +++ b/internal/llm/provider/provider.go @@ -120,6 +120,14 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption options: clientOptions, client: newAzureClient(clientOptions), }, nil + case models.ProviderOpenRouter: + clientOptions.openaiOptions = append(clientOptions.openaiOptions, + WithOpenAIBaseURL("https://openrouter.ai/api/v1"), + ) + return &baseProvider[OpenAIClient]{ + options: clientOptions, + client: newOpenAIClient(clientOptions), + }, nil case models.ProviderMock: // TODO: implement mock client for test panic("not implemented") From 0f10b17e74c37884ab8677f5b53ecbcd67bba130 Mon Sep 17 00:00:00 2001 From: isaac-scarrott Date: Sun, 27 Apr 2025 11:01:57 +0100 Subject: [PATCH 2/8] [feature/openrouter-provider] Add new models and provider to schema - Added "deepseek-chat-free" and "deepseek-r1-free" to the list of supported models in `opencode-schema.json`. 
--- opencode-schema.json | 38 +++++++++----------------------------- 1 file changed, 9 insertions(+), 29 deletions(-) diff --git a/opencode-schema.json b/opencode-schema.json index 766ca0260..c519ebafe 100644 --- a/opencode-schema.json +++ b/opencode-schema.json @@ -55,17 +55,11 @@ }, "reasoningEffort": { "description": "Reasoning effort for models that support it (OpenAI, Anthropic)", - "enum": [ - "low", - "medium", - "high" - ], + "enum": ["low", "medium", "high"], "type": "string" } }, - "required": [ - "model" - ], + "required": ["model"], "type": "object" } }, @@ -126,17 +120,11 @@ }, "reasoningEffort": { "description": "Reasoning effort for models that support it (OpenAI, Anthropic)", - "enum": [ - "low", - "medium", - "high" - ], + "enum": ["low", "medium", "high"], "type": "string" } }, - "required": [ - "model" - ], + "required": ["model"], "type": "object" }, "description": "Agent configurations", @@ -182,9 +170,7 @@ "type": "string" } }, - "required": [ - "directory" - ], + "required": ["directory"], "type": "object" }, "debug": { @@ -222,9 +208,7 @@ "type": "object" } }, - "required": [ - "command" - ], + "required": ["command"], "type": "object" }, "description": "Language Server Protocol configurations", @@ -262,10 +246,7 @@ "type": { "default": "stdio", "description": "Type of MCP server", - "enum": [ - "stdio", - "sse" - ], + "enum": ["stdio", "sse"], "type": "string" }, "url": { @@ -273,9 +254,7 @@ "type": "string" } }, - "required": [ - "command" - ], + "required": ["command"], "type": "object" }, "description": "Model Control Protocol server configurations", @@ -299,6 +278,7 @@ "enum": [ "anthropic", "openai", + "openrouter", "gemini", "groq", "bedrock", From fb53885481ac0f3a84e5d21af284b2c2fda0fdc9 Mon Sep 17 00:00:00 2001 From: isaac-scarrott Date: Sun, 27 Apr 2025 19:46:11 +0100 Subject: [PATCH 3/8] [feature/openrouter-provider] Add OpenRouter provider support and integrate new models - Updated README.md to include OpenRouter as a supported provider and its configuration details. - Added `OPENROUTER_API_KEY` to environment variable configuration. - Introduced OpenRouter-specific models in `internal/llm/models/openrouter.go` with mappings to existing cost and token configurations. - Updated `internal/config/config.go` to set default models for OpenRouter agents. - Extended `opencode-schema.json` to include OpenRouter models in the schema definitions. - Refactored model IDs and names to align with OpenRouter naming conventions. 
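Note: the convention this patch adopts in `openrouter.go` is to alias each upstream model's pricing and limits rather than restating the literals. A minimal, self-contained sketch of that pattern follows, with stand-in types; the real fields live on `models.Model`.

```go
package main

import "fmt"

// Stand-in types; trimmed versions of what internal/llm/models defines.
type (
	ModelID       string
	ModelProvider string
)

type Model struct {
	ID            ModelID
	Provider      ModelProvider
	APIModel      string
	CostPer1MIn   float64
	CostPer1MOut  float64
	ContextWindow int64
}

func main() {
	// Upstream pricing table (a slice of what OpenAIModels holds).
	openAI := map[ModelID]Model{
		"gpt-4.1": {ID: "gpt-4.1", CostPer1MIn: 2.00, CostPer1MOut: 8.00, ContextWindow: 1_047_576},
	}
	// The aliasing pattern: reuse the upstream model's cost and context
	// fields instead of duplicating the literals per provider.
	openRouterGPT41 := Model{
		ID:            "openrouter.gpt-4.1",
		Provider:      "openrouter",
		APIModel:      "gpt-4.1",
		CostPer1MIn:   openAI["gpt-4.1"].CostPer1MIn,
		CostPer1MOut:  openAI["gpt-4.1"].CostPer1MOut,
		ContextWindow: openAI["gpt-4.1"].ContextWindow,
	}
	fmt.Printf("%+v\n", openRouterGPT41)
}
```

Keeping `OpenAIModels` as the single source of truth means an upstream price correction propagates to the OpenRouter aliases automatically.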
--- README.md | 10 +- internal/config/config.go | 12 +- internal/llm/models/openrouter.go | 304 ++++++++++++++++-------------- opencode-schema.json | 40 +++- 4 files changed, 208 insertions(+), 158 deletions(-) diff --git a/README.md b/README.md index 1e5433c2e..e94c6cb3a 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ OpenCode is a Go-based CLI application that brings AI assistance to your termina ## Features - **Interactive TUI**: Built with [Bubble Tea](https://github.com/charmbracelet/bubbletea) for a smooth terminal experience -- **Multiple AI Providers**: Support for OpenAI, Anthropic Claude, Google Gemini, AWS Bedrock, Groq, and Azure OpenAI +- **Multiple AI Providers**: Support for OpenAI, Anthropic Claude, Google Gemini, AWS Bedrock, Groq, Azure OpenAI, and OpenRouter - **Session Management**: Save and manage multiple conversation sessions - **Tool Integration**: AI can execute commands, search files, and modify code - **Vim-like Editor**: Integrated editor with text input capabilities @@ -97,8 +97,12 @@ You can configure OpenCode using environment variables: "disabled": false }, "groq": { - "apiKey": "your-api-key", - "disabled": false + "apiKey": "your-api-key", + "disabled": false + }, + "openrouter": { + "apiKey": "your-api-key", + "disabled": false } }, "agents": { diff --git a/internal/config/config.go b/internal/config/config.go index 2fa576c03..cc5232143 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -270,9 +270,9 @@ func setProviderDefaults() { // OpenRouter configuration if apiKey := os.Getenv("OPENROUTER_API_KEY"); apiKey != "" { viper.SetDefault("providers.openrouter.apiKey", apiKey) - viper.SetDefault("agents.coder.model", models.GPT41) - viper.SetDefault("agents.task.model", models.GPT41Mini) - viper.SetDefault("agents.title.model", models.GPT41Mini) + viper.SetDefault("agents.coder.model", models.OpenRouterGPT41) + viper.SetDefault("agents.task.model", models.OpenRouterGPT41Mini) + viper.SetDefault("agents.title.model", models.OpenRouterGPT41Mini) return } @@ -596,12 +596,12 @@ func setDefaultModelForAgent(agent AgentName) bool { switch agent { case AgentTitle: - model = models.GPT41Mini + model = models.OpenRouterGPT41Mini maxTokens = 80 case AgentTask: - model = models.GPT41Mini + model = models.OpenRouterGPT41Mini default: - model = models.GPT41 + model = models.OpenRouterGPT41 } // Check if model supports reasoning diff --git a/internal/llm/models/openrouter.go b/internal/llm/models/openrouter.go index 63bda33bd..0b1f5a41d 100644 --- a/internal/llm/models/openrouter.go +++ b/internal/llm/models/openrouter.go @@ -3,187 +3,201 @@ package models const ( ProviderOpenRouter ModelProvider = "openrouter" - // TODO: These models should probably live in deepseek.go. OpenRouter generally shouldn't define models. 
- DeepSeekChatFree ModelID = "deepseek-chat-free" - DeepSeekR1Free ModelID = "deepseek-r1-free" + OpenRouterDeepSeekChatFree ModelID = "openrouter.deepseek-chat-free" + OpenRouterDeepSeekR1Free ModelID = "openrouter.deepseek-r1-free" + OpenRouterGPT41 ModelID = "openrouter.gpt-4.1" + OpenRouterGPT41Mini ModelID = "openrouter.gpt-4.1-mini" + OpenRouterGPT41Nano ModelID = "openrouter.gpt-4.1-nano" + OpenRouterGPT45Preview ModelID = "openrouter.gpt-4.5-preview" + OpenRouterGPT4o ModelID = "openrouter.gpt-4o" + OpenRouterGPT4oMini ModelID = "openrouter.gpt-4o-mini" + OpenRouterO1 ModelID = "openrouter.o1" + OpenRouterO1Pro ModelID = "openrouter.o1-pro" + OpenRouterO1Mini ModelID = "openrouter.o1-mini" + OpenRouterO3 ModelID = "openrouter.o3" + OpenRouterO3Mini ModelID = "openrouter.o3-mini" + OpenRouterO4Mini ModelID = "openrouter.o4-mini" + OpenRouterGemini25Flash ModelID = "openrouter.gemini-2.5-flash" + OpenRouterGemini25 ModelID = "openrouter.gemini-2.5" ) var OpenRouterModels = map[ModelID]Model{ - GPT41: { - ID: GPT41, - Name: "GPT 4.1", + OpenRouterGPT41: { + ID: OpenRouterGPT41, + Name: "OpenRouter – GPT 4.1", Provider: ProviderOpenRouter, APIModel: "gpt-4.1", - CostPer1MIn: 2.00, - CostPer1MInCached: 0.50, - CostPer1MOutCached: 0.0, - CostPer1MOut: 8.00, - ContextWindow: 1_047_576, - DefaultMaxTokens: 20000, + CostPer1MIn: OpenAIModels[GPT41].CostPer1MIn, + CostPer1MInCached: OpenAIModels[GPT41].CostPer1MInCached, + CostPer1MOut: OpenAIModels[GPT41].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[GPT41].CostPer1MOutCached, + ContextWindow: OpenAIModels[GPT41].ContextWindow, + DefaultMaxTokens: OpenAIModels[GPT41].DefaultMaxTokens, }, - GPT41Mini: { - ID: GPT41Mini, - Name: "GPT 4.1 mini", + OpenRouterGPT41Mini: { + ID: OpenRouterGPT41Mini, + Name: "OpenRouter – GPT 4.1 mini", Provider: ProviderOpenRouter, APIModel: "gpt-4.1", - CostPer1MIn: 0.40, - CostPer1MInCached: 0.10, - CostPer1MOutCached: 0.0, - CostPer1MOut: 1.60, - ContextWindow: 200_000, - DefaultMaxTokens: 20000, + CostPer1MIn: OpenAIModels[GPT41Mini].CostPer1MIn, + CostPer1MInCached: OpenAIModels[GPT41Mini].CostPer1MInCached, + CostPer1MOut: OpenAIModels[GPT41Mini].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[GPT41Mini].CostPer1MOutCached, + ContextWindow: OpenAIModels[GPT41Mini].ContextWindow, + DefaultMaxTokens: OpenAIModels[GPT41Mini].DefaultMaxTokens, }, - GPT41Nano: { - ID: GPT41Nano, - Name: "GPT 4.1 nano", + OpenRouterGPT41Nano: { + ID: OpenRouterGPT41Nano, + Name: "OpenRouter – GPT 4.1 nano", Provider: ProviderOpenRouter, APIModel: "gpt-4.1-nano", - CostPer1MIn: 0.10, - CostPer1MInCached: 0.025, - CostPer1MOutCached: 0.0, - CostPer1MOut: 0.40, - ContextWindow: 1_047_576, - DefaultMaxTokens: 20000, + CostPer1MIn: OpenAIModels[GPT41Nano].CostPer1MIn, + CostPer1MInCached: OpenAIModels[GPT41Nano].CostPer1MInCached, + CostPer1MOut: OpenAIModels[GPT41Nano].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[GPT41Nano].CostPer1MOutCached, + ContextWindow: OpenAIModels[GPT41Nano].ContextWindow, + DefaultMaxTokens: OpenAIModels[GPT41Nano].DefaultMaxTokens, }, - GPT45Preview: { - ID: GPT45Preview, - Name: "GPT 4.5 preview", + OpenRouterGPT45Preview: { + ID: OpenRouterGPT45Preview, + Name: "OpenRouter – GPT 4.5 preview", Provider: ProviderOpenRouter, APIModel: "gpt-4.5-preview", - CostPer1MIn: 75.00, - CostPer1MInCached: 37.50, - CostPer1MOutCached: 0.0, - CostPer1MOut: 150.00, - ContextWindow: 128_000, - DefaultMaxTokens: 15000, + CostPer1MIn: OpenAIModels[GPT45Preview].CostPer1MIn, + CostPer1MInCached: 
OpenAIModels[GPT45Preview].CostPer1MInCached, + CostPer1MOut: OpenAIModels[GPT45Preview].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[GPT45Preview].CostPer1MOutCached, + ContextWindow: OpenAIModels[GPT45Preview].ContextWindow, + DefaultMaxTokens: OpenAIModels[GPT45Preview].DefaultMaxTokens, }, - GPT4o: { - ID: GPT4o, - Name: "GPT 4o", + OpenRouterGPT4o: { + ID: OpenRouterGPT4o, + Name: "OpenRouter – GPT 4o", Provider: ProviderOpenRouter, APIModel: "gpt-4o", - CostPer1MIn: 2.50, - CostPer1MInCached: 1.25, - CostPer1MOutCached: 0.0, - CostPer1MOut: 10.00, - ContextWindow: 128_000, - DefaultMaxTokens: 4096, + CostPer1MIn: OpenAIModels[GPT4o].CostPer1MIn, + CostPer1MInCached: OpenAIModels[GPT4o].CostPer1MInCached, + CostPer1MOut: OpenAIModels[GPT4o].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[GPT4o].CostPer1MOutCached, + ContextWindow: OpenAIModels[GPT4o].ContextWindow, + DefaultMaxTokens: OpenAIModels[GPT4o].DefaultMaxTokens, }, - GPT4oMini: { - ID: GPT4oMini, - Name: "GPT 4o mini", + OpenRouterGPT4oMini: { + ID: OpenRouterGPT4oMini, + Name: "OpenRouter – GPT 4o mini", Provider: ProviderOpenRouter, APIModel: "gpt-4o-mini", - CostPer1MIn: 0.15, - CostPer1MInCached: 0.075, - CostPer1MOutCached: 0.0, - CostPer1MOut: 0.60, - ContextWindow: 128_000, + CostPer1MIn: OpenAIModels[GPT4oMini].CostPer1MIn, + CostPer1MInCached: OpenAIModels[GPT4oMini].CostPer1MInCached, + CostPer1MOut: OpenAIModels[GPT4oMini].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[GPT4oMini].CostPer1MOutCached, + ContextWindow: OpenAIModels[GPT4oMini].ContextWindow, }, - O1: { - ID: O1, - Name: "O1", + OpenRouterO1: { + ID: OpenRouterO1, + Name: "OpenRouter – O1", Provider: ProviderOpenRouter, APIModel: "o1", - CostPer1MIn: 15.00, - CostPer1MInCached: 7.50, - CostPer1MOutCached: 0.0, - CostPer1MOut: 60.00, - ContextWindow: 200_000, - DefaultMaxTokens: 50000, - CanReason: true, - }, - O1Pro: { - ID: O1Pro, - Name: "o1 pro", + CostPer1MIn: OpenAIModels[O1].CostPer1MIn, + CostPer1MInCached: OpenAIModels[O1].CostPer1MInCached, + CostPer1MOut: OpenAIModels[O1].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[O1].CostPer1MOutCached, + ContextWindow: OpenAIModels[O1].ContextWindow, + DefaultMaxTokens: OpenAIModels[O1].DefaultMaxTokens, + CanReason: OpenAIModels[O1].CanReason, + }, + OpenRouterO1Pro: { + ID: OpenRouterO1Pro, + Name: "OpenRouter – o1 pro", Provider: ProviderOpenRouter, APIModel: "o1-pro", - CostPer1MIn: 150.00, - CostPer1MInCached: 0.0, - CostPer1MOutCached: 0.0, - CostPer1MOut: 600.00, - ContextWindow: 200_000, - DefaultMaxTokens: 50000, - CanReason: true, - }, - O1Mini: { - ID: O1Mini, - Name: "o1 mini", + CostPer1MIn: OpenAIModels[O1Pro].CostPer1MIn, + CostPer1MInCached: OpenAIModels[O1Pro].CostPer1MInCached, + CostPer1MOut: OpenAIModels[O1Pro].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[O1Pro].CostPer1MOutCached, + ContextWindow: OpenAIModels[O1Pro].ContextWindow, + DefaultMaxTokens: OpenAIModels[O1Pro].DefaultMaxTokens, + CanReason: OpenAIModels[O1Pro].CanReason, + }, + OpenRouterO1Mini: { + ID: OpenRouterO1Mini, + Name: "OpenRouter – o1 mini", Provider: ProviderOpenRouter, APIModel: "o1-mini", - CostPer1MIn: 1.10, - CostPer1MInCached: 0.55, - CostPer1MOutCached: 0.0, - CostPer1MOut: 4.40, - ContextWindow: 128_000, - DefaultMaxTokens: 50000, - CanReason: true, - }, - O3: { - ID: O3, - Name: "o3", + CostPer1MIn: OpenAIModels[O1Mini].CostPer1MIn, + CostPer1MInCached: OpenAIModels[O1Mini].CostPer1MInCached, + CostPer1MOut: OpenAIModels[O1Mini].CostPer1MOut, + CostPer1MOutCached: 
OpenAIModels[O1Mini].CostPer1MOutCached, + ContextWindow: OpenAIModels[O1Mini].ContextWindow, + DefaultMaxTokens: OpenAIModels[O1Mini].DefaultMaxTokens, + CanReason: OpenAIModels[O1Mini].CanReason, + }, + OpenRouterO3: { + ID: OpenRouterO3, + Name: "OpenRouter – o3", Provider: ProviderOpenRouter, APIModel: "o3", - CostPer1MIn: 10.00, - CostPer1MInCached: 2.50, - CostPer1MOutCached: 0.0, - CostPer1MOut: 40.00, - ContextWindow: 200_000, - CanReason: true, - }, - O3Mini: { - ID: O3Mini, - Name: "o3 mini", + CostPer1MIn: OpenAIModels[O3].CostPer1MIn, + CostPer1MInCached: OpenAIModels[O3].CostPer1MInCached, + CostPer1MOut: OpenAIModels[O3].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[O3].CostPer1MOutCached, + ContextWindow: OpenAIModels[O3].ContextWindow, + DefaultMaxTokens: OpenAIModels[O3].DefaultMaxTokens, + CanReason: OpenAIModels[O3].CanReason, + }, + OpenRouterO3Mini: { + ID: OpenRouterO3Mini, + Name: "OpenRouter – o3 mini", Provider: ProviderOpenRouter, APIModel: "o3-mini", - CostPer1MIn: 1.10, - CostPer1MInCached: 0.55, - CostPer1MOutCached: 0.0, - CostPer1MOut: 4.40, - ContextWindow: 200_000, - DefaultMaxTokens: 50000, - CanReason: true, - }, - O4Mini: { - ID: O4Mini, - Name: "o4 mini", + CostPer1MIn: OpenAIModels[O3Mini].CostPer1MIn, + CostPer1MInCached: OpenAIModels[O3Mini].CostPer1MInCached, + CostPer1MOut: OpenAIModels[O3Mini].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[O3Mini].CostPer1MOutCached, + ContextWindow: OpenAIModels[O3Mini].ContextWindow, + DefaultMaxTokens: OpenAIModels[O3Mini].DefaultMaxTokens, + CanReason: OpenAIModels[O3Mini].CanReason, + }, + OpenRouterO4Mini: { + ID: OpenRouterO4Mini, + Name: "OpenRouter – o4 mini", Provider: ProviderOpenRouter, APIModel: "o4-mini", - CostPer1MIn: 1.10, - CostPer1MInCached: 0.275, - CostPer1MOutCached: 0.0, - CostPer1MOut: 4.40, - ContextWindow: 128_000, - DefaultMaxTokens: 50000, - CanReason: true, - }, - Gemini25Flash: { - ID: Gemini25Flash, - Name: "Gemini 2.5 Flash", + CostPer1MIn: OpenAIModels[O4Mini].CostPer1MIn, + CostPer1MInCached: OpenAIModels[O4Mini].CostPer1MInCached, + CostPer1MOut: OpenAIModels[O4Mini].CostPer1MOut, + CostPer1MOutCached: OpenAIModels[O4Mini].CostPer1MOutCached, + ContextWindow: OpenAIModels[O4Mini].ContextWindow, + DefaultMaxTokens: OpenAIModels[O4Mini].DefaultMaxTokens, + CanReason: OpenAIModels[O4Mini].CanReason, + }, + OpenRouterGemini25Flash: { + ID: OpenRouterGemini25Flash, + Name: "OpenRouter – Gemini 2.5 Flash", Provider: ProviderOpenRouter, APIModel: "google/gemini-2.5-flash-preview-04-17", - CostPer1MIn: 0.15, - CostPer1MInCached: 0, - CostPer1MOutCached: 0, - CostPer1MOut: 0.60, - ContextWindow: 1000000, - DefaultMaxTokens: 50000, + CostPer1MIn: GeminiModels[Gemini25Flash].CostPer1MIn, + CostPer1MInCached: GeminiModels[Gemini25Flash].CostPer1MInCached, + CostPer1MOut: GeminiModels[Gemini25Flash].CostPer1MOut, + CostPer1MOutCached: GeminiModels[Gemini25Flash].CostPer1MOutCached, + ContextWindow: GeminiModels[Gemini25Flash].ContextWindow, + DefaultMaxTokens: GeminiModels[Gemini25Flash].DefaultMaxTokens, }, - Gemini25: { - ID: Gemini25, - Name: "Gemini 2.5 Pro", + OpenRouterGemini25: { + ID: OpenRouterGemini25, + Name: "OpenRouter – Gemini 2.5 Pro", Provider: ProviderOpenRouter, APIModel: "google/gemini-2.5-pro-preview-03-25", - CostPer1MIn: 1.25, - CostPer1MInCached: 0, - CostPer1MOutCached: 0, - CostPer1MOut: 10, - ContextWindow: 1000000, - DefaultMaxTokens: 50000, + CostPer1MIn: GeminiModels[Gemini25].CostPer1MIn, + CostPer1MInCached: GeminiModels[Gemini25].CostPer1MInCached, + 
CostPer1MOut: GeminiModels[Gemini25].CostPer1MOut, + CostPer1MOutCached: GeminiModels[Gemini25].CostPer1MOutCached, + ContextWindow: GeminiModels[Gemini25].ContextWindow, + DefaultMaxTokens: GeminiModels[Gemini25].DefaultMaxTokens, }, - DeepSeekChatFree: { - ID: DeepSeekChatFree, - Name: "DeepSeek Chat (Free)", + OpenRouterDeepSeekChatFree: { + ID: OpenRouterDeepSeekChatFree, + Name: "OpenRouter – DeepSeek Chat (Free)", Provider: ProviderOpenRouter, APIModel: "deepseek/deepseek-chat-v3-0324:free", CostPer1MIn: 0, @@ -193,9 +207,9 @@ var OpenRouterModels = map[ModelID]Model{ ContextWindow: 12800, DefaultMaxTokens: 50000, }, - DeepSeekR1Free: { - ID: DeepSeekR1Free, - Name: "DeepSeek R1 (Free)", + OpenRouterDeepSeekR1Free: { + ID: OpenRouterDeepSeekR1Free, + Name: "OpenRouter – DeepSeek R1 (Free)", Provider: ProviderOpenRouter, APIModel: "deepseek/deepseek-r1:free", CostPer1MIn: 0, diff --git a/opencode-schema.json b/opencode-schema.json index c519ebafe..55d750bc3 100644 --- a/opencode-schema.json +++ b/opencode-schema.json @@ -49,7 +49,23 @@ "gpt-4.1-mini", "azure.gpt-4.1-mini", "gemini-2.5", - "meta-llama/llama-4-scout-17b-16e-instruct" + "meta-llama/llama-4-scout-17b-16e-instruct", + "openrouter.deepseek-chat-free", + "openrouter.deepseek-r1-free", + "openrouter.gpt-4.1", + "openrouter.gpt-4.1-mini", + "openrouter.gpt-4.1-nano", + "openrouter.gpt-4.5-preview", + "openrouter.gpt-4o", + "openrouter.gpt-4o-mini", + "openrouter.o1", + "openrouter.o1-pro", + "openrouter.o1-mini", + "openrouter.o3", + "openrouter.o3-mini", + "openrouter.o4-mini", + "openrouter.gemini-2.5-flash", + "openrouter.gemini-2.5" ], "type": "string" }, @@ -114,7 +130,23 @@ "gpt-4.1-mini", "azure.gpt-4.1-mini", "gemini-2.5", - "meta-llama/llama-4-scout-17b-16e-instruct" + "meta-llama/llama-4-scout-17b-16e-instruct", + "openrouter.deepseek-chat-free", + "openrouter.deepseek-r1-free", + "openrouter.gpt-4.1", + "openrouter.gpt-4.1-mini", + "openrouter.gpt-4.1-nano", + "openrouter.gpt-4.5-preview", + "openrouter.gpt-4o", + "openrouter.gpt-4o-mini", + "openrouter.o1", + "openrouter.o1-pro", + "openrouter.o1-mini", + "openrouter.o3", + "openrouter.o3-mini", + "openrouter.o4-mini", + "openrouter.gemini-2.5-flash", + "openrouter.gemini-2.5" ], "type": "string" }, @@ -278,11 +310,11 @@ "enum": [ "anthropic", "openai", - "openrouter", "gemini", "groq", "bedrock", - "azure" + "azure", + "openrouter" ], "type": "string" } From 75f6737d87a774d6534a4b6f08962e2d81994dd4 Mon Sep 17 00:00:00 2001 From: isaac-scarrott Date: Sun, 27 Apr 2025 21:15:09 +0100 Subject: [PATCH 4/8] [feature/openrouter-provider] Refactor finish reason handling and tool call logic in agent and OpenAI provider - Simplified finish reason check in `agent.go` by removing redundant variable assignment. - Updated `openai.go` to override the finish reason to `FinishReasonToolUse` when tool calls are present. - Ensured consistent finish reason handling in both `send` and `stream` methods of the OpenAI provider.
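Note: the core of this patch is a small normalization step. OpenRouter and some other OpenAI-compatible backends do not always report a tool-call finish reason even when tool calls are present (the reason patch 1 special-cased `FinishReasonUnknown`), so the presence of tool calls now takes precedence. A self-contained sketch, with stand-in types mirroring `message.FinishReason`:

```go
package main

import "fmt"

// Stand-ins mirroring message.FinishReason (assumed constant names).
type FinishReason string

const (
	FinishReasonEndTurn FinishReason = "end_turn"
	FinishReasonToolUse FinishReason = "tool_use"
)

type ToolCall struct{ Name string }

// normalizeFinishReason mirrors the override in openai.go: if the response
// carries tool calls, report FinishReasonToolUse regardless of what the
// backend claimed, so the agent loop feeds tool results back and continues.
func normalizeFinishReason(reported FinishReason, toolCalls []ToolCall) FinishReason {
	if len(toolCalls) > 0 {
		return FinishReasonToolUse
	}
	return reported
}

func main() {
	fmt.Println(normalizeFinishReason(FinishReasonEndTurn, []ToolCall{{Name: "bash"}}))
	// prints tool_use: the agent keeps going instead of stopping early
}
```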
--- internal/llm/agent/agent.go | 3 +-- internal/llm/provider/openai.go | 23 ++++++++++++++++++++--- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/internal/llm/agent/agent.go b/internal/llm/agent/agent.go index 41dfd8b14..80dfeb0fd 100644 --- a/internal/llm/agent/agent.go +++ b/internal/llm/agent/agent.go @@ -229,8 +229,7 @@ func (a *agent) processGeneration(ctx context.Context, sessionID, content string return a.err(fmt.Errorf("failed to process events: %w", err)) } logging.Info("Result", "message", agentMessage.FinishReason(), "toolResults", toolResults) - finishReason := agentMessage.FinishReason() - if (finishReason == message.FinishReasonToolUse || finishReason == message.FinishReasonUnknown) && toolResults != nil { + if (agentMessage.FinishReason() == message.FinishReasonToolUse) && toolResults != nil { // We are not done, we need to respond with the tool response msgHistory = append(msgHistory, agentMessage, *toolResults) continue diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go index 4d45aebfa..ed2ec444d 100644 --- a/internal/llm/provider/openai.go +++ b/internal/llm/provider/openai.go @@ -204,11 +204,18 @@ func (o *openaiClient) send(ctx context.Context, messages []message.Message, too content = openaiResponse.Choices[0].Message.Content } + toolCalls := o.toolCalls(*openaiResponse) + finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason)) + + if len(toolCalls) > 0 { + finishReason = message.FinishReasonToolUse + } + return &ProviderResponse{ Content: content, - ToolCalls: o.toolCalls(*openaiResponse), + ToolCalls: toolCalls, Usage: o.usage(*openaiResponse), - FinishReason: o.finishReason(string(openaiResponse.Choices[0].FinishReason)), + FinishReason: finishReason, }, nil } } @@ -267,13 +274,23 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t err := openaiStream.Err() if err == nil || errors.Is(err, io.EOF) { // Stream completed successfully + eventChan <- ProviderEvent{ + Type: EventContentStop, + } + + finishReason := o.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason)) + + if len(toolCalls) > 0 { + finishReason = message.FinishReasonToolUse + } + eventChan <- ProviderEvent{ Type: EventComplete, Response: &ProviderResponse{ Content: currentContent, ToolCalls: toolCalls, Usage: o.usage(acc.ChatCompletion), - FinishReason: o.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason)), + FinishReason: finishReason, }, } close(eventChan) From fd487d5fb6381f26b638bec9f15b38b01db76950 Mon Sep 17 00:00:00 2001 From: isaac-scarrott Date: Mon, 28 Apr 2025 13:38:56 +0100 Subject: [PATCH 5/8] **[feature/openrouter-provider] Add support for custom headers in OpenAI client configuration** - Introduced a new `extraHeaders` field in the `openaiOptions` struct to allow specifying additional HTTP headers. - Added logic in `newOpenAIClient` to apply `extraHeaders` to the OpenAI client configuration. - Implemented a new option function `WithOpenAIExtraHeaders` to set custom headers in `openaiOptions`. - Updated the OpenRouter provider configuration in `NewProvider` to include default headers (`HTTP-Referer` and `X-Title`) for OpenRouter API requests. 
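Note: `extraHeaders` slots into the same functional-options pattern the client already uses for `baseURL`. A minimal, runnable sketch follows; the option names match the patch, while the struct and wiring here are trimmed stand-ins.

```go
package main

import "fmt"

// Trimmed stand-in for the openaiOptions struct in the patch.
type openaiOptions struct {
	baseURL      string
	extraHeaders map[string]string
}

type OpenAIOption func(*openaiOptions)

func WithOpenAIBaseURL(baseURL string) OpenAIOption {
	return func(o *openaiOptions) { o.baseURL = baseURL }
}

func WithOpenAIExtraHeaders(headers map[string]string) OpenAIOption {
	return func(o *openaiOptions) { o.extraHeaders = headers }
}

func main() {
	opts := openaiOptions{}
	for _, apply := range []OpenAIOption{
		WithOpenAIBaseURL("https://openrouter.ai/api/v1"),
		WithOpenAIExtraHeaders(map[string]string{
			"HTTP-Referer": "opencode.ai", // app attribution headers OpenRouter recognizes
			"X-Title":      "OpenCode",
		}),
	} {
		apply(&opts)
	}
	fmt.Printf("%+v\n", opts)
}
```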
--- internal/llm/provider/openai.go | 13 +++++++++++++ internal/llm/provider/provider.go | 4 ++++ 2 files changed, 17 insertions(+) diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go index ed2ec444d..1a2d6baae 100644 --- a/internal/llm/provider/openai.go +++ b/internal/llm/provider/openai.go @@ -21,6 +21,7 @@ type openaiOptions struct { baseURL string disableCache bool reasoningEffort string + extraHeaders map[string]string } type OpenAIOption func(*openaiOptions) @@ -49,6 +50,12 @@ func newOpenAIClient(opts providerClientOptions) OpenAIClient { openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(openaiOpts.baseURL)) } + if openaiOpts.extraHeaders != nil { + for key, value := range openaiOpts.extraHeaders { + openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value)) + } + } + client := openai.NewClient(openaiClientOptions...) return &openaiClient{ providerOptions: opts, @@ -392,6 +399,12 @@ func WithOpenAIBaseURL(baseURL string) OpenAIOption { } } +func WithOpenAIExtraHeaders(headers map[string]string) OpenAIOption { + return func(options *openaiOptions) { + options.extraHeaders = headers + } +} + func WithOpenAIDisableCache() OpenAIOption { return func(options *openaiOptions) { options.disableCache = true diff --git a/internal/llm/provider/provider.go b/internal/llm/provider/provider.go index a442ba09a..1545bc27a 100644 --- a/internal/llm/provider/provider.go +++ b/internal/llm/provider/provider.go @@ -123,6 +123,10 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption case models.ProviderOpenRouter: clientOptions.openaiOptions = append(clientOptions.openaiOptions, WithOpenAIBaseURL("https://openrouter.ai/api/v1"), + WithOpenAIExtraHeaders(map[string]string{ + "HTTP-Referer": "opencode.ai", + "X-Title": "OpenCode", + }), ) return &baseProvider[OpenAIClient]{ options: clientOptions, From 5562675f652334d956a66ee5c988242f86b70ece Mon Sep 17 00:00:00 2001 From: isaac-scarrott Date: Mon, 28 Apr 2025 20:46:07 +0100 Subject: [PATCH 6/8] Update OpenRouter model config and remove unsupported models --- internal/llm/models/openrouter.go | 80 +++++++++++-------------------- 1 file changed, 27 insertions(+), 53 deletions(-) diff --git a/internal/llm/models/openrouter.go b/internal/llm/models/openrouter.go index 0b1f5a41d..701b0d398 100644 --- a/internal/llm/models/openrouter.go +++ b/internal/llm/models/openrouter.go @@ -3,22 +3,20 @@ package models const ( ProviderOpenRouter ModelProvider = "openrouter" - OpenRouterDeepSeekChatFree ModelID = "openrouter.deepseek-chat-free" - OpenRouterDeepSeekR1Free ModelID = "openrouter.deepseek-r1-free" - OpenRouterGPT41 ModelID = "openrouter.gpt-4.1" - OpenRouterGPT41Mini ModelID = "openrouter.gpt-4.1-mini" - OpenRouterGPT41Nano ModelID = "openrouter.gpt-4.1-nano" - OpenRouterGPT45Preview ModelID = "openrouter.gpt-4.5-preview" - OpenRouterGPT4o ModelID = "openrouter.gpt-4o" - OpenRouterGPT4oMini ModelID = "openrouter.gpt-4o-mini" - OpenRouterO1 ModelID = "openrouter.o1" - OpenRouterO1Pro ModelID = "openrouter.o1-pro" - OpenRouterO1Mini ModelID = "openrouter.o1-mini" - OpenRouterO3 ModelID = "openrouter.o3" - OpenRouterO3Mini ModelID = "openrouter.o3-mini" - OpenRouterO4Mini ModelID = "openrouter.o4-mini" - OpenRouterGemini25Flash ModelID = "openrouter.gemini-2.5-flash" - OpenRouterGemini25 ModelID = "openrouter.gemini-2.5" + OpenRouterGPT41 ModelID = "openrouter.gpt-4.1" + OpenRouterGPT41Mini ModelID = "openrouter.gpt-4.1-mini" + OpenRouterGPT41Nano ModelID = 
"openrouter.gpt-4.1-nano" + OpenRouterGPT45Preview ModelID = "openrouter.gpt-4.5-preview" + OpenRouterGPT4o ModelID = "openrouter.gpt-4o" + OpenRouterGPT4oMini ModelID = "openrouter.gpt-4o-mini" + OpenRouterO1 ModelID = "openrouter.o1" + OpenRouterO1Pro ModelID = "openrouter.o1-pro" + OpenRouterO1Mini ModelID = "openrouter.o1-mini" + OpenRouterO3 ModelID = "openrouter.o3" + OpenRouterO3Mini ModelID = "openrouter.o3-mini" + OpenRouterO4Mini ModelID = "openrouter.o4-mini" + OpenRouterGemini25Flash ModelID = "openrouter.gemini-2.5-flash" + OpenRouterGemini25 ModelID = "openrouter.gemini-2.5" ) var OpenRouterModels = map[ModelID]Model{ @@ -26,7 +24,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterGPT41, Name: "OpenRouter – GPT 4.1", Provider: ProviderOpenRouter, - APIModel: "gpt-4.1", + APIModel: "openai/gpt-4.1", CostPer1MIn: OpenAIModels[GPT41].CostPer1MIn, CostPer1MInCached: OpenAIModels[GPT41].CostPer1MInCached, CostPer1MOut: OpenAIModels[GPT41].CostPer1MOut, @@ -38,7 +36,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterGPT41Mini, Name: "OpenRouter – GPT 4.1 mini", Provider: ProviderOpenRouter, - APIModel: "gpt-4.1", + APIModel: "openai/gpt-4.1-mini", CostPer1MIn: OpenAIModels[GPT41Mini].CostPer1MIn, CostPer1MInCached: OpenAIModels[GPT41Mini].CostPer1MInCached, CostPer1MOut: OpenAIModels[GPT41Mini].CostPer1MOut, @@ -50,7 +48,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterGPT41Nano, Name: "OpenRouter – GPT 4.1 nano", Provider: ProviderOpenRouter, - APIModel: "gpt-4.1-nano", + APIModel: "openai/gpt-4.1-nano", CostPer1MIn: OpenAIModels[GPT41Nano].CostPer1MIn, CostPer1MInCached: OpenAIModels[GPT41Nano].CostPer1MInCached, CostPer1MOut: OpenAIModels[GPT41Nano].CostPer1MOut, @@ -62,7 +60,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterGPT45Preview, Name: "OpenRouter – GPT 4.5 preview", Provider: ProviderOpenRouter, - APIModel: "gpt-4.5-preview", + APIModel: "openai/gpt-4.5-preview", CostPer1MIn: OpenAIModels[GPT45Preview].CostPer1MIn, CostPer1MInCached: OpenAIModels[GPT45Preview].CostPer1MInCached, CostPer1MOut: OpenAIModels[GPT45Preview].CostPer1MOut, @@ -74,7 +72,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterGPT4o, Name: "OpenRouter – GPT 4o", Provider: ProviderOpenRouter, - APIModel: "gpt-4o", + APIModel: "openai/gpt-4o", CostPer1MIn: OpenAIModels[GPT4o].CostPer1MIn, CostPer1MInCached: OpenAIModels[GPT4o].CostPer1MInCached, CostPer1MOut: OpenAIModels[GPT4o].CostPer1MOut, @@ -86,7 +84,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterGPT4oMini, Name: "OpenRouter – GPT 4o mini", Provider: ProviderOpenRouter, - APIModel: "gpt-4o-mini", + APIModel: "openai/gpt-4o-mini", CostPer1MIn: OpenAIModels[GPT4oMini].CostPer1MIn, CostPer1MInCached: OpenAIModels[GPT4oMini].CostPer1MInCached, CostPer1MOut: OpenAIModels[GPT4oMini].CostPer1MOut, @@ -97,7 +95,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterO1, Name: "OpenRouter – O1", Provider: ProviderOpenRouter, - APIModel: "o1", + APIModel: "openai/o1", CostPer1MIn: OpenAIModels[O1].CostPer1MIn, CostPer1MInCached: OpenAIModels[O1].CostPer1MInCached, CostPer1MOut: OpenAIModels[O1].CostPer1MOut, @@ -110,7 +108,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterO1Pro, Name: "OpenRouter – o1 pro", Provider: ProviderOpenRouter, - APIModel: "o1-pro", + APIModel: "openai/o1-pro", CostPer1MIn: OpenAIModels[O1Pro].CostPer1MIn, CostPer1MInCached: OpenAIModels[O1Pro].CostPer1MInCached, CostPer1MOut: OpenAIModels[O1Pro].CostPer1MOut, @@ -123,7 +121,7 @@ var OpenRouterModels 
= map[ModelID]Model{ ID: OpenRouterO1Mini, Name: "OpenRouter – o1 mini", Provider: ProviderOpenRouter, - APIModel: "o1-mini", + APIModel: "openai/o1-mini", CostPer1MIn: OpenAIModels[O1Mini].CostPer1MIn, CostPer1MInCached: OpenAIModels[O1Mini].CostPer1MInCached, CostPer1MOut: OpenAIModels[O1Mini].CostPer1MOut, @@ -136,7 +134,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterO3, Name: "OpenRouter – o3", Provider: ProviderOpenRouter, - APIModel: "o3", + APIModel: "openai/o3", CostPer1MIn: OpenAIModels[O3].CostPer1MIn, CostPer1MInCached: OpenAIModels[O3].CostPer1MInCached, CostPer1MOut: OpenAIModels[O3].CostPer1MOut, @@ -149,7 +147,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterO3Mini, Name: "OpenRouter – o3 mini", Provider: ProviderOpenRouter, - APIModel: "o3-mini", + APIModel: "openai/o3-mini-high", CostPer1MIn: OpenAIModels[O3Mini].CostPer1MIn, CostPer1MInCached: OpenAIModels[O3Mini].CostPer1MInCached, CostPer1MOut: OpenAIModels[O3Mini].CostPer1MOut, @@ -162,7 +160,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterO4Mini, Name: "OpenRouter – o4 mini", Provider: ProviderOpenRouter, - APIModel: "o4-mini", + APIModel: "openai/o4-mini-high", CostPer1MIn: OpenAIModels[O4Mini].CostPer1MIn, CostPer1MInCached: OpenAIModels[O4Mini].CostPer1MInCached, CostPer1MOut: OpenAIModels[O4Mini].CostPer1MOut, @@ -175,7 +173,7 @@ var OpenRouterModels = map[ModelID]Model{ ID: OpenRouterGemini25Flash, Name: "OpenRouter – Gemini 2.5 Flash", Provider: ProviderOpenRouter, - APIModel: "google/gemini-2.5-flash-preview-04-17", + APIModel: "google/gemini-2.5-flash-preview:thinking", CostPer1MIn: GeminiModels[Gemini25Flash].CostPer1MIn, CostPer1MInCached: GeminiModels[Gemini25Flash].CostPer1MInCached, CostPer1MOut: GeminiModels[Gemini25Flash].CostPer1MOut, @@ -195,28 +193,4 @@ var OpenRouterModels = map[ModelID]Model{ ContextWindow: GeminiModels[Gemini25].ContextWindow, DefaultMaxTokens: GeminiModels[Gemini25].DefaultMaxTokens, }, - OpenRouterDeepSeekChatFree: { - ID: OpenRouterDeepSeekChatFree, - Name: "OpenRouter – DeepSeek Chat (Free)", - Provider: ProviderOpenRouter, - APIModel: "deepseek/deepseek-chat-v3-0324:free", - CostPer1MIn: 0, - CostPer1MInCached: 0, - CostPer1MOutCached: 0, - CostPer1MOut: 0, - ContextWindow: 12800, - DefaultMaxTokens: 50000, - }, - OpenRouterDeepSeekR1Free: { - ID: OpenRouterDeepSeekR1Free, - Name: "OpenRouter – DeepSeek R1 (Free)", - Provider: ProviderOpenRouter, - APIModel: "deepseek/deepseek-r1:free", - CostPer1MIn: 0, - CostPer1MInCached: 0, - CostPer1MOutCached: 0, - CostPer1MOut: 0, - ContextWindow: 164000, - DefaultMaxTokens: 50000, - }, } From bc86938662e0671ed5c89b6f6d277c7901938fbd Mon Sep 17 00:00:00 2001 From: isaac-scarrott Date: Mon, 28 Apr 2025 20:48:13 +0100 Subject: [PATCH 7/8] [feature/openrouter-provider] Update OpenRouter models and default configurations - Added new OpenRouter models: `claude-3.5-sonnet`, `claude-3-haiku`, `claude-3.7-sonnet`, `claude-3.5-haiku`, and `claude-3-opus` in `openrouter.go`. - Updated default agent models in `config.go`: - `agents.coder.model` now uses `claude-3.7-sonnet`. - `agents.task.model` now uses `claude-3.7-sonnet`. - `agents.title.model` now uses `claude-3.5-haiku`. - Updated `opencode-schema.json` to include the new models in the allowed list for schema validation. - Adjusted logic in `setDefaultModelForAgent` to reflect the new default models. 
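Note: this patch moves the OpenRouter defaults from the GPT 4.1 family to Claude: the title agent gets the cheap Haiku model with a tight token budget, while the coder and task agents get Claude 3.7 Sonnet. A simplified, self-contained sketch of the selection logic; the reasoning-effort handling in `setDefaultModelForAgent` is omitted here.

```go
package main

import "fmt"

// Stand-in types; agent names and model IDs follow the patch.
type (
	AgentName string
	ModelID   string
)

const (
	AgentCoder AgentName = "coder"
	AgentTask  AgentName = "task"
	AgentTitle AgentName = "title"
)

// defaultOpenRouterModel condenses the switch in setDefaultModelForAgent:
// the title agent gets Claude 3.5 Haiku and an 80-token budget, every
// other agent gets Claude 3.7 Sonnet with the 5000-token default.
func defaultOpenRouterModel(agent AgentName) (ModelID, int64) {
	maxTokens := int64(5000)
	model := ModelID("openrouter.claude-3.7-sonnet")
	if agent == AgentTitle {
		model = "openrouter.claude-3.5-haiku"
		maxTokens = 80
	}
	return model, maxTokens
}

func main() {
	model, maxTokens := defaultOpenRouterModel(AgentTitle)
	fmt.Println(model, maxTokens) // openrouter.claude-3.5-haiku 80
}
```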
--- internal/config/config.go | 12 ++-- internal/llm/models/openrouter.go | 94 ++++++++++++++++++++++++++----- opencode-schema.json | 14 ++++- 3 files changed, 98 insertions(+), 22 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index cc5232143..22781e189 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -270,9 +270,9 @@ func setProviderDefaults() { // OpenRouter configuration if apiKey := os.Getenv("OPENROUTER_API_KEY"); apiKey != "" { viper.SetDefault("providers.openrouter.apiKey", apiKey) - viper.SetDefault("agents.coder.model", models.OpenRouterGPT41) - viper.SetDefault("agents.task.model", models.OpenRouterGPT41Mini) - viper.SetDefault("agents.title.model", models.OpenRouterGPT41Mini) + viper.SetDefault("agents.coder.model", models.OpenRouterClaude37Sonnet) + viper.SetDefault("agents.task.model", models.OpenRouterClaude37Sonnet) + viper.SetDefault("agents.title.model", models.OpenRouterClaude35Haiku) return } @@ -596,12 +596,12 @@ func setDefaultModelForAgent(agent AgentName) bool { switch agent { case AgentTitle: - model = models.OpenRouterGPT41Mini + model = models.OpenRouterClaude35Haiku maxTokens = 80 case AgentTask: - model = models.OpenRouterGPT41Mini + model = models.OpenRouterClaude37Sonnet default: - model = models.OpenRouterGPT41 + model = models.OpenRouterClaude37Sonnet } // Check if model supports reasoning diff --git a/internal/llm/models/openrouter.go b/internal/llm/models/openrouter.go index 701b0d398..6fc2a2119 100644 --- a/internal/llm/models/openrouter.go +++ b/internal/llm/models/openrouter.go @@ -3,20 +3,25 @@ package models const ( ProviderOpenRouter ModelProvider = "openrouter" - OpenRouterGPT41 ModelID = "openrouter.gpt-4.1" - OpenRouterGPT41Mini ModelID = "openrouter.gpt-4.1-mini" - OpenRouterGPT41Nano ModelID = "openrouter.gpt-4.1-nano" - OpenRouterGPT45Preview ModelID = "openrouter.gpt-4.5-preview" - OpenRouterGPT4o ModelID = "openrouter.gpt-4o" - OpenRouterGPT4oMini ModelID = "openrouter.gpt-4o-mini" - OpenRouterO1 ModelID = "openrouter.o1" - OpenRouterO1Pro ModelID = "openrouter.o1-pro" - OpenRouterO1Mini ModelID = "openrouter.o1-mini" - OpenRouterO3 ModelID = "openrouter.o3" - OpenRouterO3Mini ModelID = "openrouter.o3-mini" - OpenRouterO4Mini ModelID = "openrouter.o4-mini" - OpenRouterGemini25Flash ModelID = "openrouter.gemini-2.5-flash" - OpenRouterGemini25 ModelID = "openrouter.gemini-2.5" + OpenRouterGPT41 ModelID = "openrouter.gpt-4.1" + OpenRouterGPT41Mini ModelID = "openrouter.gpt-4.1-mini" + OpenRouterGPT41Nano ModelID = "openrouter.gpt-4.1-nano" + OpenRouterGPT45Preview ModelID = "openrouter.gpt-4.5-preview" + OpenRouterGPT4o ModelID = "openrouter.gpt-4o" + OpenRouterGPT4oMini ModelID = "openrouter.gpt-4o-mini" + OpenRouterO1 ModelID = "openrouter.o1" + OpenRouterO1Pro ModelID = "openrouter.o1-pro" + OpenRouterO1Mini ModelID = "openrouter.o1-mini" + OpenRouterO3 ModelID = "openrouter.o3" + OpenRouterO3Mini ModelID = "openrouter.o3-mini" + OpenRouterO4Mini ModelID = "openrouter.o4-mini" + OpenRouterGemini25Flash ModelID = "openrouter.gemini-2.5-flash" + OpenRouterGemini25 ModelID = "openrouter.gemini-2.5" + OpenRouterClaude35Sonnet ModelID = "openrouter.claude-3.5-sonnet" + OpenRouterClaude3Haiku ModelID = "openrouter.claude-3-haiku" + OpenRouterClaude37Sonnet ModelID = "openrouter.claude-3.7-sonnet" + OpenRouterClaude35Haiku ModelID = "openrouter.claude-3.5-haiku" + OpenRouterClaude3Opus ModelID = "openrouter.claude-3-opus" ) var OpenRouterModels = map[ModelID]Model{ @@ -193,4 +198,65 @@ var 
OpenRouterModels = map[ModelID]Model{ ContextWindow: GeminiModels[Gemini25].ContextWindow, DefaultMaxTokens: GeminiModels[Gemini25].DefaultMaxTokens, }, + OpenRouterClaude35Sonnet: { + ID: OpenRouterClaude35Sonnet, + Name: "OpenRouter – Claude 3.5 Sonnet", + Provider: ProviderOpenRouter, + APIModel: "anthropic/claude-3.5-sonnet", + CostPer1MIn: AnthropicModels[Claude35Sonnet].CostPer1MIn, + CostPer1MInCached: AnthropicModels[Claude35Sonnet].CostPer1MInCached, + CostPer1MOut: AnthropicModels[Claude35Sonnet].CostPer1MOut, + CostPer1MOutCached: AnthropicModels[Claude35Sonnet].CostPer1MOutCached, + ContextWindow: AnthropicModels[Claude35Sonnet].ContextWindow, + DefaultMaxTokens: AnthropicModels[Claude35Sonnet].DefaultMaxTokens, + }, + OpenRouterClaude3Haiku: { + ID: OpenRouterClaude3Haiku, + Name: "OpenRouter – Claude 3 Haiku", + Provider: ProviderOpenRouter, + APIModel: "anthropic/claude-3-haiku", + CostPer1MIn: AnthropicModels[Claude3Haiku].CostPer1MIn, + CostPer1MInCached: AnthropicModels[Claude3Haiku].CostPer1MInCached, + CostPer1MOut: AnthropicModels[Claude3Haiku].CostPer1MOut, + CostPer1MOutCached: AnthropicModels[Claude3Haiku].CostPer1MOutCached, + ContextWindow: AnthropicModels[Claude3Haiku].ContextWindow, + DefaultMaxTokens: AnthropicModels[Claude3Haiku].DefaultMaxTokens, + }, + OpenRouterClaude37Sonnet: { + ID: OpenRouterClaude37Sonnet, + Name: "OpenRouter – Claude 3.7 Sonnet", + Provider: ProviderOpenRouter, + APIModel: "anthropic/claude-3.7-sonnet", + CostPer1MIn: AnthropicModels[Claude37Sonnet].CostPer1MIn, + CostPer1MInCached: AnthropicModels[Claude37Sonnet].CostPer1MInCached, + CostPer1MOut: AnthropicModels[Claude37Sonnet].CostPer1MOut, + CostPer1MOutCached: AnthropicModels[Claude37Sonnet].CostPer1MOutCached, + ContextWindow: AnthropicModels[Claude37Sonnet].ContextWindow, + DefaultMaxTokens: AnthropicModels[Claude37Sonnet].DefaultMaxTokens, + CanReason: AnthropicModels[Claude37Sonnet].CanReason, + }, + OpenRouterClaude35Haiku: { + ID: OpenRouterClaude35Haiku, + Name: "OpenRouter – Claude 3.5 Haiku", + Provider: ProviderOpenRouter, + APIModel: "anthropic/claude-3.5-haiku", + CostPer1MIn: AnthropicModels[Claude35Haiku].CostPer1MIn, + CostPer1MInCached: AnthropicModels[Claude35Haiku].CostPer1MInCached, + CostPer1MOut: AnthropicModels[Claude35Haiku].CostPer1MOut, + CostPer1MOutCached: AnthropicModels[Claude35Haiku].CostPer1MOutCached, + ContextWindow: AnthropicModels[Claude35Haiku].ContextWindow, + DefaultMaxTokens: AnthropicModels[Claude35Haiku].DefaultMaxTokens, + }, + OpenRouterClaude3Opus: { + ID: OpenRouterClaude3Opus, + Name: "OpenRouter – Claude 3 Opus", + Provider: ProviderOpenRouter, + APIModel: "anthropic/claude-3-opus", + CostPer1MIn: AnthropicModels[Claude3Opus].CostPer1MIn, + CostPer1MInCached: AnthropicModels[Claude3Opus].CostPer1MInCached, + CostPer1MOut: AnthropicModels[Claude3Opus].CostPer1MOut, + CostPer1MOutCached: AnthropicModels[Claude3Opus].CostPer1MOutCached, + ContextWindow: AnthropicModels[Claude3Opus].ContextWindow, + DefaultMaxTokens: AnthropicModels[Claude3Opus].DefaultMaxTokens, + }, } diff --git a/opencode-schema.json b/opencode-schema.json index 55d750bc3..7d1dde213 100644 --- a/opencode-schema.json +++ b/opencode-schema.json @@ -65,7 +65,12 @@ "openrouter.o3-mini", "openrouter.o4-mini", "openrouter.gemini-2.5-flash", - "openrouter.gemini-2.5" + "openrouter.gemini-2.5", + "openrouter.claude-3.5-sonnet", + "openrouter.claude-3-haiku", + "openrouter.claude-3.7-sonnet", + "openrouter.claude-3.5-haiku", + "openrouter.claude-3-opus" ], "type": "string" }, 
@@ -146,7 +151,12 @@ "openrouter.o3-mini", "openrouter.o4-mini", "openrouter.gemini-2.5-flash", - "openrouter.gemini-2.5" + "openrouter.gemini-2.5", + "openrouter.claude-3.5-sonnet", + "openrouter.claude-3-haiku", + "openrouter.claude-3.7-sonnet", + "openrouter.claude-3.5-haiku", + "openrouter.claude-3-opus" ], "type": "string" }, From 6f5f73a22ce1f7ed463609e0bec8cd523b7be743 Mon Sep 17 00:00:00 2001 From: isaac-scarrott Date: Tue, 29 Apr 2025 12:41:11 +0100 Subject: [PATCH 8/8] [feature/openrouter-provider] Remove unused ProviderEvent emission in stream function The changes remove the emission of a `ProviderEvent` with type `EventContentStop` in the `stream` function of the `openaiClient` implementation. This event was sent upon successful stream completion but is no longer used. --- internal/llm/provider/openai.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go index 1a2d6baae..b557df535 100644 --- a/internal/llm/provider/openai.go +++ b/internal/llm/provider/openai.go @@ -281,10 +281,6 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t err := openaiStream.Err() if err == nil || errors.Is(err, io.EOF) { // Stream completed successfully - eventChan <- ProviderEvent{ - Type: EventContentStop, - } - finishReason := o.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason)) if len(toolCalls) > 0 {