diff --git a/.github/guidelines-check.yml b/.github/guidelines-check.yml
deleted file mode 100644
index 522e52a5b2c..00000000000
--- a/.github/guidelines-check.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# This file is intentionally in the wrong dir, will move and add later....
-#
-
-name: Guidelines Check
-
-on:
-  # Disabled - uncomment to re-enable
-  # pull_request_target:
-  #   types: [opened, synchronize]
-
-jobs:
-  check-guidelines:
-    runs-on: ubuntu-latest
-    permissions:
-      contents: read
-      pull-requests: write
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 1
-
-      - name: Install opencode
-        run: curl -fsSL https://opencode.ai/install | bash
-
-      - name: Check PR guidelines compliance
-        env:
-          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          OPENCODE_PERMISSION: '{ "bash": { "gh*": "allow", "gh pr review*": "deny", "*": "deny" } }'
-        run: |
-          opencode run -m anthropic/claude-sonnet-4-20250514 "A new pull request has been created: '${{ github.event.pull_request.title }}'
-
-
-          ${{ github.event.pull_request.number }}
-
-
-
-          ${{ github.event.pull_request.body }}
-
-
-          Please check all the code changes in this pull request against the guidelines in AGENTS.md file in this repository. Diffs are important but make sure you read the entire file to get proper context. Make it clear the suggestions are merely suggestions and the human can decide what to do
-
-          Use the gh cli to create comments on the files for the violations. Try to leave the comment on the exact line number. If you have a suggested fix include it in a suggestion code block.
-
-          Command MUST be like this.
-          ```
-          gh api \
-            --method POST \
-            -H "Accept: application/vnd.github+json" \
-            -H "X-GitHub-Api-Version: 2022-11-28" \
-            /repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/comments \
-            -f 'body=[summary of issue]' -f 'commit_id=${{ github.event.pull_request.head.sha }}' -f 'path=[path-to-file]' -F "line=[line]" -f 'side=RIGHT'
-          ```
-
-          Only create comments for actual violations. If the code follows all guidelines, don't run any gh commands."
diff --git a/.github/last-synced-tag b/.github/last-synced-tag
index 91dd8d56caa..93e9cce5993 100644
--- a/.github/last-synced-tag
+++ b/.github/last-synced-tag
@@ -1 +1 @@
-v1.0.129
+v1.0.130
diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml
new file mode 100644
index 00000000000..32c7c7b1144
--- /dev/null
+++ b/.github/workflows/review.yml
@@ -0,0 +1,93 @@
+name: Guidelines Check
+
+on:
+  pull_request_target:
+    types: [opened, ready_for_review]
+  issue_comment:
+    types: [created]
+
+jobs:
+  check-guidelines:
+    if: |
+      (github.event_name == 'pull_request_target' &&
+       github.event.pull_request.draft == false) ||
+      (github.event_name == 'issue_comment' &&
+       github.event.issue.pull_request &&
+       startsWith(github.event.comment.body, '/review'))
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      pull-requests: write
+    steps:
+      - name: Check if user has write permission
+        if: github.event_name == 'issue_comment'
+        run: |
+          PERMISSION=$(gh api /repos/${{ github.repository }}/collaborators/${{ github.event.comment.user.login }}/permission --jq '.permission')
+          if [[ "$PERMISSION" != "write" && "$PERMISSION" != "admin" ]]; then
+            echo "User does not have write permission"
+            exit 1
+          fi
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Get PR number
+        id: pr-number
+        run: |
+          if [ "${{ github.event_name }}" = "pull_request_target" ]; then
+            echo "number=${{ github.event.pull_request.number }}" >> $GITHUB_OUTPUT
+          else
+            echo "number=${{ github.event.issue.number }}" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 1
+
+      - name: Install opencode
+        run: curl -fsSL https://opencode.ai/install | bash
+
+      - name: Get PR details
+        id: pr-details
+        run: |
+          gh api /repos/${{ github.repository }}/pulls/${{ steps.pr-number.outputs.number }} > pr_data.json
+          echo "title=$(jq -r .title pr_data.json)" >> $GITHUB_OUTPUT
+          echo "sha=$(jq -r .head.sha pr_data.json)" >> $GITHUB_OUTPUT
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Check PR guidelines compliance
+        env:
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          OPENCODE_PERMISSION: '{ "bash": { "gh*": "allow", "gh pr review*": "deny", "*": "deny" } }'
+        run: |
+          PR_BODY=$(jq -r .body pr_data.json)
+          opencode run -m anthropic/claude-sonnet-4-5 "A new pull request has been created: '${{ steps.pr-details.outputs.title }}'
+
+
+          ${{ steps.pr-number.outputs.number }}
+
+
+
+          $PR_BODY
+
+
+          Please check all the code changes in this pull request against the style guide, also look for any bugs if they exist. Diffs are important but make sure you read the entire file to get proper context. Make it clear the suggestions are merely suggestions and the human can decide what to do
+
+          When critiquing code against the style guide, be sure that the code is ACTUALLY in violation, don't complain about else statements if they already use early returns there. You may complain about excessive nesting though, regardless of else statement usage.
+          When critiquing code style don't be a zealot, we don't like \"let\" statements but sometimes they are the simplest option, if someone does a bunch of nesting with let, they should consider using iife (see packages/opencode/src/util.iife.ts)
+
+          Use the gh cli to create comments on the files for the violations. Try to leave the comment on the exact line number. If you have a suggested fix include it in a suggestion code block.
+
+          Command MUST be like this.
+          \`\`\`
+          gh api \
+            --method POST \
+            -H \"Accept: application/vnd.github+json\" \
+            -H \"X-GitHub-Api-Version: 2022-11-28\" \
+            /repos/${{ github.repository }}/pulls/${{ steps.pr-number.outputs.number }}/comments \
+            -f 'body=[summary of issue]' -f 'commit_id=${{ steps.pr-details.outputs.sha }}' -f 'path=[path-to-file]' -F \"line=[line]\" -f 'side=RIGHT'
+          \`\`\`
+
+          Only create comments for actual violations. If the code follows all guidelines, don't run any gh commands."
diff --git a/.opencode/opencode.jsonc b/.opencode/opencode.jsonc
index ce4a6658bf5..c3416388961 100644
--- a/.opencode/opencode.jsonc
+++ b/.opencode/opencode.jsonc
@@ -4,6 +4,7 @@
   // "enterprise": {
   //   "url": "https://enterprise.dev.opencode.ai",
   // },
+  "instructions": ["STYLE_GUIDE.md"],
   "provider": {
     "opencode": {
       "options": {
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2fc5737d795..6a24995e81a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -42,6 +42,8 @@ Want to take on an issue? Leave a comment and a maintainer may assign it to you
 > [!NOTE]
 > After touching `packages/opencode/src/server/server.ts`, run "./packages/sdk/js/script/build.ts" to regenerate the JS sdk.
 
+Please try to follow the [style guide](./STYLE_GUIDE.md).
+
 ### Setting up a Debugger
 
 Bun debugging is currently rough around the edges. We hope this guide helps you get set up and avoid some pain points.
diff --git a/STATS.md b/STATS.md
index a9807ddf55a..25678e915d5 100644
--- a/STATS.md
+++ b/STATS.md
@@ -158,3 +158,4 @@
 | 2025-11-30 | 916,116 (+7,427)  | 870,194 (+6,833)  | 1,786,310 (+14,260) |
 | 2025-12-01 | 925,898 (+9,782)  | 876,500 (+6,306)  | 1,802,398 (+16,088) |
 | 2025-12-02 | 939,250 (+13,352) | 890,919 (+14,419) | 1,830,169 (+27,771) |
+| 2025-12-03 | 952,249 (+12,999) | 903,713 (+12,794) | 1,855,962 (+25,793) |
diff --git a/STYLE_GUIDE.md b/STYLE_GUIDE.md
new file mode 100644
index 00000000000..164f69bd46c
--- /dev/null
+++ b/STYLE_GUIDE.md
@@ -0,0 +1,12 @@
+## Style Guide
+
+- Try to keep things in one function unless composable or reusable
+- DO NOT do unnecessary destructuring of variables
+- DO NOT use `else` statements unless necessary
+- DO NOT use `try`/`catch` if it can be avoided
+- AVOID `try`/`catch` where possible
+- AVOID `else` statements
+- AVOID using `any` type
+- AVOID `let` statements
+- PREFER single word variable names where possible
+- Use as many bun apis as possible like Bun.file()
diff --git a/bun.lock b/bun.lock
index f58f245bab9..77842163e9a 100644
--- a/bun.lock
+++ b/bun.lock
@@ -20,7 +20,7 @@
     },
     "packages/console/app": {
       "name": "@opencode-ai/console-app",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@cloudflare/vite-plugin": "1.15.2",
         "@ibm/plex": "6.4.1",
@@ -48,7 +48,7 @@
     },
     "packages/console/core": {
      "name": "@opencode-ai/console-core",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@aws-sdk/client-sts": "3.782.0",
         "@jsx-email/render": "1.1.1",
@@ -75,7 +75,7 @@
     },
     "packages/console/function": {
       "name": "@opencode-ai/console-function",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@ai-sdk/anthropic": "2.0.0",
         "@ai-sdk/openai": "2.0.2",
@@ -99,7 +99,7 @@
     },
     "packages/console/mail": {
       "name": "@opencode-ai/console-mail",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@jsx-email/all": "2.2.3",
         "@jsx-email/cli": "1.4.3",
@@ -123,7 +123,7 @@
     },
     "packages/desktop": {
       "name": "@opencode-ai/desktop",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@kobalte/core": "catalog:",
         "@opencode-ai/sdk": "workspace:*",
@@ -164,7 +164,7 @@
     },
     "packages/enterprise": {
       "name": "@opencode-ai/enterprise",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@opencode-ai/ui": "workspace:*",
         "@opencode-ai/util": "workspace:*",
@@ -192,7 +192,7 @@
     },
     "packages/function": {
       "name": "@opencode-ai/function",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@octokit/auth-app": "8.0.1",
         "@octokit/rest": "22.0.0",
@@ -208,7 +208,7 @@
     },
     "packages/opencode": {
       "name": "opencode",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "bin": {
         "opencode": "./bin/opencode",
       },
@@ -238,8 +238,8 @@
         "@opencode-ai/sdk": "workspace:*",
         "@opencode-ai/util": "workspace:*",
         "@openrouter/ai-sdk-provider": "1.2.8",
-        "@opentui/core": "0.1.55",
-        "@opentui/solid": "0.1.55",
+        "@opentui/core": "0.1.56",
+        "@opentui/solid": "0.1.56",
         "@parcel/watcher": "2.5.1",
         "@pierre/precision-diffs": "catalog:",
         "@solid-primitives/event-bus": "1.1.2",
@@ -298,7 +298,7 @@
     },
     "packages/plugin": {
       "name": "@opencode-ai/plugin",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@opencode-ai/sdk": "workspace:*",
         "zod": "catalog:",
@@ -318,7 +318,7 @@
     },
     "packages/sdk/js": {
       "name": "@opencode-ai/sdk",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "devDependencies": {
         "@hey-api/openapi-ts": "0.81.0",
         "@tsconfig/node22": "catalog:",
@@ -329,7 +329,7 @@
     },
     "packages/slack": {
       "name": "@opencode-ai/slack",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@opencode-ai/sdk": "workspace:*",
         "@slack/bolt": "^3.17.1",
@@ -342,7 +342,7 @@
     },
     "packages/tauri": {
       "name": "@opencode-ai/tauri",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@tauri-apps/api": "^2",
         "@tauri-apps/plugin-opener": "^2",
@@ -355,7 +355,7 @@
     },
     "packages/ui": {
       "name": "@opencode-ai/ui",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@kobalte/core": "catalog:",
         "@opencode-ai/sdk": "workspace:*",
@@ -387,7 +387,7 @@
     },
     "packages/util": {
       "name": "@opencode-ai/util",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "zod": "catalog:",
       },
@@ -398,7 +398,7 @@
     },
     "packages/web": {
       "name": "@opencode-ai/web",
-      "version": "1.0.129",
+      "version": "1.0.130",
       "dependencies": {
         "@astrojs/cloudflare": "12.6.3",
         "@astrojs/markdown-remark": "6.3.1",
@@ -1087,21 +1087,21 @@
     "@opentelemetry/api": ["@opentelemetry/api@1.9.0", "", {}, "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg=="],
 
-    "@opentui/core": ["@opentui/core@0.1.55", "", { "dependencies": { "bun-ffi-structs": "0.1.2", "diff": "8.0.2", "jimp": "1.6.0", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@dimforge/rapier2d-simd-compat": "^0.17.3", "@opentui/core-darwin-arm64": "0.1.55", "@opentui/core-darwin-x64": "0.1.55", "@opentui/core-linux-arm64": "0.1.55", "@opentui/core-linux-x64": "0.1.55", "@opentui/core-win32-arm64": "0.1.55", "@opentui/core-win32-x64": "0.1.55", "bun-webgpu": "0.1.4", "planck": "^1.4.2", "three": "0.177.0" }, "peerDependencies": { "web-tree-sitter": "0.25.10" } }, "sha512-WafOO8eMf1/fmlFUfgooWyWJclQG5X/72VUebH+jN6/kSoSb91XJxHQgaKL9CQYFBNBIApQhAZn/sF9Qt60+lQ=="],
+    "@opentui/core": ["@opentui/core@0.1.56", "", { "dependencies": { "bun-ffi-structs": "0.1.2", "diff": "8.0.2", "jimp": "1.6.0", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@dimforge/rapier2d-simd-compat": "^0.17.3", "@opentui/core-darwin-arm64": "0.1.56", "@opentui/core-darwin-x64": "0.1.56", "@opentui/core-linux-arm64": "0.1.56", "@opentui/core-linux-x64": "0.1.56", "@opentui/core-win32-arm64": "0.1.56", "@opentui/core-win32-x64": "0.1.56", "bun-webgpu": "0.1.4", "planck": "^1.4.2", "three": "0.177.0" }, "peerDependencies": { "web-tree-sitter": "0.25.10" } }, "sha512-TI5cSCPYythHIQYpAEdXyZhewGACn2TfnfC1qZmrSyEq33zFo4W7zpQ4EZNpy9xZJFCI+elAUVJFARwhudp9EQ=="],
 
-    "@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.1.55", "", { "os": "darwin", "cpu": "arm64" }, "sha512-z2Prd/KKUbhPaSGBFv2q0nDtiLB/5oI3sGFDgf+YAfs6M6UfP9n0XkPUupbE1dx4lMyvwA9X8/QUnsQApd3E2g=="],
+    "@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.1.56", "", { "os": "darwin", "cpu": "arm64" }, "sha512-x5U9J2k1Fmbb9Mdh1nOd/yZVpg4ARCrV5pFngpaeKrIWDhs8RLpQW3ap+r7uyFLGFkSn4h5wBR0jj6Dg+Tyw+A=="],
 
-    "@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.1.55", "", { "os": "darwin", "cpu": "x64" }, "sha512-zjgGmIaTCWUvvQ9vIHJ0ypTkuFIA4ykKiZ16QxpG930bPr9fJ1xZ8MYj+6WSyuiao7tm6iWQfuYKT3tzA8+ItQ=="],
+    "@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.1.56", "", { "os": "darwin", "cpu": "x64" }, "sha512-7swq9rV/SaNVBWoUbC7mlP1VNyKBl7SSwmyVMkcaBP71lkm95zWuh4pgGj82fLgZ9gITRBD95TJVDmTovOyW0A=="],
 
-    "@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.1.55", "", { "os": "linux", "cpu": "arm64" }, "sha512-77EZtLxH0VW/Kw+6kTs9FrFWfhjaIjsK/o39DAWM1ZNdFDTXAa/MQNOFDlBXbNHiNqPOyxd0tol1nUFLr8ZtZg=="],
+    "@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.1.56", "", { "os": "linux", "cpu": "arm64" }, "sha512-v8b+kiTlynAJzR0hFeVpGFzVi5PGqXAe3Zql9iTiQqTExkm/sR34sfC/P6rBOUhuAnos8ovPDKWtDb6eCTSm9g=="],
 
-    "@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.1.55", "", { "os": "linux", "cpu": "x64" }, "sha512-o4RB1jqKWx4TM9v2trGUij6H2ymJCxID8BK3HWvRIjd71tpKkaMY4SxaMWGzvK89X40u8v9qKE04dileKNa10w=="],
+    "@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.1.56", "", { "os": "linux", "cpu": "x64" }, "sha512-lbxgvAi5SBswK/2hoMPtLhPvJxASgquPUwvGTRHqzDkCvrOChP/loTjBQpL09/nAFc3jbM3SAbZtnEgA2SGYVw=="],
 
-    "@opentui/core-win32-arm64": ["@opentui/core-win32-arm64@0.1.55", "", { "os": "win32", "cpu": "arm64" }, "sha512-SYna371ZcQme6XjGI2ESHM2uMUZQRi9kgtJj5E22uH4wgBpPWFwf83EGWv78v+irvsypR+ZJgVfkwkz6JjgVTQ=="],
+    "@opentui/core-win32-arm64": ["@opentui/core-win32-arm64@0.1.56", "", { "os": "win32", "cpu": "arm64" }, "sha512-RoCAbvDo+59OevX+6GrEGbaueERiBVnTaWJkrS41hRAD2fFS3CZpW7UuS5jIg7zn5clHmOGyfvCiBkTRXmgkhw=="],
 
-    "@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.1.55", "", { "os": "win32", "cpu": "x64" }, "sha512-ViPBCm+EtZ/4NmLqLAxcz31lVYGCe1ily+YmfAkoq1K/iuiXGhtD3mDrQuc3ayfTT8w5UwiYKyrUibJVJ/noVQ=="],
+    "@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.1.56", "", { "os": "win32", "cpu": "x64" }, "sha512-i6N5TjZU5gRkJsKmH8e/qY9vwSk0rh6A5t37mHDGlzN4E5yO/MbBrYH4ppLp5stps9Zfi1Re51ofJX1s2hZY/Q=="],
 
-    "@opentui/solid": ["@opentui/solid@0.1.55", "", { "dependencies": { "@babel/core": "7.28.0", "@babel/preset-typescript": "7.27.1", "@opentui/core": "0.1.55", "babel-plugin-module-resolver": "5.0.2", "babel-preset-solid": "1.9.9", "s-js": "^0.4.9" }, "peerDependencies": { "solid-js": "1.9.9" } }, "sha512-6NWOt0k32tu5KCiddLkPjVNWd++vW3QNbEccuGOdSiotO5TuwK4g0rcLAG6haPOB7Mf/l6aC06FQNeexpBqvtQ=="],
+    "@opentui/solid": ["@opentui/solid@0.1.56", "", { "dependencies": { "@babel/core": "7.28.0", "@babel/preset-typescript": "7.27.1", "@opentui/core": "0.1.56", "babel-plugin-module-resolver": "5.0.2", "babel-preset-solid": "1.9.9", "s-js": "^0.4.9" }, "peerDependencies": { "solid-js": "1.9.9" } }, "sha512-3R7AfxsYHUyehwJK98rt5dI9u2WCT/uH/CYvddZIgXPHyfFm1SHJekMdy3DUoiQTCUllt68eFGKMv9zRi6Laww=="],
 
     "@oslojs/asn1": ["@oslojs/asn1@1.0.0", "", { "dependencies": { "@oslojs/binary": "1.0.0" } }, "sha512-zw/wn0sj0j0QKbIXfIlnEcTviaCzYOY3V5rAyjR6YtOByFtJiT574+8p9Wlach0lZH9fddD4yb9laEAIl4vXQA=="],
diff --git a/flake.lock b/flake.lock
index 45c31d9ccf2..4e7cf41e1b7 100644
--- a/flake.lock
+++ b/flake.lock
@@ -2,11 +2,11 @@
   "nodes": {
     "nixpkgs": {
       "locked": {
-        "lastModified": 1764642553,
-        "narHash": "sha256-mvbFFzVBhVK1FjyPHZGMAKpNiqkr7k++xIwy+p/NQvA=",
+        "lastModified": 1764733908,
+        "narHash": "sha256-QJiih52NU+nm7XQWCj+K8SwUdIEayDQ1FQgjkYISt4I=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "f720de59066162ee879adcc8c79e15c51fe6bfb4",
+        "rev": "cadcc8de247676e4751c9d4a935acb2c0b059113",
         "type": "github"
       },
       "original": {
diff --git a/nix/hashes.json b/nix/hashes.json
index 7c7fc45f63e..47634e2ed82 100644
--- a/nix/hashes.json
+++ b/nix/hashes.json
@@ -1,3 +1,3 @@
 {
-  "nodeModules": "sha256-QhqAa47P3Y2aoMGnr8l1nLq0EQb4qEm75dGfNjyzbpU="
+  "nodeModules": "sha256-ZGKC7h4ScHDzVYj8qb1lN/weZhyZivPS8kpNAZvgO0I="
 }
diff --git a/packages/console/app/package.json b/packages/console/app/package.json
index 6db271e24b1..663b1e160da 100644
--- a/packages/console/app/package.json
+++ b/packages/console/app/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@opencode-ai/console-app",
-  "version": "1.0.129",
+  "version": "1.0.130",
   "type": "module",
   "scripts": {
     "typecheck": "tsgo --noEmit",
diff --git a/packages/console/app/src/routes/workspace/[id]/graph-section.tsx b/packages/console/app/src/routes/workspace/[id]/graph-section.tsx
index b13309d3d85..46418d61880 100644
--- a/packages/console/app/src/routes/workspace/[id]/graph-section.tsx
+++ b/packages/console/app/src/routes/workspace/[id]/graph-section.tsx
@@ -158,9 +158,24 @@ export function GraphSection() {
     model: null as string | null,
     modelDropdownOpen: false,
     keyDropdownOpen: false,
+    colorScheme: "light" as "light" | "dark",
   })
   const initialData = createAsync(() => queryCosts(params.id!, store.year, store.month))
 
+  createEffect(() => {
+    if (typeof window === "undefined") return
+
+    const mediaQuery = window.matchMedia("(prefers-color-scheme: dark)")
+    setStore({ colorScheme: mediaQuery.matches ? "dark" : "light" })
+
+    const handleColorSchemeChange = (e: MediaQueryListEvent) => {
+      setStore({ colorScheme: e.matches ? "dark" : "light" })
+    }
+
+    mediaQuery.addEventListener("change", handleColorSchemeChange)
+    onCleanup(() => mediaQuery.removeEventListener("change", handleColorSchemeChange))
+  })
+
   const onPreviousMonth = async () => {
     const month = store.month === 0 ? 11 : store.month - 1
     const year = store.month === 0 ? store.year - 1 : store.year
@@ -206,10 +221,21 @@
   const isCurrentMonth = () => store.year === now.getFullYear() && store.month === now.getMonth()
 
   const chartConfig = createMemo((): ChartConfiguration | null => {
+    if (typeof window === "undefined") return null
+
     const data = getData()
     const dates = getDates()
     if (!data?.usage?.length) return null
 
+    store.colorScheme
+    const styles = getComputedStyle(document.documentElement)
+    const colorTextMuted = styles.getPropertyValue("--color-text-muted").trim()
+    const colorBorderMuted = styles.getPropertyValue("--color-border-muted").trim()
+    const colorBgElevated = styles.getPropertyValue("--color-bg-elevated").trim()
+    const colorText = styles.getPropertyValue("--color-text").trim()
+    const colorTextSecondary = styles.getPropertyValue("--color-text-secondary").trim()
+    const colorBorder = styles.getPropertyValue("--color-border").trim()
+
     const dailyData = new Map<string, Map<string, number>>()
     for (const dateKey of dates) dailyData.set(dateKey, new Map())
@@ -252,7 +278,7 @@
         ticks: {
           maxRotation: 0,
           autoSkipPadding: 20,
-          color: "rgba(255, 255, 255, 0.5)",
+          color: colorTextMuted,
           font: {
             family: "monospace",
             size: 11,
@@ -263,10 +289,10 @@
         stacked: true,
         beginAtZero: true,
         grid: {
-          color: "rgba(255, 255, 255, 0.1)",
+          color: colorBorderMuted,
         },
         ticks: {
-          color: "rgba(255, 255, 255, 0.5)",
+          color: colorTextMuted,
           font: {
             family: "monospace",
             size: 11,
@@ -282,10 +308,10 @@
       tooltip: {
         mode: "index",
         intersect: false,
-        backgroundColor: "rgba(0, 0, 0, 0.9)",
-        titleColor: "rgba(255, 255, 255, 0.9)",
-        bodyColor: "rgba(255, 255, 255, 0.8)",
-        borderColor: "rgba(255, 255, 255, 0.1)",
+        backgroundColor: colorBgElevated,
+        titleColor: colorText,
+        bodyColor: colorTextSecondary,
+        borderColor: colorBorder,
         borderWidth: 1,
         padding: 12,
         displayColors: true,
@@ -301,7 +327,7 @@
         display: true,
         position: "bottom",
         labels: {
-          color: "rgba(255, 255, 255, 0.7)",
+          color: colorTextSecondary,
          font: {
            size: 12,
          },
diff --git a/packages/console/core/package.json b/packages/console/core/package.json
index 69f4d14c352..c0125fd7d83 100644
--- a/packages/console/core/package.json
+++ b/packages/console/core/package.json
@@ -1,7 +1,7 @@
 {
   "$schema": "https://json.schemastore.org/package.json",
   "name": "@opencode-ai/console-core",
-  "version": "1.0.129",
+  "version": "1.0.130",
   "private": true,
   "type": "module",
   "dependencies": {
diff --git a/packages/console/function/package.json b/packages/console/function/package.json
index f175147e601..df3c78bfcb1 100644
--- a/packages/console/function/package.json
+++ b/packages/console/function/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@opencode-ai/console-function",
-  "version": "1.0.129",
+  "version": "1.0.130",
   "$schema": "https://json.schemastore.org/package.json",
   "private": true,
   "type": "module",
diff --git a/packages/console/mail/package.json b/packages/console/mail/package.json
index 5c9505cc903..bd28176e323 100644
--- a/packages/console/mail/package.json
+++ b/packages/console/mail/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@opencode-ai/console-mail",
-  "version": "1.0.129",
+  "version": "1.0.130",
   "dependencies": {
     "@jsx-email/all": "2.2.3",
     "@jsx-email/cli": "1.4.3",
diff --git a/packages/desktop/package.json b/packages/desktop/package.json
index 4b797f62af6..367e92b6616 100644
--- a/packages/desktop/package.json
+++ b/packages/desktop/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@opencode-ai/desktop",
-  "version": "1.0.129",
+  "version": "1.0.130",
   "description": "",
   "type": "module",
   "scripts": {
diff --git a/packages/desktop/src/components/prompt-input.tsx b/packages/desktop/src/components/prompt-input.tsx
index 97692422370..a311ae76385 100644
--- a/packages/desktop/src/components/prompt-input.tsx
+++ b/packages/desktop/src/components/prompt-input.tsx
@@ -456,9 +456,9 @@ export const PromptInput: Component = (props) => {
                     {i.name}
-
+
-                    {DateTime.fromFormat(i.release_date, "yyyy-MM-dd").toFormat("LLL yyyy")}
+                    {DateTime.fromFormat("unknown", "yyyy-MM-dd").toFormat("LLL yyyy")}
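Reviewer note: in the graph-section.tsx hunk above, the bare `store.colorScheme` expression at the top of `chartConfig` is doing real work — reading a store property inside `createMemo` registers it as a reactive dependency, so the chart options are rebuilt with freshly resolved CSS variables whenever the scheme flips. A stripped-down sketch of the same pattern (the `theme` store is invented for illustration):

```ts
import { createMemo } from "solid-js"
import { createStore } from "solid-js/store"

const [theme, setTheme] = createStore({ scheme: "light" as "light" | "dark" })

const colors = createMemo(() => {
  theme.scheme // tracked read: re-run this memo when the scheme changes
  const styles = getComputedStyle(document.documentElement)
  return { text: styles.getPropertyValue("--color-text").trim() }
})

// setTheme({ scheme: "dark" }) now invalidates colors() even though the memo
// only consumes CSS variables, not the store value itself.
```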
diff --git a/packages/enterprise/package.json b/packages/enterprise/package.json
index 96c973b66f9..dd422ca892f 100644
--- a/packages/enterprise/package.json
+++ b/packages/enterprise/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@opencode-ai/enterprise",
-  "version": "1.0.129",
+  "version": "1.0.130",
   "private": true,
   "type": "module",
   "scripts": {
diff --git a/packages/extensions/zed/extension.toml b/packages/extensions/zed/extension.toml
index 1a9788adb30..7e3b9056848 100644
--- a/packages/extensions/zed/extension.toml
+++ b/packages/extensions/zed/extension.toml
@@ -1,7 +1,7 @@
 id = "opencode"
 name = "OpenCode"
 description = "The AI coding agent built for the terminal"
-version = "1.0.129"
+version = "1.0.130"
 schema_version = 1
 authors = ["Anomaly"]
 repository = "https://github.com/sst/opencode"
@@ -11,26 +11,26 @@ name = "OpenCode"
 icon = "./icons/opencode.svg"
 
 [agent_servers.opencode.targets.darwin-aarch64]
-archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-darwin-arm64.zip"
+archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-darwin-arm64.zip"
 cmd = "./opencode"
 args = ["acp"]
 
 [agent_servers.opencode.targets.darwin-x86_64]
-archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-darwin-x64.zip"
+archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-darwin-x64.zip"
 cmd = "./opencode"
 args = ["acp"]
 
 [agent_servers.opencode.targets.linux-aarch64]
-archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-linux-arm64.zip"
+archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-linux-arm64.zip"
 cmd = "./opencode"
 args = ["acp"]
 
 [agent_servers.opencode.targets.linux-x86_64]
-archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-linux-x64.zip"
+archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-linux-x64.zip"
 cmd = "./opencode"
 args = ["acp"]
 
 [agent_servers.opencode.targets.windows-x86_64]
-archive = "https://github.com/sst/opencode/releases/download/v1.0.129/opencode-windows-x64.zip"
+archive = "https://github.com/sst/opencode/releases/download/v1.0.130/opencode-windows-x64.zip"
 cmd = "./opencode.exe"
 args = ["acp"]
diff --git a/packages/function/package.json b/packages/function/package.json
index 7c886f44d86..e94d8cd3d13 100644
--- a/packages/function/package.json
+++ b/packages/function/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@opencode-ai/function",
-  "version": "1.0.129",
+  "version": "1.0.130",
   "$schema": "https://json.schemastore.org/package.json",
   "private": true,
   "type": "module",
diff --git a/packages/opencode/package.json b/packages/opencode/package.json
index 7c2a7f50d54..74d7597497c 100644
--- a/packages/opencode/package.json
+++ b/packages/opencode/package.json
@@ -1,6 +1,6 @@
 {
   "$schema": "https://json.schemastore.org/package.json",
-  "version": "1.0.129",
+  "version": "1.0.130",
   "name": "opencode",
   "type": "module",
   "private": true,
@@ -64,8 +64,8 @@
     "@opencode-ai/sdk": "workspace:*",
     "@opencode-ai/util": "workspace:*",
     "@openrouter/ai-sdk-provider": "1.2.8",
-    "@opentui/core": "0.1.55",
-    "@opentui/solid": "0.1.55",
+    "@opentui/core": "0.1.56",
+    "@opentui/solid": "0.1.56",
     "@parcel/watcher": "2.5.1",
     "@pierre/precision-diffs": "catalog:",
     "@solid-primitives/event-bus": "1.1.2",
diff --git a/packages/opencode/script/build.ts b/packages/opencode/script/build.ts
index b72270aaaf4..6cb7173701f 100755
--- a/packages/opencode/script/build.ts
+++ b/packages/opencode/script/build.ts
@@ -16,6 +16,7 @@ import pkg from "../package.json"
 import { Script } from "@opencode-ai/script"
 
 const singleFlag = process.argv.includes("--single")
+const skipInstall = process.argv.includes("--skip-install")
 
 const allTargets: {
   os: string
@@ -83,8 +84,10 @@ const targets = singleFlag
 await $`rm -rf dist`
 const binaries: Record<string, string> = {}
-await $`bun install --os="*" --cpu="*" @opentui/core@${pkg.dependencies["@opentui/core"]}`
-await $`bun install --os="*" --cpu="*" @parcel/watcher@${pkg.dependencies["@parcel/watcher"]}`
+if (!skipInstall) {
+  await $`bun install --os="*" --cpu="*" @opentui/core@${pkg.dependencies["@opentui/core"]}`
+  await $`bun install --os="*" --cpu="*" @parcel/watcher@${pkg.dependencies["@parcel/watcher"]}`
+}
 for (const item of targets) {
   const name = [
     "shuvcode", // Use shuvcode prefix for binary packages
@@ -104,6 +107,10 @@ for (const item of targets) {
   const parserWorker = fs.realpathSync(path.resolve(dir, "./node_modules/@opentui/core/parser.worker.js"))
   const workerPath = "./src/cli/cmd/tui/worker.ts"
 
+  // Use platform-specific bunfs root path based on target OS
+  const bunfsRoot = item.os === "win32" ? "B:/~BUN/root/" : "/$bunfs/root/"
+  const workerRelativePath = path.relative(dir, parserWorker).replaceAll("\\", "/")
+
   await Bun.build({
     conditions: ["browser"],
     tsconfig: "./tsconfig.json",
@@ -121,7 +128,7 @@
     define: {
       OPENCODE_VERSION: `'${Script.version}'`,
       OPENCODE_BASE_VERSION: `'${pkg.version}'`,
-      OTUI_TREE_SITTER_WORKER_PATH: "/$bunfs/root/" + path.relative(dir, parserWorker).replaceAll("\\", "/"),
+      OTUI_TREE_SITTER_WORKER_PATH: bunfsRoot + workerRelativePath,
      OPENCODE_WORKER_PATH: workerPath,
      OPENCODE_CHANNEL: `'${Script.channel}'`,
    },
diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts
index 1e37bcb7369..10387aaa49f 100644
--- a/packages/opencode/src/agent/agent.ts
+++ b/packages/opencode/src/agent/agent.ts
@@ -248,6 +248,7 @@ export namespace Agent {
   export async function generate(input: { description: string }) {
     const defaultModel = await Provider.defaultModel()
     const model = await Provider.getModel(defaultModel.providerID, defaultModel.modelID)
+    const language = await Provider.getLanguage(model)
     const system = SystemPrompt.header(defaultModel.providerID)
     system.push(PROMPT_GENERATE)
     const existing = await list()
@@ -265,7 +266,7 @@
           content: `Create an agent configuration based on this request: \"${input.description}\".\n\nIMPORTANT: The following identifiers already exist and must NOT be used: ${existing.map((i) => i.name).join(", ")}\n Return ONLY the JSON object, no other text, do not wrap in backticks`,
         },
       ],
-      model: model.language,
+      model: language,
      schema: z.object({
        identifier: z.string(),
        whenToUse: z.string(),
diff --git a/packages/opencode/src/cli/cmd/models.ts b/packages/opencode/src/cli/cmd/models.ts
index 1ae4ae12ca9..156dae91c67 100644
--- a/packages/opencode/src/cli/cmd/models.ts
+++ b/packages/opencode/src/cli/cmd/models.ts
@@ -38,7 +38,7 @@ export const ModelsCommand = cmd({
 
 function printModels(providerID: string, verbose?: boolean) {
   const provider = providers[providerID]
-  const sortedModels = Object.entries(provider.info.models).sort(([a], [b]) => a.localeCompare(b))
+  const sortedModels = Object.entries(provider.models).sort(([a], [b]) => a.localeCompare(b))
   for (const [modelID, model] of sortedModels) {
     process.stdout.write(`${providerID}/${modelID}`)
     process.stdout.write(EOL)
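Reviewer note: the agent.ts and models.ts hunks above show the new shape of the provider API — `Provider.getModel` now returns plain metadata (the new `Provider.Model` schema defined later in provider.ts), and the SDK language model is resolved separately through `Provider.getLanguage`, which caches per `providerID/modelID`. A minimal sketch of the two-step lookup, mirroring the agent.ts change (import path assumed relative to packages/opencode/src):

```ts
import { Provider } from "./provider/provider"

async function resolve(providerID: string, modelID: string) {
  const model = await Provider.getModel(providerID, modelID) // metadata + capabilities only
  const language = await Provider.getLanguage(model) // cached SDK LanguageModel instance
  return { model, language }
}
```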
diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
index 38a399848af..016172333ca 100644
--- a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
+++ b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx
@@ -650,6 +650,7 @@ export function Session() {
     {
       title: showDetails() ? "Hide tool details" : "Show tool details",
       value: "session.toggle.actions",
+      keybind: "tool_details",
       category: "Session",
       onSelect: (dialog) => {
         const newValue = !showDetails()
@@ -724,6 +725,37 @@
         dialog.clear()
       },
     },
+    {
+      title: "Jump to last user message",
+      value: "session.messages_last_user",
+      keybind: "messages_last_user",
+      category: "Session",
+      onSelect: () => {
+        const messages = sync.data.message[route.sessionID]
+        if (!messages || !messages.length) return
+
+        // Find the most recent user message with non-ignored, non-synthetic text parts
+        for (let i = messages.length - 1; i >= 0; i--) {
+          const message = messages[i]
+          if (!message || message.role !== "user") continue
+
+          const parts = sync.data.part[message.id]
+          if (!parts || !Array.isArray(parts)) continue
+
+          const hasValidTextPart = parts.some(
+            (part) => part && part.type === "text" && !part.synthetic && !part.ignored,
+          )
+
+          if (hasValidTextPart) {
+            const child = scroll.getChildren().find((child) => {
+              return child.id === message.id
+            })
+            if (child) scroll.scrollBy(child.y - scroll.y - 1)
+            break
+          }
+        }
+      },
+    },
     {
       title: "Copy last assistant message",
       value: "messages.copy",
diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts
index 2ee32fbaf61..786d0e7a75b 100644
--- a/packages/opencode/src/config/config.ts
+++ b/packages/opencode/src/config/config.ts
@@ -419,6 +419,7 @@ export namespace Config {
         .describe("Scroll messages down by half page"),
       messages_first: z.string().optional().default("ctrl+g,home").describe("Navigate to first message"),
       messages_last: z.string().optional().default("ctrl+alt+g,end").describe("Navigate to last message"),
+      messages_last_user: z.string().optional().default("none").describe("Navigate to last user message"),
       messages_copy: z.string().optional().default("y").describe("Copy message"),
       messages_undo: z.string().optional().default("u").describe("Undo message"),
       messages_redo: z.string().optional().default("r").describe("Redo message"),
@@ -427,6 +428,7 @@
         .optional()
         .default("h")
         .describe("Toggle code block concealment in messages"),
+      tool_details: z.string().optional().default("none").describe("Toggle tool details visibility"),
       model_list: z.string().optional().default("m").describe("List available models"),
       model_cycle_recent: z.string().optional().default("f2").describe("Next recently used model"),
       model_cycle_recent_reverse: z.string().optional().default("shift+f2").describe("Previous recently used model"),
@@ -470,6 +472,42 @@
     })
   export type Layout = z.infer<typeof Layout>
 
+  export const Provider = ModelsDev.Provider.partial()
+    .extend({
+      whitelist: z.array(z.string()).optional(),
+      blacklist: z.array(z.string()).optional(),
+      models: z.record(z.string(), ModelsDev.Model.partial()).optional(),
+      options: z
+        .object({
+          apiKey: z.string().optional(),
+          baseURL: z.string().optional(),
+          enterpriseUrl: z.string().optional().describe("GitHub Enterprise URL for copilot authentication"),
+          setCacheKey: z.boolean().optional().describe("Enable promptCacheKey for this provider (default false)"),
+          timeout: z
+            .union([
+              z
+                .number()
+                .int()
+                .positive()
+                .describe(
+                  "Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout.",
+                ),
+              z.literal(false).describe("Disable timeout for this provider entirely."),
+            ])
+            .optional()
+            .describe(
+              "Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout.",
+            ),
+        })
+        .catchall(z.any())
+        .optional(),
+    })
+    .strict()
+    .meta({
+      ref: "ProviderConfig",
+    })
+  export type Provider = z.infer<typeof Provider>
+
   export const Info = z
     .object({
       $schema: z.string().optional().describe("JSON schema reference for configuration validation"),
@@ -536,43 +574,7 @@
       .optional()
       .describe("Agent configuration, see https://opencode.ai/docs/agent"),
     provider: z
-      .record(
-        z.string(),
-        ModelsDev.Provider.partial()
-          .extend({
-            whitelist: z.array(z.string()).optional(),
-            blacklist: z.array(z.string()).optional(),
-            models: z.record(z.string(), ModelsDev.Model.partial()).optional(),
-            options: z
-              .object({
-                apiKey: z.string().optional(),
-                baseURL: z.string().optional(),
-                enterpriseUrl: z.string().optional().describe("GitHub Enterprise URL for copilot authentication"),
-                setCacheKey: z
-                  .boolean()
-                  .optional()
-                  .describe("Enable promptCacheKey for this provider (default false)"),
-                timeout: z
-                  .union([
-                    z
-                      .number()
-                      .int()
-                      .positive()
-                      .describe(
-                        "Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout.",
-                      ),
-                    z.literal(false).describe("Disable timeout for this provider entirely."),
-                  ])
-                  .optional()
-                  .describe(
-                    "Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout.",
-                  ),
-              })
-              .catchall(z.any())
-              .optional(),
-          })
-          .strict(),
-      )
+      .record(z.string(), Provider)
       .optional()
       .describe("Custom provider configurations and model overrides"),
     mcp: z.record(z.string(), Mcp).optional().describe("MCP (Model Context Protocol) server configurations"),
@@ -670,6 +672,10 @@
         chatMaxRetries: z.number().optional().describe("Number of retries for chat completions on failure"),
         disable_paste_summary: z.boolean().optional(),
         batch_tool: z.boolean().optional().describe("Enable the batch tool"),
+        primary_tools: z
+          .array(z.string())
+          .optional()
+          .describe("Tools that should only be available to primary agents."),
       })
       .optional(),
   })
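Reviewer note: with the provider schema extracted into a named `ProviderConfig` (`Config.Provider`), overrides can now be typed against it directly. A hypothetical example — all values are invented — showing the `timeout: false` escape hatch and a whitelist:

```ts
import { Config } from "./config/config"

// Hypothetical provider override; field values are invented for illustration.
const gateway: Config.Provider = {
  name: "My Gateway",
  options: {
    baseURL: "https://llm.example.com/v1", // assumed OpenAI-compatible endpoint
    timeout: false, // disable the 300000 ms default entirely
  },
  whitelist: ["my-model"],
}
```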
diff --git a/packages/opencode/src/env/index.ts b/packages/opencode/src/env/index.ts
new file mode 100644
index 00000000000..56a8c921f1e
--- /dev/null
+++ b/packages/opencode/src/env/index.ts
@@ -0,0 +1,26 @@
+import { Instance } from "../project/instance"
+
+export namespace Env {
+  const state = Instance.state(() => {
+    return { ...process.env } as Record<string, string>
+  })
+
+  export function get(key: string) {
+    const env = state()
+    return env[key]
+  }
+
+  export function all() {
+    return state()
+  }
+
+  export function set(key: string, value: string) {
+    const env = state()
+    env[key] = value
+  }
+
+  export function remove(key: string) {
+    const env = state()
+    delete env[key]
+  }
+}
diff --git a/packages/opencode/src/provider/models.ts b/packages/opencode/src/provider/models.ts
index 676837e1521..3d28787c88f 100644
--- a/packages/opencode/src/provider/models.ts
+++ b/packages/opencode/src/provider/models.ts
@@ -9,16 +9,16 @@ export namespace ModelsDev {
   const log = Log.create({ service: "models.dev" })
   const filepath = path.join(Global.Path.cache, "models.json")
 
-  export const Model = z
-    .object({
-      id: z.string(),
-      name: z.string(),
-      release_date: z.string(),
-      attachment: z.boolean(),
-      reasoning: z.boolean(),
-      temperature: z.boolean(),
-      tool_call: z.boolean(),
-      cost: z.object({
+  export const Model = z.object({
+    id: z.string(),
+    name: z.string(),
+    release_date: z.string(),
+    attachment: z.boolean(),
+    reasoning: z.boolean(),
+    temperature: z.boolean(),
+    tool_call: z.boolean(),
+    cost: z
+      .object({
         input: z.number(),
         output: z.number(),
         cache_read: z.number().optional(),
@@ -31,40 +31,34 @@
         cache_write: z.number().optional(),
       })
       .optional(),
-      }),
-      limit: z.object({
-        context: z.number(),
-        output: z.number(),
-      }),
-      modalities: z
-        .object({
-          input: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
-          output: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
-        })
-        .optional(),
-      experimental: z.boolean().optional(),
-      status: z.enum(["alpha", "beta", "deprecated"]).optional(),
-      options: z.record(z.string(), z.any()),
-      headers: z.record(z.string(), z.string()).optional(),
-      provider: z.object({ npm: z.string() }).optional(),
-    })
-    .meta({
-      ref: "Model",
-    })
+      })
+      .optional(),
+    limit: z.object({
+      context: z.number(),
+      output: z.number(),
+    }),
+    modalities: z
+      .object({
+        input: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
+        output: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
+      })
+      .optional(),
+    experimental: z.boolean().optional(),
+    status: z.enum(["alpha", "beta", "deprecated"]).optional(),
+    options: z.record(z.string(), z.any()),
+    headers: z.record(z.string(), z.string()).optional(),
+    provider: z.object({ npm: z.string() }).optional(),
+  })
   export type Model = z.infer<typeof Model>
 
-  export const Provider = z
-    .object({
-      api: z.string().optional(),
-      name: z.string(),
-      env: z.array(z.string()),
-      id: z.string(),
-      npm: z.string().optional(),
-      models: z.record(z.string(), Model),
-    })
-    .meta({
-      ref: "Provider",
-    })
+  export const Provider = z.object({
+    api: z.string().optional(),
+    name: z.string(),
+    env: z.array(z.string()),
+    id: z.string(),
+    npm: z.string().optional(),
+    models: z.record(z.string(), Model),
+  })
   export type Provider = z.infer<typeof Provider>
createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider" import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/openai-compatible/src" export namespace Provider { @@ -42,14 +43,13 @@ export namespace Provider { "@ai-sdk/github-copilot": createGitHubCopilotOpenAICompatible, } - type CustomLoader = (provider: ModelsDev.Provider) => Promise<{ + type CustomModelLoader = (sdk: any, modelID: string, options?: Record) => Promise + type CustomLoader = (provider: Info) => Promise<{ autoload: boolean - getModel?: (sdk: any, modelID: string, options?: Record) => Promise + getModel?: CustomModelLoader options?: Record }> - type Source = "env" | "config" | "custom" | "api" - const CUSTOM_LOADERS: Record = { async anthropic() { return { @@ -64,7 +64,8 @@ export namespace Provider { }, async opencode(input) { const hasKey = await (async () => { - if (input.env.some((item) => process.env[item])) return true + const env = Env.all() + if (input.env.some((item) => env[item])) return true if (await Auth.get(input.id)) return true return false })() @@ -128,7 +129,7 @@ export namespace Provider { } }, "azure-cognitive-services": async () => { - const resourceName = process.env["AZURE_COGNITIVE_SERVICES_RESOURCE_NAME"] + const resourceName = Env.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME") return { autoload: false, async getModel(sdk: any, modelID: string, options?: Record) { @@ -144,10 +145,15 @@ export namespace Provider { } }, "amazon-bedrock": async () => { - if (!process.env["AWS_PROFILE"] && !process.env["AWS_ACCESS_KEY_ID"] && !process.env["AWS_BEARER_TOKEN_BEDROCK"]) - return { autoload: false } + const [awsProfile, awsAccessKeyId, awsBearerToken, awsRegion] = await Promise.all([ + Env.get("AWS_PROFILE"), + Env.get("AWS_ACCESS_KEY_ID"), + Env.get("AWS_BEARER_TOKEN_BEDROCK"), + Env.get("AWS_REGION"), + ]) + if (!awsProfile && !awsAccessKeyId && !awsBearerToken) return { autoload: false } - const region = process.env["AWS_REGION"] ?? "us-east-1" + const region = awsRegion ?? "us-east-1" const { fromNodeProviderChain } = await import(await BunProc.install("@aws-sdk/credential-providers")) return { @@ -246,8 +252,8 @@ export namespace Provider { } }, "google-vertex": async () => { - const project = process.env["GOOGLE_CLOUD_PROJECT"] ?? process.env["GCP_PROJECT"] ?? process.env["GCLOUD_PROJECT"] - const location = process.env["GOOGLE_CLOUD_LOCATION"] ?? process.env["VERTEX_LOCATION"] ?? "us-east5" + const project = Env.get("GOOGLE_CLOUD_PROJECT") ?? Env.get("GCP_PROJECT") ?? Env.get("GCLOUD_PROJECT") + const location = Env.get("GOOGLE_CLOUD_LOCATION") ?? Env.get("VERTEX_LOCATION") ?? "us-east5" const autoload = Boolean(project) if (!autoload) return { autoload: false } return { @@ -263,8 +269,8 @@ export namespace Provider { } }, "google-vertex-anthropic": async () => { - const project = process.env["GOOGLE_CLOUD_PROJECT"] ?? process.env["GCP_PROJECT"] ?? process.env["GCLOUD_PROJECT"] - const location = process.env["GOOGLE_CLOUD_LOCATION"] ?? process.env["VERTEX_LOCATION"] ?? "global" + const project = Env.get("GOOGLE_CLOUD_PROJECT") ?? Env.get("GCP_PROJECT") ?? Env.get("GCLOUD_PROJECT") + const location = Env.get("GOOGLE_CLOUD_LOCATION") ?? Env.get("VERTEX_LOCATION") ?? 
"global" const autoload = Boolean(project) if (!autoload) return { autoload: false } return { @@ -273,7 +279,7 @@ export namespace Provider { project, location, }, - async getModel(sdk: any, modelID: string) { + async getModel(sdk, modelID) { const id = String(modelID).trim() return sdk.languageModel(id) }, @@ -292,10 +298,155 @@ export namespace Provider { }, } + export const Model = z + .object({ + id: z.string(), + providerID: z.string(), + api: z.object({ + id: z.string(), + url: z.string(), + npm: z.string(), + }), + name: z.string(), + capabilities: z.object({ + temperature: z.boolean(), + reasoning: z.boolean(), + attachment: z.boolean(), + toolcall: z.boolean(), + input: z.object({ + text: z.boolean(), + audio: z.boolean(), + image: z.boolean(), + video: z.boolean(), + pdf: z.boolean(), + }), + output: z.object({ + text: z.boolean(), + audio: z.boolean(), + image: z.boolean(), + video: z.boolean(), + pdf: z.boolean(), + }), + }), + cost: z.object({ + input: z.number(), + output: z.number(), + cache: z.object({ + read: z.number(), + write: z.number(), + }), + experimentalOver200K: z + .object({ + input: z.number(), + output: z.number(), + cache: z.object({ + read: z.number(), + write: z.number(), + }), + }) + .optional(), + }), + limit: z.object({ + context: z.number(), + output: z.number(), + }), + status: z.enum(["alpha", "beta", "deprecated", "active"]), + options: z.record(z.string(), z.any()), + headers: z.record(z.string(), z.string()), + }) + .meta({ + ref: "Model", + }) + export type Model = z.infer + + export const Info = z + .object({ + id: z.string(), + name: z.string(), + source: z.enum(["env", "config", "custom", "api"]), + env: z.string().array(), + key: z.string().optional(), + options: z.record(z.string(), z.any()), + models: z.record(z.string(), Model), + }) + .meta({ + ref: "Provider", + }) + export type Info = z.infer + + function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model): Model { + return { + id: model.id, + providerID: provider.id, + name: model.name, + api: { + id: model.id, + url: provider.api!, + npm: model.provider?.npm ?? provider.npm ?? provider.id, + }, + status: model.status ?? "active", + headers: model.headers ?? {}, + options: model.options ?? {}, + cost: { + input: model.cost?.input ?? 0, + output: model.cost?.output ?? 0, + cache: { + read: model.cost?.cache_read ?? 0, + write: model.cost?.cache_write ?? 0, + }, + experimentalOver200K: model.cost?.context_over_200k + ? { + cache: { + read: model.cost.context_over_200k.cache_read ?? 0, + write: model.cost.context_over_200k.cache_write ?? 0, + }, + input: model.cost.context_over_200k.input, + output: model.cost.context_over_200k.output, + } + : undefined, + }, + limit: { + context: model.limit.context, + output: model.limit.output, + }, + capabilities: { + temperature: model.temperature, + reasoning: model.reasoning, + attachment: model.attachment, + toolcall: model.tool_call, + input: { + text: model.modalities?.input?.includes("text") ?? false, + audio: model.modalities?.input?.includes("audio") ?? false, + image: model.modalities?.input?.includes("image") ?? false, + video: model.modalities?.input?.includes("video") ?? false, + pdf: model.modalities?.input?.includes("pdf") ?? false, + }, + output: { + text: model.modalities?.output?.includes("text") ?? false, + audio: model.modalities?.output?.includes("audio") ?? false, + image: model.modalities?.output?.includes("image") ?? false, + video: model.modalities?.output?.includes("video") ?? 
false, + pdf: model.modalities?.output?.includes("pdf") ?? false, + }, + }, + } + } + + export function fromModelsDevProvider(provider: ModelsDev.Provider): Info { + return { + id: provider.id, + source: "custom", + name: provider.name, + env: provider.env ?? [], + options: {}, + models: mapValues(provider.models, (model) => fromModelsDevModel(provider, model)), + } + } + const state = Instance.state(async () => { using _ = log.time("state") const config = await Config.get() - const database = await ModelsDev.get() + const database = mapValues(await ModelsDev.get(), fromModelsDevProvider) const disabled = new Set(config.disabled_providers ?? []) const enabled = config.enabled_providers ? new Set(config.enabled_providers) : null @@ -306,54 +457,15 @@ export namespace Provider { return true } - const providers: { - [providerID: string]: { - source: Source - info: ModelsDev.Provider - getModel?: (sdk: any, modelID: string, options?: Record) => Promise - options: Record - } + const providers: { [providerID: string]: Info } = {} + const languages = new Map() + const modelLoaders: { + [providerID: string]: CustomModelLoader } = {} - const models = new Map< - string, - { - providerID: string - modelID: string - info: ModelsDev.Model - language: LanguageModel - npm?: string - } - >() const sdk = new Map() - // Maps `${provider}/${key}` to the provider’s actual model ID for custom aliases. - const realIdByKey = new Map() log.info("init") - function mergeProvider( - id: string, - options: Record, - source: Source, - getModel?: (sdk: any, modelID: string, options?: Record) => Promise, - ) { - const provider = providers[id] - if (!provider) { - const info = database[id] - if (!info) return - if (info.api && !options["baseURL"]) options["baseURL"] = info.api - providers[id] = { - source, - info, - options, - getModel, - } - return - } - provider.options = mergeDeep(provider.options, options) - provider.source = source - provider.getModel = getModel ?? provider.getModel - } - const configProviders = Object.entries(config.provider ?? {}) // Add GitHub Copilot Enterprise provider that inherits from GitHub Copilot @@ -363,19 +475,31 @@ export namespace Provider { ...githubCopilot, id: "github-copilot-enterprise", name: "GitHub Copilot Enterprise", - // Enterprise uses a different API endpoint - will be set dynamically based on auth - api: undefined, } } + function mergeProvider(providerID: string, provider: Partial) { + const existing = providers[providerID] + if (existing) { + // @ts-expect-error + providers[providerID] = mergeDeep(existing, provider) + return + } + const match = database[providerID] + if (!match) return + // @ts-expect-error + providers[providerID] = mergeDeep(match, provider) + } + + // extend database from config for (const [providerID, provider] of configProviders) { const existing = database[providerID] - const parsed: ModelsDev.Provider = { + const parsed: Info = { id: providerID, - npm: provider.npm ?? existing?.npm, name: provider.name ?? existing?.name ?? providerID, env: provider.env ?? existing?.env ?? [], - api: provider.api ?? existing?.api, + options: mergeDeep(existing?.options ?? {}, provider.options ?? {}), + source: "config", models: existing?.models ?? {}, } @@ -386,72 +510,76 @@ export namespace Provider { if (model.id && model.id !== modelID) return modelID return existing?.name ?? modelID }) - const parsedModel: ModelsDev.Model = { + const parsedModel: Model = { id: modelID, - name, - release_date: model.release_date ?? 
existing?.release_date, - attachment: model.attachment ?? existing?.attachment ?? false, - reasoning: model.reasoning ?? existing?.reasoning ?? false, - temperature: model.temperature ?? existing?.temperature ?? false, - tool_call: model.tool_call ?? existing?.tool_call ?? true, - cost: - !model.cost && !existing?.cost - ? { - input: 0, - output: 0, - cache_read: 0, - cache_write: 0, - } - : { - cache_read: 0, - cache_write: 0, - ...existing?.cost, - ...model.cost, - }, - options: { - ...existing?.options, - ...model.options, + api: { + id: model.id ?? existing?.api.id ?? modelID, + npm: model.provider?.npm ?? provider.npm ?? existing?.api.npm ?? providerID, + url: provider?.api ?? existing?.api.url, }, - limit: model.limit ?? - existing?.limit ?? { - context: 0, - output: 0, + status: model.status ?? existing?.status ?? "active", + name, + providerID, + capabilities: { + temperature: model.temperature ?? existing?.capabilities.temperature ?? false, + reasoning: model.reasoning ?? existing?.capabilities.reasoning ?? false, + attachment: model.attachment ?? existing?.capabilities.attachment ?? false, + toolcall: model.tool_call ?? existing?.capabilities.toolcall ?? true, + input: { + text: model.modalities?.input?.includes("text") ?? existing?.capabilities.input.text ?? true, + audio: model.modalities?.input?.includes("audio") ?? existing?.capabilities.input.audio ?? false, + image: model.modalities?.input?.includes("image") ?? existing?.capabilities.input.image ?? false, + video: model.modalities?.input?.includes("video") ?? existing?.capabilities.input.video ?? false, + pdf: model.modalities?.input?.includes("pdf") ?? existing?.capabilities.input.pdf ?? false, }, - modalities: model.modalities ?? - existing?.modalities ?? { - input: ["text"], - output: ["text"], + output: { + text: model.modalities?.output?.includes("text") ?? existing?.capabilities.output.text ?? true, + audio: model.modalities?.output?.includes("audio") ?? existing?.capabilities.output.audio ?? false, + image: model.modalities?.output?.includes("image") ?? existing?.capabilities.output.image ?? false, + video: model.modalities?.output?.includes("video") ?? existing?.capabilities.output.video ?? false, + pdf: model.modalities?.output?.includes("pdf") ?? existing?.capabilities.output.pdf ?? false, }, - headers: model.headers, - provider: model.provider ?? existing?.provider, - } - if (model.id && model.id !== modelID) { - realIdByKey.set(`${providerID}/${modelID}`, model.id) + }, + cost: { + input: model?.cost?.input ?? existing?.cost?.input ?? 0, + output: model?.cost?.output ?? existing?.cost?.output ?? 0, + cache: { + read: model?.cost?.cache_read ?? existing?.cost?.cache.read ?? 0, + write: model?.cost?.cache_write ?? existing?.cost?.cache.write ?? 0, + }, + }, + options: mergeDeep(existing?.options ?? {}, model.options ?? {}), + limit: { + context: model.limit?.context ?? existing?.limit?.context ?? 0, + output: model.limit?.output ?? existing?.limit?.output ?? 0, + }, + headers: mergeDeep(existing?.headers ?? {}, model.headers ?? 
{}), } parsed.models[modelID] = parsedModel } - database[providerID] = parsed } // load env + const env = Env.all() for (const [providerID, provider] of Object.entries(database)) { if (disabled.has(providerID)) continue - const apiKey = provider.env.map((item) => process.env[item]).at(0) + const apiKey = provider.env.map((item) => env[item]).find(Boolean) if (!apiKey) continue - mergeProvider( - providerID, - // only include apiKey if there's only one potential option - provider.env.length === 1 ? { apiKey } : {}, - "env", - ) + mergeProvider(providerID, { + source: "env", + key: provider.env.length === 1 ? apiKey : undefined, + }) } // load apikeys for (const [providerID, provider] of Object.entries(await Auth.all())) { if (disabled.has(providerID)) continue if (provider.type === "api") { - mergeProvider(providerID, { apiKey: provider.key }, "api") + mergeProvider(providerID, { + source: "api", + key: provider.key, + }) } } @@ -477,7 +605,10 @@ export namespace Provider { // Load for the main provider if auth exists if (auth) { const options = await plugin.auth.loader(() => Auth.get(providerID) as any, database[plugin.auth.provider]) - mergeProvider(plugin.auth.provider, options ?? {}, "custom") + mergeProvider(plugin.auth.provider, { + source: "custom", + options: options, + }) } // If this is github-copilot plugin, also register for github-copilot-enterprise if auth exists @@ -490,7 +621,10 @@ export namespace Provider { () => Auth.get(enterpriseProviderID) as any, database[enterpriseProviderID], ) - mergeProvider(enterpriseProviderID, enterpriseOptions ?? {}, "custom") + mergeProvider(enterpriseProviderID, { + source: "custom", + options: enterpriseOptions, + }) } } } @@ -500,13 +634,21 @@ export namespace Provider { if (disabled.has(providerID)) continue const result = await fn(database[providerID]) if (result && (result.autoload || providers[providerID])) { - mergeProvider(providerID, result.options ?? {}, "custom", result.getModel) + if (result.getModel) modelLoaders[providerID] = result.getModel + mergeProvider(providerID, { + source: "custom", + options: result.options, + }) } } // load config for (const [providerID, provider] of configProviders) { - mergeProvider(providerID, provider.options ?? 
{}, "config") + const partial: Partial = { source: "config" } + if (provider.env) partial.env = provider.env + if (provider.name) partial.name = provider.name + if (provider.options) partial.options = provider.options + mergeProvider(providerID, partial) } for (const [providerID, provider] of Object.entries(providers)) { @@ -516,49 +658,43 @@ export namespace Provider { } if (providerID === "github-copilot" || providerID === "github-copilot-enterprise") { - provider.info.npm = "@ai-sdk/github-copilot" + provider.models = mapValues(provider.models, (model) => ({ + ...model, + api: { + ...model.api, + npm: "@ai-sdk/github-copilot", + }, + })) } const configProvider = config.provider?.[providerID] - const filteredModels = Object.fromEntries( - Object.entries(provider.info.models) - // Filter out blacklisted models - .filter( - ([modelID]) => - modelID !== "gpt-5-chat-latest" && !(providerID === "openrouter" && modelID === "openai/gpt-5-chat"), - ) - // Filter out experimental models - .filter( - ([, model]) => - ((!model.experimental && model.status !== "alpha") || Flag.OPENCODE_ENABLE_EXPERIMENTAL_MODELS) && - model.status !== "deprecated", - ) - // Filter by provider's whitelist/blacklist from config - .filter(([modelID]) => { - if (!configProvider) return true - - return ( - (!configProvider.blacklist || !configProvider.blacklist.includes(modelID)) && - (!configProvider.whitelist || configProvider.whitelist.includes(modelID)) - ) - }), - ) - provider.info.models = filteredModels + for (const [modelID, model] of Object.entries(provider.models)) { + model.api.id = model.api.id ?? model.id ?? modelID + if (modelID === "gpt-5-chat-latest" || (providerID === "openrouter" && modelID === "openai/gpt-5-chat")) + delete provider.models[modelID] + if ((model.status === "alpha" && !Flag.OPENCODE_ENABLE_EXPERIMENTAL_MODELS) || model.status === "deprecated") + delete provider.models[modelID] + if ( + (configProvider?.blacklist && configProvider.blacklist.includes(modelID)) || + (configProvider?.whitelist && !configProvider.whitelist.includes(modelID)) + ) + delete provider.models[modelID] + } - if (Object.keys(provider.info.models).length === 0) { + if (Object.keys(provider.models).length === 0) { delete providers[providerID] continue } - log.info("found", { providerID, npm: provider.info.npm }) + log.info("found", { providerID }) } return { - models, + models: languages, providers, sdk, - realIdByKey, + modelLoaders, } }) @@ -566,19 +702,28 @@ export namespace Provider { return state().then((state) => state.providers) } - async function getSDK(provider: ModelsDev.Provider, model: ModelsDev.Model) { - return (async () => { + async function getSDK(model: Model) { + try { using _ = log.time("getSDK", { - providerID: provider.id, + providerID: model.providerID, }) const s = await state() - const pkg = model.provider?.npm ?? provider.npm ?? 
provider.id - const options = { ...s.providers[provider.id]?.options } - if (pkg.includes("@ai-sdk/openai-compatible") && options["includeUsage"] === undefined) { + const provider = s.providers[model.providerID] + const options = { ...provider.options } + + if (model.api.npm.includes("@ai-sdk/openai-compatible") && options["includeUsage"] !== false) { options["includeUsage"] = true } - const key = Bun.hash.xxHash32(JSON.stringify({ pkg, options })) + if (!options["baseURL"]) options["baseURL"] = model.api.url + if (!options["apiKey"]) options["apiKey"] = provider.key + if (model.headers) + options["headers"] = { + ...options["headers"], + ...model.headers, + } + + const key = Bun.hash.xxHash32(JSON.stringify({ npm: model.api.npm, options })) const existing = s.sdk.get(key) if (existing) return existing @@ -607,12 +752,13 @@ export namespace Provider { } // Special case: google-vertex-anthropic uses a subpath import - const bundledKey = provider.id === "google-vertex-anthropic" ? "@ai-sdk/google-vertex/anthropic" : pkg + const bundledKey = + model.providerID === "google-vertex-anthropic" ? "@ai-sdk/google-vertex/anthropic" : model.api.npm const bundledFn = BUNDLED_PROVIDERS[bundledKey] if (bundledFn) { - log.info("using bundled provider", { providerID: provider.id, pkg: bundledKey }) + log.info("using bundled provider", { providerID: model.providerID, pkg: bundledKey }) const loaded = bundledFn({ - name: provider.id, + name: model.providerID, ...options, }) s.sdk.set(key, loaded) @@ -620,25 +766,25 @@ export namespace Provider { } let installedPath: string - if (!pkg.startsWith("file://")) { - installedPath = await BunProc.install(pkg, "latest") + if (!model.api.npm.startsWith("file://")) { + installedPath = await BunProc.install(model.api.npm, "latest") } else { - log.info("loading local provider", { pkg }) - installedPath = pkg + log.info("loading local provider", { pkg: model.api.npm }) + installedPath = model.api.npm } const mod = await import(installedPath) const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!] const loaded = fn({ - name: provider.id, + name: model.providerID, ...options, }) s.sdk.set(key, loaded) return loaded as SDK - })().catch((e) => { - throw new InitError({ providerID: provider.id }, { cause: e }) - }) + } catch (e) { + throw new InitError({ providerID: model.providerID }, { cause: e }) + } } export async function getProvider(providerID: string) { @@ -646,15 +792,7 @@ export namespace Provider { } export async function getModel(providerID: string, modelID: string) { - const key = `${providerID}/${modelID}` const s = await state() - if (s.models.has(key)) return s.models.get(key)! 
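Worth noting: the getSDK cache above is keyed by a hash of the resolved npm package plus the final options, so any two models that resolve to the same package and options share one SDK instance. A condensed sketch of that memoization, assuming a standalone context (the `loadSDK` and `cache` names are illustrative, not part of this PR; `Bun.hash.xxHash32` and the dynamic `import` are the same calls the hunk uses):

```ts
// Sketch: memoize provider SDK factories by (npm package, resolved options).
const cache = new Map<number, unknown>()

async function loadSDK(npm: string, options: Record<string, unknown>) {
  // Caveat: JSON.stringify is property-order sensitive, so options must be
  // assembled the same way each time for identical inputs to hit the cache.
  const key = Bun.hash.xxHash32(JSON.stringify({ npm, options }))
  const hit = cache.get(key)
  if (hit) return hit
  const mod = await import(npm)
  // Provider packages conventionally export a create* factory (createOpenAI, createAnthropic, ...)
  const factory = mod[Object.keys(mod).find((k) => k.startsWith("create"))!]
  const sdk = factory({ ...options })
  cache.set(key, sdk)
  return sdk
}
```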
- - log.info("getModel", { - providerID, - modelID, - }) - const provider = s.providers[providerID] if (!provider) { const availableProviders = Object.keys(s.providers) @@ -663,43 +801,36 @@ export namespace Provider { throw new ModelNotFoundError({ providerID, modelID, suggestions }) } - const info = provider.info.models[modelID] + const info = provider.models[modelID] if (!info) { - const availableModels = Object.keys(provider.info.models) + const availableModels = Object.keys(provider.models) const matches = fuzzysort.go(modelID, availableModels, { limit: 3, threshold: -10000 }) const suggestions = matches.map((m) => m.target) throw new ModelNotFoundError({ providerID, modelID, suggestions }) } + return info + } + + export async function getLanguage(model: Model) { + const s = await state() + const key = `${model.providerID}/${model.id}` + if (s.models.has(key)) return s.models.get(key)! - const sdk = await getSDK(provider.info, info) + const provider = s.providers[model.providerID] + const sdk = await getSDK(model) try { - const keyReal = `${providerID}/${modelID}` - const realID = s.realIdByKey.get(keyReal) ?? info.id - const language = provider.getModel - ? await provider.getModel(sdk, realID, provider.options) - : sdk.languageModel(realID) - log.info("found", { providerID, modelID }) - s.models.set(key, { - providerID, - modelID, - info, - language, - npm: info.provider?.npm ?? provider.info.npm, - }) - return { - modelID, - providerID, - info, - language, - npm: info.provider?.npm ?? provider.info.npm, - } + const language = s.modelLoaders[model.providerID] + ? await s.modelLoaders[model.providerID](sdk, model.api.id, provider.options) + : sdk.languageModel(model.api.id) + s.models.set(key, language) + return language } catch (e) { if (e instanceof NoSuchModelError) throw new ModelNotFoundError( { - modelID: modelID, - providerID, + modelID: model.id, + providerID: model.providerID, }, { cause: e }, ) @@ -712,7 +843,7 @@ export namespace Provider { const provider = s.providers[providerID] if (!provider) return undefined for (const item of query) { - for (const modelID of Object.keys(provider.info.models)) { + for (const modelID of Object.keys(provider.models)) { if (modelID.includes(item)) return { providerID, @@ -748,7 +879,7 @@ export namespace Provider { priority = ["gpt-5-nano"] } for (const item of priority) { - for (const model of Object.keys(provider.info.models)) { + for (const model of Object.keys(provider.models)) { if (model.includes(item)) return getModel(providerID, model) } } @@ -756,7 +887,7 @@ export namespace Provider { // Check if opencode provider is available before using it const opencodeProvider = await state().then((state) => state.providers["opencode"]) - if (opencodeProvider && opencodeProvider.info.models["gpt-5-nano"]) { + if (opencodeProvider && opencodeProvider.models["gpt-5-nano"]) { return getModel("opencode", "gpt-5-nano") } @@ -764,7 +895,7 @@ export namespace Provider { } const priority = ["gpt-5", "claude-sonnet-4", "big-pickle", "gemini-3-pro"] - export function sort(models: ModelsDev.Model[]) { + export function sort(models: Model[]) { return sortBy( models, [(model) => priority.findIndex((filter) => model.id.includes(filter)), "desc"], @@ -779,12 +910,12 @@ export namespace Provider { const provider = await list() .then((val) => Object.values(val)) - .then((x) => x.find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.info.id))) + .then((x) => x.find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.id))) if (!provider) 
throw new Error("no providers found") - const [model] = sort(Object.values(provider.info.models)) + const [model] = sort(Object.values(provider.models)) if (!model) throw new Error("no models found") return { - providerID: provider.info.id, + providerID: provider.id, modelID: model.id, } } diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index abe269d5d02..09dfd69a317 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -1,10 +1,11 @@ import type { APICallError, ModelMessage } from "ai" import { unique } from "remeda" import type { JSONSchema } from "zod/v4/core" +import type { Provider } from "./provider" export namespace ProviderTransform { - function normalizeMessages(msgs: ModelMessage[], providerID: string, modelID: string): ModelMessage[] { - if (modelID.includes("claude")) { + function normalizeMessages(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] { + if (model.api.id.includes("claude")) { return msgs.map((msg) => { if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) { msg.content = msg.content.map((part) => { @@ -20,7 +21,7 @@ export namespace ProviderTransform { return msg }) } - if (providerID === "mistral" || modelID.toLowerCase().includes("mistral")) { + if (model.providerID === "mistral" || model.api.id.toLowerCase().includes("mistral")) { const result: ModelMessage[] = [] for (let i = 0; i < msgs.length; i++) { const msg = msgs[i] @@ -62,6 +63,46 @@ export namespace ProviderTransform { return result } + // DeepSeek: Handle reasoning_content for tool call continuations + // - With tool calls: Include reasoning_content in providerOptions so model can continue reasoning + // - Without tool calls: Strip reasoning (new turn doesn't need previous reasoning) + // See: https://api-docs.deepseek.com/guides/thinking_mode + if (model.providerID === "deepseek" || model.api.id.toLowerCase().includes("deepseek")) { + return msgs.map((msg) => { + if (msg.role === "assistant" && Array.isArray(msg.content)) { + const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning") + const hasToolCalls = msg.content.some((part: any) => part.type === "tool-call") + const reasoningText = reasoningParts.map((part: any) => part.text).join("") + + // Filter out reasoning parts from content + const filteredContent = msg.content.filter((part: any) => part.type !== "reasoning") + + // If this message has tool calls and reasoning, include reasoning_content + // so DeepSeek can continue reasoning after tool execution + if (hasToolCalls && reasoningText) { + return { + ...msg, + content: filteredContent, + providerOptions: { + ...msg.providerOptions, + openaiCompatible: { + ...(msg.providerOptions as any)?.openaiCompatible, + reasoning_content: reasoningText, + }, + }, + } + } + + // For final answers (no tool calls), just strip reasoning + return { + ...msg, + content: filteredContent, + } + } + return msg + }) + } + return msgs } @@ -107,67 +148,68 @@ export namespace ProviderTransform { return msgs } - export function message(msgs: ModelMessage[], providerID: string, modelID: string) { - msgs = normalizeMessages(msgs, providerID, modelID) - if (providerID === "anthropic" || modelID.includes("anthropic") || modelID.includes("claude")) { - msgs = applyCaching(msgs, providerID) + export function message(msgs: ModelMessage[], model: Provider.Model) { + msgs = normalizeMessages(msgs, model) + if (model.providerID === "anthropic" || 
model.api.id.includes("anthropic") || model.api.id.includes("claude")) { + msgs = applyCaching(msgs, model.providerID) } return msgs } - export function temperature(_providerID: string, modelID: string) { - if (modelID.toLowerCase().includes("qwen")) return 0.55 - if (modelID.toLowerCase().includes("claude")) return undefined - if (modelID.toLowerCase().includes("gemini-3-pro")) return 1.0 + export function temperature(model: Provider.Model) { + if (model.api.id.toLowerCase().includes("qwen")) return 0.55 + if (model.api.id.toLowerCase().includes("claude")) return undefined + if (model.api.id.toLowerCase().includes("gemini-3-pro")) return 1.0 return 0 } - export function topP(_providerID: string, modelID: string) { - if (modelID.toLowerCase().includes("qwen")) return 1 + export function topP(model: Provider.Model) { + if (model.api.id.toLowerCase().includes("qwen")) return 1 return undefined } export function options( - providerID: string, - modelID: string, - npm: string, + model: Provider.Model, sessionID: string, providerOptions?: Record, ): Record { const result: Record = {} // switch to providerID later, for now use this - if (npm === "@openrouter/ai-sdk-provider") { + if (model.api.npm === "@openrouter/ai-sdk-provider") { result["usage"] = { include: true, } } - if (providerID === "openai" || providerOptions?.setCacheKey) { + if (model.providerID === "openai" || providerOptions?.setCacheKey) { result["promptCacheKey"] = sessionID } - if (providerID === "google" || (providerID.startsWith("opencode") && modelID.includes("gemini-3"))) { + if ( + model.providerID === "google" || + (model.providerID.startsWith("opencode") && model.api.id.includes("gemini-3")) + ) { result["thinkingConfig"] = { includeThoughts: true, } } - if (modelID.includes("gpt-5") && !modelID.includes("gpt-5-chat")) { - if (modelID.includes("codex")) { + if (model.api.id.includes("gpt-5") && !model.api.id.includes("gpt-5-chat")) { + if (model.providerID.includes("codex")) { result["store"] = false } - if (!modelID.includes("codex") && !modelID.includes("gpt-5-pro")) { + if (!model.api.id.includes("codex") && !model.api.id.includes("gpt-5-pro")) { result["reasoningEffort"] = "medium" } - if (modelID.endsWith("gpt-5.1") && providerID !== "azure") { + if (model.api.id.endsWith("gpt-5.1") && model.providerID !== "azure") { result["textVerbosity"] = "low" } - if (providerID.startsWith("opencode")) { + if (model.providerID.startsWith("opencode")) { result["promptCacheKey"] = sessionID result["include"] = ["reasoning.encrypted_content"] result["reasoningSummary"] = "auto" @@ -176,17 +218,17 @@ export namespace ProviderTransform { return result } - export function smallOptions(input: { providerID: string; modelID: string }) { + export function smallOptions(model: Provider.Model) { const options: Record = {} - if (input.providerID === "openai" || input.modelID.includes("gpt-5")) { - if (input.modelID.includes("5.1")) { + if (model.providerID === "openai" || model.api.id.includes("gpt-5")) { + if (model.api.id.includes("5.1")) { options["reasoningEffort"] = "low" } else { options["reasoningEffort"] = "minimal" } } - if (input.providerID === "google") { + if (model.providerID === "google") { options["thinkingConfig"] = { thinkingBudget: 0, } @@ -254,7 +296,7 @@ export namespace ProviderTransform { return standardLimit } - export function schema(providerID: string, modelID: string, schema: JSONSchema.BaseSchema) { + export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema) { /* if (["openai", 
"azure"].includes(providerID)) { if (schema.type === "object" && schema.properties) { @@ -274,7 +316,7 @@ export namespace ProviderTransform { */ // Convert integer enums to string enums for Google/Gemini - if (providerID === "google" || modelID.includes("gemini")) { + if (model.providerID === "google" || model.api.id.includes("gemini")) { const sanitizeGemini = (obj: any): any => { if (obj === null || typeof obj !== "object") { return obj diff --git a/packages/opencode/src/server/server.ts b/packages/opencode/src/server/server.ts index fe4ad195aab..31d0822762b 100644 --- a/packages/opencode/src/server/server.ts +++ b/packages/opencode/src/server/server.ts @@ -8,7 +8,7 @@ import { proxy } from "hono/proxy" import { Session } from "../session" import z from "zod" import { Provider } from "../provider/provider" -import { mapValues } from "remeda" +import { mapValues, pipe } from "remeda" import { NamedError } from "@opencode-ai/util/error" import { ModelsDev } from "../provider/models" import { Ripgrep } from "../file/ripgrep" @@ -296,8 +296,8 @@ export namespace Server { }), ), async (c) => { - const { provider, model } = c.req.valid("query") - const tools = await ToolRegistry.tools(provider, model) + const { provider } = c.req.valid("query") + const tools = await ToolRegistry.tools(provider) return c.json( tools.map((t) => ({ id: t.id, @@ -1025,7 +1025,7 @@ export namespace Server { async (c) => { c.status(204) c.header("Content-Type", "application/json") - return stream(c, async (stream) => { + return stream(c, async () => { const sessionID = c.req.valid("param").id const body = c.req.valid("json") SessionPrompt.prompt({ ...body, sessionID }) @@ -1231,7 +1231,7 @@ export namespace Server { "application/json": { schema: resolver( z.object({ - providers: ModelsDev.Provider.array(), + providers: Provider.Info.array(), default: z.record(z.string(), z.string()), }), ), @@ -1242,7 +1242,7 @@ export namespace Server { }), async (c) => { using _ = log.time("providers") - const providers = await Provider.list().then((x) => mapValues(x, (item) => item.info)) + const providers = await Provider.list().then((x) => mapValues(x, (item) => item)) return c.json({ providers: Object.values(providers), default: mapValues(providers, (item) => Provider.sort(Object.values(item.models))[0].id), @@ -1272,7 +1272,10 @@ export namespace Server { }, }), async (c) => { - const providers = await ModelsDev.get() + const providers = pipe( + await ModelsDev.get(), + mapValues((x) => Provider.fromModelsDevProvider(x)), + ) const connected = await Provider.list().then((x) => Object.keys(x)) return c.json({ all: Object.values(providers), diff --git a/packages/opencode/src/session/compaction.ts b/packages/opencode/src/session/compaction.ts index 4dd2e8f8428..36add43130a 100644 --- a/packages/opencode/src/session/compaction.ts +++ b/packages/opencode/src/session/compaction.ts @@ -1,4 +1,4 @@ -import { streamText, wrapLanguageModel, type ModelMessage } from "ai" +import { wrapLanguageModel, type ModelMessage } from "ai" import { Session } from "." 
import { Identifier } from "../id/id" import { Instance } from "../project/instance" @@ -7,7 +7,6 @@ import { MessageV2 } from "./message-v2" import { SystemPrompt } from "./system" import { Bus } from "../bus" import z from "zod" -import type { ModelsDev } from "../provider/models" import { SessionPrompt } from "./prompt" import { Flag } from "../flag/flag" import { Token } from "../util/token" @@ -29,7 +28,7 @@ export namespace SessionCompaction { ), } - export function isOverflow(input: { tokens: MessageV2.Assistant["tokens"]; model: ModelsDev.Model }) { + export function isOverflow(input: { tokens: MessageV2.Assistant["tokens"]; model: Provider.Model }) { if (Flag.OPENCODE_DISABLE_AUTOCOMPACT) return false const context = input.model.limit.context if (context === 0) return false @@ -98,6 +97,7 @@ export namespace SessionCompaction { auto: boolean }) { const model = await Provider.getModel(input.model.providerID, input.model.modelID) + const language = await Provider.getLanguage(model) const system = [...SystemPrompt.compaction(model.providerID)] const lastFinished = input.messages.find((m) => m.info.role === "assistant" && m.info.finish)?.info as | MessageV2.Assistant @@ -133,79 +133,72 @@ export namespace SessionCompaction { const processor = SessionProcessor.create({ assistantMessage: msg, sessionID: input.sessionID, - providerID: input.model.providerID, - model: model.info, + model: model, abort: input.abort, }) - const result = await processor.process(() => - streamText({ - onError(error) { - log.error("stream error", { - error, - }) - }, - // set to 0, we handle loop - maxRetries: 0, - providerOptions: ProviderTransform.providerOptions( - model.npm, - model.providerID, - pipe( - {}, - mergeDeep(ProviderTransform.options(model.providerID, model.modelID, model.npm ?? "", input.sessionID)), - mergeDeep(model.info.options), - ), + const result = await processor.process({ + onError(error) { + log.error("stream error", { + error, + }) + }, + // set to 0, we handle loop + maxRetries: 0, + providerOptions: ProviderTransform.providerOptions( + model.api.npm, + model.providerID, + pipe({}, mergeDeep(ProviderTransform.options(model, input.sessionID)), mergeDeep(model.options)), + ), + headers: model.headers, + abortSignal: input.abort, + tools: model.capabilities.toolcall ? {} : undefined, + messages: [ + ...system.map( + (x): ModelMessage => ({ + role: "system", + content: x, + }), ), - headers: model.info.headers, - abortSignal: input.abort, - tools: model.info.tool_call ? {} : undefined, - messages: [ - ...system.map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), - ...MessageV2.toModelMessage( - input.messages.filter((m) => { - if (m.info.role !== "assistant" || m.info.error === undefined) { - return true - } - if ( - MessageV2.AbortedError.isInstance(m.info.error) && - m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") - ) { - return true - } + ...MessageV2.toModelMessage( + input.messages.filter((m) => { + if (m.info.role !== "assistant" || m.info.error === undefined) { + return true + } + if ( + MessageV2.AbortedError.isInstance(m.info.error) && + m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") + ) { + return true + } - return false - }), - ), - { - role: "user", - content: [ - { - type: "text", - text: "Summarize our conversation above. 
This summary will be the only context available when the conversation continues, so preserve critical information including: what was accomplished, current work in progress, files involved, next steps, and any key user requests or constraints. Be concise but detailed enough that work can continue seamlessly.", - }, - ], - }, - ], - model: wrapLanguageModel({ - model: model.language, - middleware: [ + return false + }), + ), + { + role: "user", + content: [ { - async transformParams(args) { - if (args.type === "stream") { - // @ts-expect-error - args.params.prompt = ProviderTransform.message(args.params.prompt, model.providerID, model.modelID) - } - return args.params - }, + type: "text", + text: "Summarize our conversation above. This summary will be the only context available when the conversation continues, so preserve critical information including: what was accomplished, current work in progress, files involved, next steps, and any key user requests or constraints. Be concise but detailed enough that work can continue seamlessly.", }, ], - }), + }, + ], + model: wrapLanguageModel({ + model: language, + middleware: [ + { + async transformParams(args) { + if (args.type === "stream") { + // @ts-expect-error + args.params.prompt = ProviderTransform.message(args.params.prompt, model) + } + return args.params + }, + }, + ], }), - ) + }) if (result === "continue" && input.auto) { const continueMsg = await Session.updateMessage({ id: Identifier.ascending("message"), diff --git a/packages/opencode/src/session/index.ts b/packages/opencode/src/session/index.ts index f09818caa2e..6a148e973ab 100644 --- a/packages/opencode/src/session/index.ts +++ b/packages/opencode/src/session/index.ts @@ -6,8 +6,7 @@ import { Config } from "../config/config" import { Flag } from "../flag/flag" import { Identifier } from "../id/id" import { Installation } from "../installation" -import type { ModelsDev } from "../provider/models" -import { Share } from "../share/share" + import { Storage } from "../storage/storage" import { Log } from "../util/log" import { MessageV2 } from "./message-v2" @@ -16,7 +15,8 @@ import { SessionPrompt } from "./prompt" import { fn } from "@/util/fn" import { Command } from "../command" import { Snapshot } from "@/snapshot" -import { ShareNext } from "@/share/share-next" + +import type { Provider } from "@/provider/provider" export namespace Session { const log = Log.create({ service: "session" }) @@ -223,6 +223,7 @@ export namespace Session { } if (cfg.enterprise?.url) { + const { ShareNext } = await import("@/share/share-next") const share = await ShareNext.create(id) await update(id, (draft) => { draft.share = { @@ -233,6 +234,7 @@ export namespace Session { const session = await get(id) if (session.share) return session.share + const { Share } = await import("../share/share") const share = await Share.create(id) await update(id, (draft) => { draft.share = { @@ -253,6 +255,7 @@ export namespace Session { export const unshare = fn(Identifier.schema("session"), async (id) => { const cfg = await Config.get() if (cfg.enterprise?.url) { + const { ShareNext } = await import("@/share/share-next") await ShareNext.remove(id) await update(id, (draft) => { draft.share = undefined @@ -264,6 +267,7 @@ export namespace Session { await update(id, (draft) => { draft.share = undefined }) + const { Share } = await import("../share/share") await Share.remove(id, share.secret) }) @@ -389,7 +393,7 @@ export namespace Session { export const getUsage = fn( z.object({ - model: z.custom(), + model: 
z.custom<Provider.Model>(), usage: z.custom(), metadata: z.custom().optional(), }), @@ -420,16 +424,16 @@ } const costInfo = - input.model.cost?.context_over_200k && tokens.input + tokens.cache.read > 200_000 - ? input.model.cost.context_over_200k + input.model.cost?.experimentalOver200K && tokens.input + tokens.cache.read > 200_000 + ? input.model.cost.experimentalOver200K : input.model.cost return { cost: safe( new Decimal(0) .add(new Decimal(tokens.input).mul(costInfo?.input ?? 0).div(1_000_000)) .add(new Decimal(tokens.output).mul(costInfo?.output ?? 0).div(1_000_000)) - .add(new Decimal(tokens.cache.read).mul(costInfo?.cache_read ?? 0).div(1_000_000)) - .add(new Decimal(tokens.cache.write).mul(costInfo?.cache_write ?? 0).div(1_000_000)) + .add(new Decimal(tokens.cache.read).mul(costInfo?.cache.read ?? 0).div(1_000_000)) + .add(new Decimal(tokens.cache.write).mul(costInfo?.cache.write ?? 0).div(1_000_000)) // TODO: update models.dev to have better pricing model, for now: // charge reasoning tokens at the same rate as output tokens .add(new Decimal(tokens.reasoning).mul(costInfo?.output ?? 0).div(1_000_000)) diff --git a/packages/opencode/src/session/processor.ts b/packages/opencode/src/session/processor.ts index 92ac1e62fc9..80f66bf6cdd 100644 --- a/packages/opencode/src/session/processor.ts +++ b/packages/opencode/src/session/processor.ts @@ -1,6 +1,5 @@ -import type { ModelsDev } from "@/provider/models" import { MessageV2 } from "./message-v2" -import { type StreamTextResult, type Tool as AITool, APICallError } from "ai" +import { streamText } from "ai" import { Log } from "@/util/log" import { Identifier } from "@/id/id" import { Session } from "." @@ -12,6 +11,7 @@ import { Bus } from "@/bus" import { SessionRetry } from "./retry" import { SessionStatus } from "./status" import { Token } from "@/util/token" +import type { Provider } from "@/provider/provider" export namespace SessionProcessor { const DOOM_LOOP_THRESHOLD = 3 @@ -20,11 +20,19 @@ export type Info = Awaited<ReturnType<typeof create>> export type Result = Awaited<ReturnType<Info["process"]>> + export type StreamInput = Parameters<typeof streamText>[0] + + export type TBD = { + model: { + modelID: string + providerID: string + } + } + export function create(input: { assistantMessage: MessageV2.Assistant sessionID: string - providerID: string - model: ModelsDev.Model + model: Provider.Model abort: AbortSignal }) { const toolcalls: Record<string, MessageV2.ToolPart> = {} @@ -39,7 +47,7 @@ partFromToolCall(toolCallID: string) { return toolcalls[toolCallID] }, - async process(fn: () => StreamTextResult<Record<string, AITool>, never>) { + async process(streamInput: StreamInput) { log.info("process") // Initialize from existing estimates (convert tokens to characters) to accumulate across multiple process() calls let reasoningTotal = Token.toCharCount(input.assistantMessage.reasoningEstimate ?? 
0) @@ -48,7 +56,7 @@ export namespace SessionProcessor { try { let currentText: MessageV2.TextPart | undefined let reasoningMap: Record = {} - const stream = fn() + const stream = streamText(streamInput) for await (const value of stream.fullStream) { input.abort.throwIfAborted() @@ -350,11 +358,12 @@ export namespace SessionProcessor { continue } } - } catch (e) { + } catch (e: any) { log.error("process", { error: e, + stack: JSON.stringify(e.stack), }) - const error = MessageV2.fromError(e, { providerID: input.providerID }) + const error = MessageV2.fromError(e, { providerID: input.model.providerID }) const retry = SessionRetry.retryable(error) if (retry !== undefined) { attempt++ diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts index 5b80added51..b125425814c 100644 --- a/packages/opencode/src/session/prompt.ts +++ b/packages/opencode/src/session/prompt.ts @@ -11,7 +11,6 @@ import { Agent } from "../agent/agent" import { Provider } from "../provider/provider" import { generateText, - streamText, type ModelMessage, type Tool as AITool, tool, @@ -289,6 +288,7 @@ export namespace SessionPrompt { }) const model = await Provider.getModel(lastUser.model.providerID, lastUser.model.modelID) + const language = await Provider.getLanguage(model) const task = tasks.pop() // pending subtask @@ -312,7 +312,7 @@ export namespace SessionPrompt { reasoning: 0, cache: { read: 0, write: 0 }, }, - modelID: model.modelID, + modelID: model.id, providerID: model.providerID, time: { created: Date.now(), @@ -356,7 +356,7 @@ export namespace SessionPrompt { agent: lastUser.agent, messageID: assistantMessage.id, callID: part.callID, - extra: { providerID: model.providerID, modelID: model.modelID }, + extra: { providerID: model.providerID, modelID: model.id }, metadata: async () => {}, }, ) @@ -388,7 +388,7 @@ export namespace SessionPrompt { agent: lastUser.agent, model: { providerID: model.providerID, - modelID: model.modelID, + modelID: model.id, }, sessionID, auto: task.auto, @@ -401,7 +401,7 @@ export namespace SessionPrompt { if ( lastFinished && lastFinished.summary !== true && - SessionCompaction.isOverflow({ tokens: lastFinished.tokens, model: model.info }) + SessionCompaction.isOverflow({ tokens: lastFinished.tokens, model }) ) { await SessionCompaction.create({ sessionID, @@ -446,7 +446,7 @@ export namespace SessionPrompt { reasoning: 0, cache: { read: 0, write: 0 }, }, - modelID: model.modelID, + modelID: model.id, providerID: model.providerID, time: { created: Date.now(), @@ -458,20 +458,18 @@ export namespace SessionPrompt { sentEstimate: (lastAssistant?.sentEstimate || 0) + (lastUser.sentEstimate || 0), })) as MessageV2.Assistant, sessionID: sessionID, - model: model.info, - providerID: model.providerID, + model, abort, }) const system = await resolveSystemPrompt({ - providerID: model.providerID, - modelID: model.info.id, + model, agent, system: lastUser.system, }) const tools = await resolveTools({ agent, sessionID, - model: lastUser.model, + model, tools: lastUser.tools, processor, }) @@ -481,21 +479,19 @@ export namespace SessionPrompt { { sessionID: sessionID, agent: lastUser.agent, - model: model.info, + model: model, provider, message: lastUser, }, { - temperature: model.info.temperature - ? (agent.temperature ?? ProviderTransform.temperature(model.providerID, model.modelID)) + temperature: model.capabilities.temperature + ? (agent.temperature ?? ProviderTransform.temperature(model)) : undefined, - topP: agent.topP ?? 
ProviderTransform.topP(model.providerID, model.modelID), + topP: agent.topP ?? ProviderTransform.topP(model), options: pipe( {}, - mergeDeep( - ProviderTransform.options(model.providerID, model.modelID, model.npm ?? "", sessionID, provider?.options), - ), - mergeDeep(model.info.options), + mergeDeep(ProviderTransform.options(model, sessionID, provider?.options)), + mergeDeep(model.options), mergeDeep(agent.options), ), }, @@ -508,113 +504,111 @@ export namespace SessionPrompt { }) } - const result = await processor.process(() => - streamText({ - onError(error) { - log.error("stream error", { - error, + const result = await processor.process({ + onError(error) { + log.error("stream error", { + error, + }) + }, + async experimental_repairToolCall(input) { + const lower = input.toolCall.toolName.toLowerCase() + if (lower !== input.toolCall.toolName && tools[lower]) { + log.info("repairing tool call", { + tool: input.toolCall.toolName, + repaired: lower, }) - }, - async experimental_repairToolCall(input) { - const lower = input.toolCall.toolName.toLowerCase() - if (lower !== input.toolCall.toolName && tools[lower]) { - log.info("repairing tool call", { - tool: input.toolCall.toolName, - repaired: lower, - }) - return { - ...input.toolCall, - toolName: lower, - } - } return { ...input.toolCall, - input: JSON.stringify({ - tool: input.toolCall.toolName, - error: input.error.message, - }), - toolName: "invalid", + toolName: lower, } - }, - headers: { - ...(model.providerID.startsWith("opencode") - ? { - "x-opencode-project": Instance.project.id, - "x-opencode-session": sessionID, - "x-opencode-request": lastUser.id, - } - : undefined), - ...model.info.headers, - }, - // set to 0, we handle loop - maxRetries: 0, - activeTools: Object.keys(tools).filter((x) => x !== "invalid"), - maxOutputTokens: ProviderTransform.maxOutputTokens( - model.providerID, - params.options, - model.info.limit.output, - OUTPUT_TOKEN_MAX, + } + return { + ...input.toolCall, + input: JSON.stringify({ + tool: input.toolCall.toolName, + error: input.error.message, + }), + toolName: "invalid", + } + }, + headers: { + ...(model.providerID.startsWith("opencode") + ? 
{ + "x-opencode-project": Instance.project.id, + "x-opencode-session": sessionID, + "x-opencode-request": lastUser.id, + } + : undefined), + ...model.headers, + }, + // set to 0, we handle loop + maxRetries: 0, + activeTools: Object.keys(tools).filter((x) => x !== "invalid"), + maxOutputTokens: ProviderTransform.maxOutputTokens( + model.api.npm, + params.options, + model.limit.output, + OUTPUT_TOKEN_MAX, + ), + abortSignal: abort, + providerOptions: ProviderTransform.providerOptions(model.api.npm, model.providerID, params.options), + stopWhen: stepCountIs(1), + temperature: params.temperature, + topP: params.topP, + messages: [ + ...system.map( + (x): ModelMessage => ({ + role: "system", + content: x, + }), ), - abortSignal: abort, - providerOptions: ProviderTransform.providerOptions(model.npm, model.providerID, params.options), - stopWhen: stepCountIs(1), - temperature: params.temperature, - topP: params.topP, - messages: [ - ...system.map( - (x): ModelMessage => ({ - role: "system", - content: x, - }), - ), - ...MessageV2.toModelMessage( - msgs.filter((m) => { - if (m.info.role !== "assistant" || m.info.error === undefined) { - return true - } - if ( - MessageV2.AbortedError.isInstance(m.info.error) && - m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") - ) { - return true - } + ...MessageV2.toModelMessage( + msgs.filter((m) => { + if (m.info.role !== "assistant" || m.info.error === undefined) { + return true + } + if ( + MessageV2.AbortedError.isInstance(m.info.error) && + m.parts.some((part) => part.type !== "step-start" && part.type !== "reasoning") + ) { + return true + } - return false - }), - ), - ], - tools: model.info.tool_call === false ? undefined : tools, - model: wrapLanguageModel({ - model: model.language, - middleware: [ - { - async transformParams(args) { - if (args.type === "stream") { - // @ts-expect-error - args.params.prompt = ProviderTransform.message(args.params.prompt, model.providerID, model.modelID) - } - // Transform tool schemas for provider compatibility - if (args.params.tools && Array.isArray(args.params.tools)) { - args.params.tools = args.params.tools.map((tool: any) => { - // Tools at middleware level have inputSchema, not parameters - if (tool.inputSchema && typeof tool.inputSchema === "object") { - // Transform the inputSchema for provider compatibility - return { - ...tool, - inputSchema: ProviderTransform.schema(model.providerID, model.modelID, tool.inputSchema), - } + return false + }), + ), + ], + tools: model.capabilities.toolcall === false ? 
undefined : tools, + model: wrapLanguageModel({ + model: language, + middleware: [ + { + async transformParams(args) { + if (args.type === "stream") { + // @ts-expect-error - prompt types are compatible at runtime + args.params.prompt = ProviderTransform.message(args.params.prompt, model) + } + // Transform tool schemas for provider compatibility + if (args.params.tools && Array.isArray(args.params.tools)) { + args.params.tools = args.params.tools.map((tool: any) => { + // Tools at middleware level have inputSchema, not parameters + if (tool.inputSchema && typeof tool.inputSchema === "object") { + // Transform the inputSchema for provider compatibility + return { + ...tool, + inputSchema: ProviderTransform.schema(model, tool.inputSchema), } - // If no inputSchema, return tool unchanged - return tool - }) - } - return args.params - }, + } + // If no inputSchema, return tool unchanged + return tool + }) + } + return args.params }, - ], - }), + }, + ], }), - ) + }) if (result === "stop") break continue } @@ -637,18 +631,13 @@ return Provider.defaultModel() } - async function resolveSystemPrompt(input: { - system?: string - agent: Agent.Info - providerID: string - modelID: string - }) { - let system = SystemPrompt.header(input.providerID) + async function resolveSystemPrompt(input: { system?: string; agent: Agent.Info; model: Provider.Model }) { + let system = SystemPrompt.header(input.model.providerID) system.push( ...(() => { if (input.system) return [input.system] if (input.agent.prompt) return [input.agent.prompt] - return SystemPrompt.provider(input.modelID) + return SystemPrompt.provider(input.model) })(), ) system.push(...(await SystemPrompt.environment())) @@ -661,10 +650,7 @@ async function resolveTools(input: { agent: Agent.Info - model: { - providerID: string - modelID: string - } + model: Provider.Model sessionID: string tools?: Record<string, boolean> processor: SessionProcessor.Info }) { const tools: Record<string, AITool> = {} const enabledTools = pipe( input.agent.tools, - mergeDeep(await ToolRegistry.enabled(input.model.providerID, input.model.modelID, input.agent)), + mergeDeep(await ToolRegistry.enabled(input.agent)), mergeDeep(input.tools ?? {}), ) - for (const item of await ToolRegistry.tools(input.model.providerID, input.model.modelID)) { + for (const item of await ToolRegistry.tools(input.model.providerID)) { if (Wildcard.all(item.id, enabledTools) === false) continue - const schema = ProviderTransform.schema( - input.model.providerID, - input.model.modelID, - z.toJSONSchema(item.parameters), - ) + const schema = ProviderTransform.schema(input.model, z.toJSONSchema(item.parameters)) tools[item.id] = tool({ id: item.id as any, description: item.description, @@ -1480,25 +1462,18 @@ if (!isFirst) return const small = (await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID)) + const language = await Provider.getLanguage(small) const provider = await Provider.getProvider(small.providerID) const options = pipe( {}, - mergeDeep( - ProviderTransform.options( - small.providerID, - small.modelID, - small.npm ?? 
"", - input.session.id, - provider?.options, - ), - ), - mergeDeep(ProviderTransform.smallOptions({ providerID: small.providerID, modelID: small.modelID })), - mergeDeep(small.info.options), + mergeDeep(ProviderTransform.options(small, input.session.id, provider?.options)), + mergeDeep(ProviderTransform.smallOptions(small)), + mergeDeep(small.options), ) await generateText({ // use higher # for reasoning models since reasoning tokens eat up a lot of the budget - maxOutputTokens: small.info.reasoning ? 3000 : 20, - providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, options), + maxOutputTokens: small.capabilities.reasoning ? 3000 : 20, + providerOptions: ProviderTransform.providerOptions(small.api.npm, small.providerID, options), messages: [ ...SystemPrompt.title(small.providerID).map( (x): ModelMessage => ({ @@ -1529,8 +1504,8 @@ export namespace SessionPrompt { }, ]), ], - headers: small.info.headers, - model: small.language, + headers: small.headers, + model: language, }) .then((result) => { if (result.text) @@ -1547,7 +1522,7 @@ export namespace SessionPrompt { }) }) .catch((error) => { - log.error("failed to generate title", { error, model: small.info.id }) + log.error("failed to generate title", { error, model: small.id }) }) } } diff --git a/packages/opencode/src/session/retry.ts b/packages/opencode/src/session/retry.ts index ace7350b220..3cabacdb87b 100644 --- a/packages/opencode/src/session/retry.ts +++ b/packages/opencode/src/session/retry.ts @@ -65,6 +65,9 @@ export namespace SessionRetry { if (json.type === "error" && json.error?.type === "too_many_requests") { return "Too Many Requests" } + if (json.code === "Some resource has been exhausted") { + return "Provider is overloaded" + } } catch {} } diff --git a/packages/opencode/src/session/summary.ts b/packages/opencode/src/session/summary.ts index d9247f182dc..8d366e4991c 100644 --- a/packages/opencode/src/session/summary.ts +++ b/packages/opencode/src/session/summary.ts @@ -76,19 +76,20 @@ export namespace SessionSummary { const small = (await Provider.getSmallModel(assistantMsg.providerID)) ?? (await Provider.getModel(assistantMsg.providerID, assistantMsg.modelID)) + const language = await Provider.getLanguage(small) const options = pipe( {}, - mergeDeep(ProviderTransform.options(small.providerID, small.modelID, small.npm ?? "", assistantMsg.sessionID)), - mergeDeep(ProviderTransform.smallOptions({ providerID: small.providerID, modelID: small.modelID })), - mergeDeep(small.info.options), + mergeDeep(ProviderTransform.options(small, assistantMsg.sessionID)), + mergeDeep(ProviderTransform.smallOptions(small)), + mergeDeep(small.options), ) const textPart = msgWithParts.parts.find((p) => p.type === "text" && !p.synthetic) as MessageV2.TextPart if (textPart && !userMsg.summary?.title) { const result = await generateText({ - maxOutputTokens: small.info.reasoning ? 1500 : 20, - providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, options), + maxOutputTokens: small.capabilities.reasoning ? 
1500 : 20, + providerOptions: ProviderTransform.providerOptions(small.api.npm, small.providerID, options), messages: [ ...SystemPrompt.title(small.providerID).map( (x): ModelMessage => ({ @@ -106,8 +107,8 @@ export namespace SessionSummary { `, }, ], - headers: small.info.headers, - model: small.language, + headers: small.headers, + model: language, }) log.info("title", { title: result.text }) userMsg.summary.title = result.text @@ -132,9 +133,9 @@ export namespace SessionSummary { } } const result = await generateText({ - model: small.language, + model: language, maxOutputTokens: 100, - providerOptions: ProviderTransform.providerOptions(small.npm, small.providerID, options), + providerOptions: ProviderTransform.providerOptions(small.api.npm, small.providerID, options), messages: [ ...SystemPrompt.summarize(small.providerID).map( (x): ModelMessage => ({ @@ -148,7 +149,7 @@ export namespace SessionSummary { content: `Summarize the above conversation according to your system prompts.`, }, ], - headers: small.info.headers, + headers: small.headers, }).catch(() => {}) if (result) summary = result.text } diff --git a/packages/opencode/src/session/system.ts b/packages/opencode/src/session/system.ts index 399cad8cde5..3146110cf3f 100644 --- a/packages/opencode/src/session/system.ts +++ b/packages/opencode/src/session/system.ts @@ -17,6 +17,7 @@ import PROMPT_COMPACTION from "./prompt/compaction.txt" import PROMPT_SUMMARIZE from "./prompt/summarize.txt" import PROMPT_TITLE from "./prompt/title.txt" import PROMPT_CODEX from "./prompt/codex.txt" +import type { Provider } from "@/provider/provider" export namespace SystemPrompt { export function header(providerID: string) { @@ -24,12 +25,13 @@ export namespace SystemPrompt { return [] } - export function provider(modelID: string) { - if (modelID.includes("gpt-5")) return [PROMPT_CODEX] - if (modelID.includes("gpt-") || modelID.includes("o1") || modelID.includes("o3")) return [PROMPT_BEAST] - if (modelID.includes("gemini-")) return [PROMPT_GEMINI] - if (modelID.includes("claude")) return [PROMPT_ANTHROPIC] - if (modelID.includes("polaris-alpha")) return [PROMPT_POLARIS] + export function provider(model: Provider.Model) { + if (model.api.id.includes("gpt-5")) return [PROMPT_CODEX] + if (model.api.id.includes("gpt-") || model.api.id.includes("o1") || model.api.id.includes("o3")) + return [PROMPT_BEAST] + if (model.api.id.includes("gemini-")) return [PROMPT_GEMINI] + if (model.api.id.includes("claude")) return [PROMPT_ANTHROPIC] + if (model.api.id.includes("polaris-alpha")) return [PROMPT_POLARIS] return [PROMPT_ANTHROPIC_WITHOUT_TODO] } diff --git a/packages/opencode/src/share/share-next.ts b/packages/opencode/src/share/share-next.ts index 9543149a813..996400280d1 100644 --- a/packages/opencode/src/share/share-next.ts +++ b/packages/opencode/src/share/share-next.ts @@ -1,7 +1,6 @@ import { Bus } from "@/bus" import { Config } from "@/config/config" import { ulid } from "ulid" -import type { ModelsDev } from "@/provider/models" import { Provider } from "@/provider/provider" import { Session } from "@/session" import { MessageV2 } from "@/session/message-v2" @@ -36,7 +35,7 @@ export namespace ShareNext { type: "model", data: [ await Provider.getModel(evt.properties.info.model.providerID, evt.properties.info.model.modelID).then( - (m) => m.info, + (m) => m, ), ], }, @@ -105,7 +104,7 @@ export namespace ShareNext { } | { type: "model" - data: ModelsDev.Model[] + data: SDK.Model[] } const queue = new Map }>() @@ -171,7 +170,7 @@ export namespace ShareNext { 
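The pattern running through these session and share hunks is the heart of the refactor: Provider.getModel now returns plain model metadata, and the SDK LanguageModel is resolved (and cached) separately through Provider.getLanguage. A minimal usage sketch, with illustrative provider/model ids:

```ts
// Two-step resolution introduced by this PR (ids illustrative).
const model = await Provider.getModel("anthropic", "claude-sonnet-4-20250514")
console.log(model.limit.context, model.capabilities.toolcall) // plain metadata, no SDK loaded yet
const language = await Provider.getLanguage(model) // resolves and caches the SDK LanguageModel
```

Callers that only need metadata (cost, limits, capabilities) no longer trigger SDK loading, which is why the share sync below can map messages to models without touching the SDK.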
messages .filter((m) => m.info.role === "user") .map((m) => (m.info as SDK.UserMessage).model) - .map((m) => Provider.getModel(m.providerID, m.modelID).then((m) => m.info)), + .map((m) => Provider.getModel(m.providerID, m.modelID).then((m) => m)), ) await sync(sessionID, [ { diff --git a/packages/opencode/src/tool/batch.ts b/packages/opencode/src/tool/batch.ts index 7d6449e7dcb..cc61b090aa3 100644 --- a/packages/opencode/src/tool/batch.ts +++ b/packages/opencode/src/tool/batch.ts @@ -37,7 +37,7 @@ export const BatchTool = Tool.define("batch", async () => { const discardedCalls = params.tool_calls.slice(10) const { ToolRegistry } = await import("./registry") - const availableTools = await ToolRegistry.tools("", "") + const availableTools = await ToolRegistry.tools("") const toolMap = new Map(availableTools.map((t) => [t.id, t])) const executeCall = async (call: (typeof toolCalls)[0]) => { diff --git a/packages/opencode/src/tool/read.ts b/packages/opencode/src/tool/read.ts index cf7b20e8b30..7e01246b539 100644 --- a/packages/opencode/src/tool/read.ts +++ b/packages/opencode/src/tool/read.ts @@ -101,7 +101,7 @@ const modelID = ctx.extra["modelID"] as string const model = await Provider.getModel(providerID, modelID).catch(() => undefined) if (!model) return false - return model.info.modalities?.input?.includes("image") ?? false + return model.capabilities.input.image })() if (isImage) { if (!supportsImages) { diff --git a/packages/opencode/src/tool/registry.ts b/packages/opencode/src/tool/registry.ts index 26b6ea9fcf2..33a54675ffa 100644 --- a/packages/opencode/src/tool/registry.ts +++ b/packages/opencode/src/tool/registry.ts @@ -108,7 +108,7 @@ return all().then((x) => x.map((t) => t.id)) } - export async function tools(providerID: string, _modelID: string) { + export async function tools(providerID: string) { const tools = await all() const result = await Promise.all( tools @@ -124,11 +124,7 @@ return result } - export async function enabled( - _providerID: string, - _modelID: string, - agent: Agent.Info, - ): Promise<Record<string, boolean>> { + export async function enabled(agent: Agent.Info): Promise<Record<string, boolean>> { const result: Record<string, boolean> = {} if (agent.permission.edit === "deny") { diff --git a/packages/opencode/src/tool/task.ts b/packages/opencode/src/tool/task.ts index a5645c5f8d9..b499c3582d5 100644 --- a/packages/opencode/src/tool/task.ts +++ b/packages/opencode/src/tool/task.ts @@ -10,6 +10,7 @@ import { SessionPrompt } from "../session/prompt" import { iife } from "@/util/iife" import { defer } from "@/util/defer" import { Wildcard } from "@/util/wildcard" +import { Config } from "../config/config" export { DESCRIPTION as TASK_DESCRIPTION } @@ -87,6 +88,8 @@ ctx.abort.addEventListener("abort", cancel) using _ = defer(() => ctx.abort.removeEventListener("abort", cancel)) const promptParts = await SessionPrompt.resolvePromptParts(params.prompt) + + const config = await Config.get() const result = await SessionPrompt.prompt({ messageID, sessionID: session.id, @@ -99,6 +102,7 @@ todowrite: false, todoread: false, task: false, + ...Object.fromEntries((config.experimental?.primary_tools ?? 
[]).map((t) => [t, false])), ...agent.tools, }, parts: promptParts, diff --git a/packages/opencode/test/preload.ts b/packages/opencode/test/preload.ts index 16fb3cd2184..43d01274024 100644 --- a/packages/opencode/test/preload.ts +++ b/packages/opencode/test/preload.ts @@ -1,4 +1,35 @@ -import { Log } from "../src/util/log" +// IMPORTANT: Set env vars BEFORE any imports from src/ directory +// xdg-basedir reads env vars at import time, so we must set these first +import os from "os" +import path from "path" + +const testDataDir = path.join(os.tmpdir(), "opencode-test-data-" + process.pid) +process.env["XDG_DATA_HOME"] = testDataDir +process.env["XDG_CACHE_HOME"] = path.join(testDataDir, "cache") +process.env["XDG_CONFIG_HOME"] = path.join(testDataDir, "config") +process.env["XDG_STATE_HOME"] = path.join(testDataDir, "state") + +// Clear provider env vars to ensure clean test state +delete process.env["ANTHROPIC_API_KEY"] +delete process.env["OPENAI_API_KEY"] +delete process.env["GOOGLE_API_KEY"] +delete process.env["GOOGLE_GENERATIVE_AI_API_KEY"] +delete process.env["AZURE_OPENAI_API_KEY"] +delete process.env["AWS_ACCESS_KEY_ID"] +delete process.env["AWS_PROFILE"] +delete process.env["OPENROUTER_API_KEY"] +delete process.env["GROQ_API_KEY"] +delete process.env["MISTRAL_API_KEY"] +delete process.env["PERPLEXITY_API_KEY"] +delete process.env["TOGETHER_API_KEY"] +delete process.env["XAI_API_KEY"] +delete process.env["DEEPSEEK_API_KEY"] +delete process.env["FIREWORKS_API_KEY"] +delete process.env["CEREBRAS_API_KEY"] +delete process.env["SAMBANOVA_API_KEY"] + +// Now safe to import from src/ +const { Log } = await import("../src/util/log") Log.init({ print: false, diff --git a/packages/opencode/test/provider/provider.test.ts b/packages/opencode/test/provider/provider.test.ts new file mode 100644 index 00000000000..698fdddfb42 --- /dev/null +++ b/packages/opencode/test/provider/provider.test.ts @@ -0,0 +1,1729 @@ +import { test, expect } from "bun:test" +import path from "path" +import { tmpdir } from "../fixture/fixture" +import { Instance } from "../../src/project/instance" +import { Provider } from "../../src/provider/provider" +import { Env } from "../../src/env" + +test("provider loaded from env variable", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + // Note: source becomes "custom" because CUSTOM_LOADERS run after env loading + // and anthropic has a custom loader that merges additional options + expect(providers["anthropic"].source).toBe("custom") + }, + }) +}) + +test("provider loaded from config with apiKey option", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + options: { + apiKey: "config-api-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + }, + }) +}) + +test("disabled_providers excludes provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) 
=> { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + disabled_providers: ["anthropic"], + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeUndefined() + }, + }) +}) + +test("enabled_providers restricts to only listed providers", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: ["anthropic"], + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + Env.set("OPENAI_API_KEY", "test-openai-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + expect(providers["openai"]).toBeUndefined() + }, + }) +}) + +test("model whitelist filters models for provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + whitelist: ["claude-sonnet-4-20250514"], + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + const models = Object.keys(providers["anthropic"].models) + expect(models).toContain("claude-sonnet-4-20250514") + expect(models.length).toBe(1) + }, + }) +}) + +test("model blacklist excludes specific models", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + blacklist: ["claude-sonnet-4-20250514"], + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + const models = Object.keys(providers["anthropic"].models) + expect(models).not.toContain("claude-sonnet-4-20250514") + }, + }) +}) + +test("custom model alias via config", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "my-alias": { + id: "claude-sonnet-4-20250514", + name: "My Custom Alias", + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + expect(providers["anthropic"].models["my-alias"]).toBeDefined() + expect(providers["anthropic"].models["my-alias"].name).toBe("My Custom Alias") + }, + }) +}) + +test("custom provider with npm package", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: 
"https://opencode.ai/config.json", + provider: { + "custom-provider": { + name: "Custom Provider", + npm: "@ai-sdk/openai-compatible", + api: "https://api.custom.com/v1", + env: ["CUSTOM_API_KEY"], + models: { + "custom-model": { + name: "Custom Model", + tool_call: true, + limit: { + context: 128000, + output: 4096, + }, + }, + }, + options: { + apiKey: "custom-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["custom-provider"]).toBeDefined() + expect(providers["custom-provider"].name).toBe("Custom Provider") + expect(providers["custom-provider"].models["custom-model"]).toBeDefined() + }, + }) +}) + +test("env variable takes precedence, config merges options", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + options: { + timeout: 60000, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "env-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + // Config options should be merged + expect(providers["anthropic"].options.timeout).toBe(60000) + }, + }) +}) + +test("getModel returns model for valid provider/model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.getModel("anthropic", "claude-sonnet-4-20250514") + expect(model).toBeDefined() + expect(model.providerID).toBe("anthropic") + expect(model.id).toBe("claude-sonnet-4-20250514") + const language = await Provider.getLanguage(model) + expect(language).toBeDefined() + }, + }) +}) + +test("getModel throws ModelNotFoundError for invalid model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + expect(Provider.getModel("anthropic", "nonexistent-model")).rejects.toThrow() + }, + }) +}) + +test("getModel throws ModelNotFoundError for invalid provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + expect(Provider.getModel("nonexistent-provider", "some-model")).rejects.toThrow() + }, + }) +}) + +test("parseModel correctly parses provider/model string", () => { + const result = Provider.parseModel("anthropic/claude-sonnet-4") + expect(result.providerID).toBe("anthropic") + expect(result.modelID).toBe("claude-sonnet-4") +}) + +test("parseModel handles model IDs with slashes", () => { + const result = Provider.parseModel("openrouter/anthropic/claude-3-opus") + expect(result.providerID).toBe("openrouter") + 
expect(result.modelID).toBe("anthropic/claude-3-opus") +}) + +test("defaultModel returns first available model when no config set", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.defaultModel() + expect(model.providerID).toBeDefined() + expect(model.modelID).toBeDefined() + }, + }) +}) + +test("defaultModel respects config model setting", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + model: "anthropic/claude-sonnet-4-20250514", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.defaultModel() + expect(model.providerID).toBe("anthropic") + expect(model.modelID).toBe("claude-sonnet-4-20250514") + }, + }) +}) + +test("provider with baseURL from config", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "custom-openai": { + name: "Custom OpenAI", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "gpt-4": { + name: "GPT-4", + tool_call: true, + limit: { context: 128000, output: 4096 }, + }, + }, + options: { + apiKey: "test-key", + baseURL: "https://custom.openai.com/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["custom-openai"]).toBeDefined() + expect(providers["custom-openai"].options.baseURL).toBe("https://custom.openai.com/v1") + }, + }) +}) + +test("model cost defaults to zero when not specified", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "test-provider": { + name: "Test Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "test-model": { + name: "Test Model", + tool_call: true, + limit: { context: 128000, output: 4096 }, + }, + }, + options: { + apiKey: "test-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["test-provider"].models["test-model"] + expect(model.cost.input).toBe(0) + expect(model.cost.output).toBe(0) + expect(model.cost.cache.read).toBe(0) + expect(model.cost.cache.write).toBe(0) + }, + }) +}) + +test("model options are merged from existing model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "claude-sonnet-4-20250514": { + options: { + customOption: "custom-value", + }, + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const 
providers = await Provider.list() + const model = providers["anthropic"].models["claude-sonnet-4-20250514"] + expect(model.options.customOption).toBe("custom-value") + }, + }) +}) + +test("provider removed when all models filtered out", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + whitelist: ["nonexistent-model"], + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeUndefined() + }, + }) +}) + +test("closest finds model by partial match", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const result = await Provider.closest("anthropic", ["sonnet-4"]) + expect(result).toBeDefined() + expect(result?.providerID).toBe("anthropic") + expect(result?.modelID).toContain("sonnet-4") + }, + }) +}) + +test("closest returns undefined for nonexistent provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await Provider.closest("nonexistent", ["model"]) + expect(result).toBeUndefined() + }, + }) +}) + +test("getModel uses realIdByKey for aliased models", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "my-sonnet": { + id: "claude-sonnet-4-20250514", + name: "My Sonnet Alias", + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"].models["my-sonnet"]).toBeDefined() + + const model = await Provider.getModel("anthropic", "my-sonnet") + expect(model).toBeDefined() + expect(model.id).toBe("my-sonnet") + expect(model.name).toBe("My Sonnet Alias") + }, + }) +}) + +test("provider api field sets model api.url", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "custom-api": { + name: "Custom API", + npm: "@ai-sdk/openai-compatible", + api: "https://api.example.com/v1", + env: [], + models: { + "model-1": { + name: "Model 1", + tool_call: true, + limit: { context: 8000, output: 2000 }, + }, + }, + options: { + apiKey: "test-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + // api field is stored on model.api.url, used by getSDK to set baseURL + expect(providers["custom-api"].models["model-1"].api.url).toBe("https://api.example.com/v1") + }, + 
}) +}) + +test("explicit baseURL overrides api field", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "custom-api": { + name: "Custom API", + npm: "@ai-sdk/openai-compatible", + api: "https://api.example.com/v1", + env: [], + models: { + "model-1": { + name: "Model 1", + tool_call: true, + limit: { context: 8000, output: 2000 }, + }, + }, + options: { + apiKey: "test-key", + baseURL: "https://custom.override.com/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["custom-api"].options.baseURL).toBe("https://custom.override.com/v1") + }, + }) +}) + +test("model inherits properties from existing database model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "claude-sonnet-4-20250514": { + name: "Custom Name for Sonnet", + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + const model = providers["anthropic"].models["claude-sonnet-4-20250514"] + expect(model.name).toBe("Custom Name for Sonnet") + expect(model.capabilities.toolcall).toBe(true) + expect(model.capabilities.attachment).toBe(true) + expect(model.limit.context).toBeGreaterThan(0) + }, + }) +}) + +test("disabled_providers prevents loading even with env var", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + disabled_providers: ["openai"], + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("OPENAI_API_KEY", "test-openai-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["openai"]).toBeUndefined() + }, + }) +}) + +test("enabled_providers with empty array allows no providers", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: [], + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + Env.set("OPENAI_API_KEY", "test-openai-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(Object.keys(providers).length).toBe(0) + }, + }) +}) + +test("whitelist and blacklist can be combined", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + whitelist: ["claude-sonnet-4-20250514", "claude-opus-4-20250514"], + blacklist: ["claude-opus-4-20250514"], + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + const models = 
Object.keys(providers["anthropic"].models) + expect(models).toContain("claude-sonnet-4-20250514") + expect(models).not.toContain("claude-opus-4-20250514") + expect(models.length).toBe(1) + }, + }) +}) + +test("model modalities default correctly", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "test-provider": { + name: "Test", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "test-model": { + name: "Test Model", + tool_call: true, + limit: { context: 8000, output: 2000 }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["test-provider"].models["test-model"] + expect(model.capabilities.input.text).toBe(true) + expect(model.capabilities.output.text).toBe(true) + }, + }) +}) + +test("model with custom cost values", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "test-provider": { + name: "Test", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "test-model": { + name: "Test Model", + tool_call: true, + limit: { context: 8000, output: 2000 }, + cost: { + input: 5, + output: 15, + cache_read: 2.5, + cache_write: 7.5, + }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["test-provider"].models["test-model"] + expect(model.cost.input).toBe(5) + expect(model.cost.output).toBe(15) + expect(model.cost.cache.read).toBe(2.5) + expect(model.cost.cache.write).toBe(7.5) + }, + }) +}) + +test("getSmallModel returns appropriate small model", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.getSmallModel("anthropic") + expect(model).toBeDefined() + expect(model?.id).toContain("haiku") + }, + }) +}) + +test("getSmallModel respects config small_model override", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + small_model: "anthropic/claude-sonnet-4-20250514", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model = await Provider.getSmallModel("anthropic") + expect(model).toBeDefined() + expect(model?.providerID).toBe("anthropic") + expect(model?.id).toBe("claude-sonnet-4-20250514") + }, + }) +}) + +test("provider.sort prioritizes preferred models", () => { + const models = [ + { id: "random-model", name: "Random" }, + { id: "claude-sonnet-4-latest", name: "Claude Sonnet 4" }, + { id: "gpt-5-turbo", name: "GPT-5 Turbo" }, + { id: "other-model", name: "Other" }, + ] as any[] + + const sorted = Provider.sort(models) + expect(sorted[0].id).toContain("sonnet-4") 
+ expect(sorted[0].id).toContain("latest") + expect(sorted[sorted.length - 1].id).not.toContain("gpt-5") + expect(sorted[sorted.length - 1].id).not.toContain("sonnet-4") +}) + +test("multiple providers can be configured simultaneously", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + options: { timeout: 30000 }, + }, + openai: { + options: { timeout: 60000 }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-anthropic-key") + Env.set("OPENAI_API_KEY", "test-openai-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"]).toBeDefined() + expect(providers["openai"]).toBeDefined() + expect(providers["anthropic"].options.timeout).toBe(30000) + expect(providers["openai"].options.timeout).toBe(60000) + }, + }) +}) + +test("provider with custom npm package", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "local-llm": { + name: "Local LLM", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "llama-3": { + name: "Llama 3", + tool_call: true, + limit: { context: 8192, output: 2048 }, + }, + }, + options: { + apiKey: "not-needed", + baseURL: "http://localhost:11434/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["local-llm"]).toBeDefined() + expect(providers["local-llm"].models["llama-3"].api.npm).toBe("@ai-sdk/openai-compatible") + expect(providers["local-llm"].options.baseURL).toBe("http://localhost:11434/v1") + }, + }) +}) + +// Edge cases for model configuration + +test("model alias name defaults to alias key when id differs", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + sonnet: { + id: "claude-sonnet-4-20250514", + // no name specified - should default to "sonnet" (the key) + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["anthropic"].models["sonnet"].name).toBe("sonnet") + }, + }) +}) + +test("provider with multiple env var options only includes apiKey when single env", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "multi-env": { + name: "Multi Env Provider", + npm: "@ai-sdk/openai-compatible", + env: ["MULTI_ENV_KEY_1", "MULTI_ENV_KEY_2"], + models: { + "model-1": { + name: "Model 1", + tool_call: true, + limit: { context: 8000, output: 2000 }, + }, + }, + options: { + baseURL: "https://api.example.com/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("MULTI_ENV_KEY_1", "test-key") + }, + fn: async () => { + const providers = await Provider.list() + 
expect(providers["multi-env"]).toBeDefined() + // When multiple env options exist, key should NOT be auto-set + expect(providers["multi-env"].key).toBeUndefined() + }, + }) +}) + +test("provider with single env var includes apiKey automatically", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "single-env": { + name: "Single Env Provider", + npm: "@ai-sdk/openai-compatible", + env: ["SINGLE_ENV_KEY"], + models: { + "model-1": { + name: "Model 1", + tool_call: true, + limit: { context: 8000, output: 2000 }, + }, + }, + options: { + baseURL: "https://api.example.com/v1", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("SINGLE_ENV_KEY", "my-api-key") + }, + fn: async () => { + const providers = await Provider.list() + expect(providers["single-env"]).toBeDefined() + // Single env option should auto-set key + expect(providers["single-env"].key).toBe("my-api-key") + }, + }) +}) + +test("model cost overrides existing cost values", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + models: { + "claude-sonnet-4-20250514": { + cost: { + input: 999, + output: 888, + }, + }, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + const model = providers["anthropic"].models["claude-sonnet-4-20250514"] + expect(model.cost.input).toBe(999) + expect(model.cost.output).toBe(888) + }, + }) +}) + +test("completely new provider not in database can be configured", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "brand-new-provider": { + name: "Brand New", + npm: "@ai-sdk/openai-compatible", + env: [], + api: "https://new-api.com/v1", + models: { + "new-model": { + name: "New Model", + tool_call: true, + reasoning: true, + attachment: true, + temperature: true, + limit: { context: 32000, output: 8000 }, + modalities: { + input: ["text", "image"], + output: ["text"], + }, + }, + }, + options: { + apiKey: "new-key", + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["brand-new-provider"]).toBeDefined() + expect(providers["brand-new-provider"].name).toBe("Brand New") + const model = providers["brand-new-provider"].models["new-model"] + expect(model.capabilities.reasoning).toBe(true) + expect(model.capabilities.attachment).toBe(true) + expect(model.capabilities.input.image).toBe(true) + }, + }) +}) + +test("disabled_providers and enabled_providers interaction", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + // enabled_providers takes precedence - only these are considered + enabled_providers: ["anthropic", "openai"], + // Then disabled_providers filters from the enabled set + disabled_providers: ["openai"], + }), + ) + }, + }) + await 
Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-anthropic") + Env.set("OPENAI_API_KEY", "test-openai") + Env.set("GOOGLE_GENERATIVE_AI_API_KEY", "test-google") + }, + fn: async () => { + const providers = await Provider.list() + // anthropic: in enabled, not in disabled = allowed + expect(providers["anthropic"]).toBeDefined() + // openai: in enabled, but also in disabled = NOT allowed + expect(providers["openai"]).toBeUndefined() + // google: not in enabled = NOT allowed (even though not disabled) + expect(providers["google"]).toBeUndefined() + }, + }) +}) + +test("model with tool_call false", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "no-tools": { + name: "No Tools Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + "basic-model": { + name: "Basic Model", + tool_call: false, + limit: { context: 4000, output: 1000 }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["no-tools"].models["basic-model"].capabilities.toolcall).toBe(false) + }, + }) +}) + +test("model defaults tool_call to true when not specified", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "default-tools": { + name: "Default Tools Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + model: { + name: "Model", + // tool_call not specified + limit: { context: 4000, output: 1000 }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["default-tools"].models["model"].capabilities.toolcall).toBe(true) + }, + }) +}) + +test("model headers are preserved", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "headers-provider": { + name: "Headers Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + model: { + name: "Model", + tool_call: true, + limit: { context: 4000, output: 1000 }, + headers: { + "X-Custom-Header": "custom-value", + Authorization: "Bearer special-token", + }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["headers-provider"].models["model"] + expect(model.headers).toEqual({ + "X-Custom-Header": "custom-value", + Authorization: "Bearer special-token", + }) + }, + }) +}) + +test("provider env fallback - second env var used if first missing", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "fallback-env": { + name: "Fallback Env Provider", + npm: "@ai-sdk/openai-compatible", + env: ["PRIMARY_KEY", "FALLBACK_KEY"], + models: { + model: { + name: "Model", + tool_call: true, + limit: { context: 4000, output: 1000 }, + }, + 
}, + options: { baseURL: "https://api.example.com" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + // Only set fallback, not primary + Env.set("FALLBACK_KEY", "fallback-api-key") + }, + fn: async () => { + const providers = await Provider.list() + // Provider should load because fallback env var is set + expect(providers["fallback-env"]).toBeDefined() + }, + }) +}) + +test("getModel returns consistent results", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const model1 = await Provider.getModel("anthropic", "claude-sonnet-4-20250514") + const model2 = await Provider.getModel("anthropic", "claude-sonnet-4-20250514") + expect(model1.providerID).toEqual(model2.providerID) + expect(model1.id).toEqual(model2.id) + expect(model1).toEqual(model2) + }, + }) +}) + +test("provider name defaults to id when not in database", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "my-custom-id": { + // no name specified + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + model: { + name: "Model", + tool_call: true, + limit: { context: 4000, output: 1000 }, + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["my-custom-id"].name).toBe("my-custom-id") + }, + }) +}) + +test("ModelNotFoundError includes suggestions for typos", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + try { + await Provider.getModel("anthropic", "claude-sonet-4") // typo: sonet instead of sonnet + expect(true).toBe(false) // Should not reach here + } catch (e: any) { + expect(e.data.suggestions).toBeDefined() + expect(e.data.suggestions.length).toBeGreaterThan(0) + } + }, + }) +}) + +test("ModelNotFoundError for provider includes suggestions", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + try { + await Provider.getModel("antropic", "claude-sonnet-4") // typo: antropic + expect(true).toBe(false) // Should not reach here + } catch (e: any) { + expect(e.data.suggestions).toBeDefined() + expect(e.data.suggestions).toContain("anthropic") + } + }, + }) +}) + +test("getProvider returns undefined for nonexistent provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ 
+ directory: tmp.path, + fn: async () => { + const provider = await Provider.getProvider("nonexistent") + expect(provider).toBeUndefined() + }, + }) +}) + +test("getProvider returns provider info", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const provider = await Provider.getProvider("anthropic") + expect(provider).toBeDefined() + expect(provider?.id).toBe("anthropic") + }, + }) +}) + +test("closest returns undefined when no partial match found", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const result = await Provider.closest("anthropic", ["nonexistent-xyz-model"]) + expect(result).toBeUndefined() + }, + }) +}) + +test("closest checks multiple query terms in order", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + // First term won't match, second will + const result = await Provider.closest("anthropic", ["nonexistent", "haiku"]) + expect(result).toBeDefined() + expect(result?.modelID).toContain("haiku") + }, + }) +}) + +test("model limit defaults to zero when not specified", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "no-limit": { + name: "No Limit Provider", + npm: "@ai-sdk/openai-compatible", + env: [], + models: { + model: { + name: "Model", + tool_call: true, + // no limit specified + }, + }, + options: { apiKey: "test" }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const model = providers["no-limit"].models["model"] + expect(model.limit.context).toBe(0) + expect(model.limit.output).toBe(0) + }, + }) +}) + +test("provider options are deeply merged", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { + options: { + headers: { + "X-Custom": "custom-value", + }, + timeout: 30000, + }, + }, + }, + }), + ) + }, + }) + await Instance.provide({ + directory: tmp.path, + init: async () => { + Env.set("ANTHROPIC_API_KEY", "test-api-key") + }, + fn: async () => { + const providers = await Provider.list() + // Custom options should be merged + expect(providers["anthropic"].options.timeout).toBe(30000) + expect(providers["anthropic"].options.headers["X-Custom"]).toBe("custom-value") + // anthropic custom loader adds its own headers, they should coexist + expect(providers["anthropic"].options.headers["anthropic-beta"]).toBeDefined() + }, + }) +}) 
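Taken together, the provider tests above exercise most of the `opencode.json` provider surface: `enabled_providers`/`disabled_providers`, per-provider `whitelist`/`blacklist`, model aliases via `id`, custom `npm` providers, and merged `options`. A minimal sketch of a config combining them, written in the same `tmpdir` init style the tests use; the model IDs, URLs, and keys are illustrative placeholders, and this particular combination is not itself covered by a test:

```ts
// Sketch only: combines config fields the tests above cover individually.
await Bun.write(
  path.join(dir, "opencode.json"),
  JSON.stringify({
    $schema: "https://opencode.ai/config.json",
    enabled_providers: ["anthropic", "local-llm"], // every other provider is ignored
    provider: {
      anthropic: {
        whitelist: ["claude-sonnet-4-20250514"], // filter the model list
        models: {
          sonnet: { id: "claude-sonnet-4-20250514" }, // alias; name defaults to the key
        },
        options: { timeout: 60000 }, // merged into the provider's options
      },
      "local-llm": {
        name: "Local LLM",
        npm: "@ai-sdk/openai-compatible",
        env: [],
        models: {
          "llama-3": { name: "Llama 3", tool_call: true, limit: { context: 8192, output: 2048 } },
        },
        options: { apiKey: "not-needed", baseURL: "http://localhost:11434/v1" },
      },
    },
  }),
)
```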
diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts index e6080d54c68..648f108bd66 100644 --- a/packages/opencode/test/provider/transform.test.ts +++ b/packages/opencode/test/provider/transform.test.ts @@ -96,3 +96,210 @@ describe("ProviderTransform.maxOutputTokens", () => { }) }) }) + +describe("ProviderTransform.message - DeepSeek reasoning content", () => { + test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => { + const msgs = [ + { + role: "assistant", + content: [ + { type: "reasoning", text: "Let me think about this..." }, + { + type: "tool-call", + toolCallId: "test", + toolName: "bash", + input: { command: "echo hello" }, + }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, { + id: "deepseek/deepseek-chat", + providerID: "deepseek", + api: { + id: "deepseek-chat", + url: "https://api.deepseek.com", + npm: "@ai-sdk/openai-compatible", + }, + name: "DeepSeek Chat", + capabilities: { + temperature: true, + reasoning: true, + attachment: false, + toolcall: true, + input: { text: true, audio: false, image: false, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + }, + cost: { + input: 0.001, + output: 0.002, + cache: { read: 0.0001, write: 0.0002 }, + }, + limit: { + context: 128000, + output: 8192, + }, + status: "active", + options: {}, + headers: {}, + }) + + expect(result).toHaveLength(1) + expect(result[0].content).toEqual([ + { + type: "tool-call", + toolCallId: "test", + toolName: "bash", + input: { command: "echo hello" }, + }, + ]) + expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...") + }) + + test("DeepSeek without tool calls strips reasoning from content", () => { + const msgs = [ + { + role: "assistant", + content: [ + { type: "reasoning", text: "Let me think about this..." }, + { type: "text", text: "Final answer" }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, { + id: "deepseek/deepseek-chat", + providerID: "deepseek", + api: { + id: "deepseek-chat", + url: "https://api.deepseek.com", + npm: "@ai-sdk/openai-compatible", + }, + name: "DeepSeek Chat", + capabilities: { + temperature: true, + reasoning: true, + attachment: false, + toolcall: true, + input: { text: true, audio: false, image: false, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + }, + cost: { + input: 0.001, + output: 0.002, + cache: { read: 0.0001, write: 0.0002 }, + }, + limit: { + context: 128000, + output: 8192, + }, + status: "active", + options: {}, + headers: {}, + }) + + expect(result).toHaveLength(1) + expect(result[0].content).toEqual([{ type: "text", text: "Final answer" }]) + expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined() + }) + + test("DeepSeek model ID containing 'deepseek' matches (case insensitive)", () => { + const msgs = [ + { + role: "assistant", + content: [ + { type: "reasoning", text: "Thinking..." 
}, + { + type: "tool-call", + toolCallId: "test", + toolName: "get_weather", + input: { location: "Hangzhou" }, + }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, { + id: "someprovider/deepseek-reasoner", + providerID: "someprovider", + api: { + id: "deepseek-reasoner", + url: "https://api.someprovider.com", + npm: "@ai-sdk/openai-compatible", + }, + name: "SomeProvider DeepSeek Reasoner", + capabilities: { + temperature: true, + reasoning: true, + attachment: false, + toolcall: true, + input: { text: true, audio: false, image: false, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + }, + cost: { + input: 0.001, + output: 0.002, + cache: { read: 0.0001, write: 0.0002 }, + }, + limit: { + context: 128000, + output: 8192, + }, + status: "active", + options: {}, + headers: {}, + }) + + expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Thinking...") + }) + + test("Non-DeepSeek providers leave reasoning content unchanged", () => { + const msgs = [ + { + role: "assistant", + content: [ + { type: "reasoning", text: "Should not be processed" }, + { type: "text", text: "Answer" }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, { + id: "openai/gpt-4", + providerID: "openai", + api: { + id: "gpt-4", + url: "https://api.openai.com", + npm: "@ai-sdk/openai", + }, + name: "GPT-4", + capabilities: { + temperature: true, + reasoning: false, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + }, + cost: { + input: 0.03, + output: 0.06, + cache: { read: 0.001, write: 0.002 }, + }, + limit: { + context: 128000, + output: 4096, + }, + status: "active", + options: {}, + headers: {}, + }) + + expect(result[0].content).toEqual([ + { type: "reasoning", text: "Should not be processed" }, + { type: "text", text: "Answer" }, + ]) + expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined() + }) +}) diff --git a/packages/plugin/package.json b/packages/plugin/package.json index 2871fc7db2e..35c1cb63fd6 100644 --- a/packages/plugin/package.json +++ b/packages/plugin/package.json @@ -1,7 +1,7 @@ { "$schema": "https://json.schemastore.org/package.json", "name": "@opencode-ai/plugin", - "version": "1.0.129", + "version": "1.0.130", "type": "module", "scripts": { "typecheck": "tsgo --noEmit", diff --git a/packages/sdk/js/package.json b/packages/sdk/js/package.json index 9ea1689ae24..8bdab7ca202 100644 --- a/packages/sdk/js/package.json +++ b/packages/sdk/js/package.json @@ -1,7 +1,7 @@ { "$schema": "https://json.schemastore.org/package.json", "name": "@opencode-ai/sdk", - "version": "1.0.129", + "version": "1.0.130", "type": "module", "scripts": { "typecheck": "tsgo --noEmit", diff --git a/packages/sdk/js/src/gen/types.gen.ts b/packages/sdk/js/src/gen/types.gen.ts index d3a617b032e..4ed2e472044 100644 --- a/packages/sdk/js/src/gen/types.gen.ts +++ b/packages/sdk/js/src/gen/types.gen.ts @@ -802,6 +802,10 @@ export type KeybindsConfig = { * Navigate to last message */ messages_last?: string + /** + * Navigate to last user message + */ + messages_last_user?: string /** * Copy message */ @@ -818,6 +822,10 @@ export type KeybindsConfig = { * Toggle code block concealment in messages */ messages_toggle_conceal?: string + /** + * Toggle tool details visibility + */ + tool_details?: string /** * List available models */ 
@@ -950,6 +958,75 @@ export type AgentConfig = { | undefined } +export type ProviderConfig = { + api?: string + name?: string + env?: Array<string> + id?: string + npm?: string + models?: { + [key: string]: { + id?: string + name?: string + release_date?: string + attachment?: boolean + reasoning?: boolean + temperature?: boolean + tool_call?: boolean + cost?: { + input: number + output: number + cache_read?: number + cache_write?: number + context_over_200k?: { + input: number + output: number + cache_read?: number + cache_write?: number + } + } + limit?: { + context: number + output: number + } + modalities?: { + input: Array<"text" | "audio" | "image" | "video" | "pdf"> + output: Array<"text" | "audio" | "image" | "video" | "pdf"> + } + experimental?: boolean + status?: "alpha" | "beta" | "deprecated" + options?: { + [key: string]: unknown + } + headers?: { + [key: string]: string + } + provider?: { + npm: string + } + } + } + whitelist?: Array<string> + blacklist?: Array<string> + options?: { + apiKey?: string + baseURL?: string + /** + * GitHub Enterprise URL for copilot authentication + */ + enterpriseUrl?: string + /** + * Enable promptCacheKey for this provider (default false) + */ + setCacheKey?: boolean + /** + * Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout. + */ + timeout?: number | false + [key: string]: unknown | string | boolean | (number | false) | undefined + } +} + export type McpLocalConfig = { /** * Type of MCP server connection @@ -1108,74 +1185,7 @@ export type Config = { * Custom provider configurations and model overrides */ provider?: { - [key: string]: { - api?: string - name?: string - env?: Array<string> - id?: string - npm?: string - models?: { - [key: string]: { - id?: string - name?: string - release_date?: string - attachment?: boolean - reasoning?: boolean - temperature?: boolean - tool_call?: boolean - cost?: { - input: number - output: number - cache_read?: number - cache_write?: number - context_over_200k?: { - input: number - output: number - cache_read?: number - cache_write?: number - } - } - limit?: { - context: number - output: number - } - modalities?: { - input: Array<"text" | "audio" | "image" | "video" | "pdf"> - output: Array<"text" | "audio" | "image" | "video" | "pdf"> - } - experimental?: boolean - status?: "alpha" | "beta" | "deprecated" - options?: { - [key: string]: unknown - } - headers?: { - [key: string]: string - } - provider?: { - npm: string - } - } - } - whitelist?: Array<string> - blacklist?: Array<string> - options?: { - apiKey?: string - baseURL?: string - /** - * GitHub Enterprise URL for copilot authentication - */ - enterpriseUrl?: string - /** - * Enable promptCacheKey for this provider (default false) - */ - setCacheKey?: boolean - /** - * Timeout in milliseconds for requests to this provider. Default is 300000 (5 minutes). Set to false to disable timeout. - */ - timeout?: number | false - [key: string]: unknown | string | boolean | (number | false) | undefined - } - } + [key: string]: ProviderConfig } /** * MCP (Model Context Protocol) server configurations @@ -1265,6 +1275,10 @@ export type Config = { * Enable the batch tool */ batch_tool?: boolean + /** + * Tools that should only be available to primary agents. 
+ */ + primary_tools?: Array<string> } } @@ -1358,51 +1372,71 @@ export type Command = { export type Model = { id: string + providerID: string + api: { + id: string + url: string + npm: string + } name: string - release_date: string - attachment: boolean - reasoning: boolean - temperature: boolean - tool_call: boolean + capabilities: { + temperature: boolean + reasoning: boolean + attachment: boolean + toolcall: boolean + input: { + text: boolean + audio: boolean + image: boolean + video: boolean + pdf: boolean + } + output: { + text: boolean + audio: boolean + image: boolean + video: boolean + pdf: boolean + } + } cost: { input: number output: number - cache_read?: number - cache_write?: number - context_over_200k?: { + cache: { + read: number + write: number + } + experimentalOver200K?: { input: number output: number - cache_read?: number - cache_write?: number + cache: { + read: number + write: number + } } } limit: { context: number output: number } - modalities?: { - input: Array<"text" | "audio" | "image" | "video" | "pdf"> - output: Array<"text" | "audio" | "image" | "video" | "pdf"> - } - experimental?: boolean - status?: "alpha" | "beta" | "deprecated" + status: "alpha" | "beta" | "deprecated" | "active" options: { [key: string]: unknown } - headers?: { + headers: { [key: string]: string } - provider?: { - npm: string - } } export type Provider = { - api?: string + id: string name: string + source: "env" | "config" | "custom" | "api" env: Array<string> - id: string - npm?: string + key?: string + options: { + [key: string]: unknown + } models: { [key: string]: Model } @@ -2672,7 +2706,55 @@ export type ProviderListResponses = { * List of providers */ 200: { - all: Array<Provider> + all: Array<{ + api?: string + name: string + env: Array<string> + id: string + npm?: string + models: { + [key: string]: { + id: string + name: string + release_date: string + attachment: boolean + reasoning: boolean + temperature: boolean + tool_call: boolean + cost?: { + input: number + output: number + cache_read?: number + cache_write?: number + context_over_200k?: { + input: number + output: number + cache_read?: number + cache_write?: number + } + } + limit: { + context: number + output: number + } + modalities?: { + input: Array<"text" | "audio" | "image" | "video" | "pdf"> + output: Array<"text" | "audio" | "image" | "video" | "pdf"> + } + experimental?: boolean + status?: "alpha" | "beta" | "deprecated" + options: { + [key: string]: unknown + } + headers?: { + [key: string]: string + } + provider?: { + npm: string + } + } + } + }> default: { [key: string]: string } diff --git a/packages/slack/package.json b/packages/slack/package.json index f81f9069f2b..bcae3b7a4d8 100644 --- a/packages/slack/package.json +++ b/packages/slack/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/slack", - "version": "1.0.129", + "version": "1.0.130", "type": "module", "scripts": { "dev": "bun run src/index.ts", diff --git a/packages/tauri/package.json b/packages/tauri/package.json index 01f42b4bfe1..5e71959f26f 100644 --- a/packages/tauri/package.json +++ b/packages/tauri/package.json @@ -1,7 +1,7 @@ { "name": "@opencode-ai/tauri", "private": true, - "version": "1.0.129", + "version": "1.0.130", "type": "module", "scripts": { "dev": "vite", diff --git a/packages/ui/package.json b/packages/ui/package.json index 78d7c226568..b9011d6c860 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/ui", - "version": "1.0.129", + "version": "1.0.130", "type": "module", "exports": { "./*": 
"./src/components/*.tsx", diff --git a/packages/util/package.json b/packages/util/package.json index a9f365df5fc..c891a034666 100644 --- a/packages/util/package.json +++ b/packages/util/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/util", - "version": "1.0.129", + "version": "1.0.130", "private": true, "type": "module", "exports": { diff --git a/packages/web/package.json b/packages/web/package.json index 1789991fc32..1277872da7f 100644 --- a/packages/web/package.json +++ b/packages/web/package.json @@ -1,7 +1,7 @@ { "name": "@opencode-ai/web", "type": "module", - "version": "1.0.129", + "version": "1.0.130", "scripts": { "dev": "astro dev", "dev:remote": "VITE_API_URL=https://api.opencode.ai astro dev", diff --git a/packages/web/src/content/docs/1-0.mdx b/packages/web/src/content/docs/1-0.mdx index 6737482a7cf..11d3b629f43 100644 --- a/packages/web/src/content/docs/1-0.mdx +++ b/packages/web/src/content/docs/1-0.mdx @@ -44,10 +44,6 @@ We removed some functionality that we weren't sure anyone actually used. If some ## Breaking changes -### Theme - -The `system` theme has not yet been ported and custom themes aren't loaded yet but both of these will be fixed this week. - ### Keybinds renamed - messages_revert -> messages_undo diff --git a/packages/web/src/content/docs/keybinds.mdx b/packages/web/src/content/docs/keybinds.mdx index 80a74c159bb..a32756e18c8 100644 --- a/packages/web/src/content/docs/keybinds.mdx +++ b/packages/web/src/content/docs/keybinds.mdx @@ -35,6 +35,7 @@ OpenCode has a list of keybinds that you can customize through the OpenCode conf "messages_copy": "y", "messages_undo": "u", "messages_redo": "r", + "messages_last_user": "none", "messages_toggle_conceal": "h", "model_list": "m", "model_cycle_recent": "f2", diff --git a/sdks/vscode/package.json b/sdks/vscode/package.json index bc20f1a18b1..fd0b3e63541 100644 --- a/sdks/vscode/package.json +++ b/sdks/vscode/package.json @@ -2,7 +2,7 @@ "name": "opencode", "displayName": "opencode", "description": "opencode for VS Code", - "version": "1.0.129", + "version": "1.0.130", "publisher": "sst-dev", "repository": { "type": "git",