From cbce332bc48ca256dbe914fbf0e7aa66abb187a0 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Thu, 22 Jan 2026 16:57:14 -0800 Subject: [PATCH 01/22] copy package before significant changes --- packages/ai-sdk-provider-2/.prettierignore | 3 + packages/ai-sdk-provider-2/CHANGELOG.md | 41 + packages/ai-sdk-provider-2/LICENSE | 202 ++++ packages/ai-sdk-provider-2/README.md | 85 ++ packages/ai-sdk-provider-2/eslint.config.js | 9 + packages/ai-sdk-provider-2/package.json | 70 ++ .../src/__generated__/models.gen.ts | 41 + ...onvert-to-hyperbolic-chat-messages.test.ts | 437 ++++++++ .../convert-to-hyperbolic-chat-messages.ts | 165 +++ ...convert-to-hyperbolic-completion-prompt.ts | 134 +++ .../hyperbolic-chat-language-model.test.ts | 990 ++++++++++++++++++ .../src/hyperbolic-chat-language-model.ts | 659 ++++++++++++ .../src/hyperbolic-chat-prompt.ts | 67 ++ .../src/hyperbolic-chat-settings.ts | 50 + ...perbolic-completion-language-model.test.ts | 496 +++++++++ .../hyperbolic-completion-language-model.ts | 352 +++++++ .../src/hyperbolic-completion-settings.ts | 42 + .../ai-sdk-provider-2/src/hyperbolic-error.ts | 49 + .../src/hyperbolic-image-language-model.ts | 130 +++ .../src/hyperbolic-image-settings.ts | 40 + .../src/hyperbolic-provider-options.test.ts | 64 ++ .../src/hyperbolic-provider.ts | 180 ++++ packages/ai-sdk-provider-2/src/index.ts | 3 + .../ai-sdk-provider-2/src/internal/index.ts | 7 + .../src/map-hyperbolic-chat-logprobs.ts | 37 + .../src/map-hyperbolic-completion-logprobs.ts | 24 + .../src/map-hyperbolic-finish-reason.ts | 23 + .../src/scripts/templates/models.ts.hbs | 32 + .../src/scripts/update-models-list.ts | 31 + packages/ai-sdk-provider-2/src/types.ts | 47 + packages/ai-sdk-provider-2/tsconfig.json | 5 + packages/ai-sdk-provider-2/tsup.config.ts | 17 + packages/ai-sdk-provider-2/turbo.json | 10 + packages/ai-sdk-provider-2/vitest.config.mts | 10 + .../ai-sdk-provider-2/vitest.edge.config.ts | 10 + .../ai-sdk-provider-2/vitest.node.config.ts | 10 + 
36 files changed, 4572 insertions(+) create mode 100644 packages/ai-sdk-provider-2/.prettierignore create mode 100644 packages/ai-sdk-provider-2/CHANGELOG.md create mode 100644 packages/ai-sdk-provider-2/LICENSE create mode 100644 packages/ai-sdk-provider-2/README.md create mode 100644 packages/ai-sdk-provider-2/eslint.config.js create mode 100644 packages/ai-sdk-provider-2/package.json create mode 100644 packages/ai-sdk-provider-2/src/__generated__/models.gen.ts create mode 100644 packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.test.ts create mode 100644 packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.ts create mode 100644 packages/ai-sdk-provider-2/src/convert-to-hyperbolic-completion-prompt.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.test.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-chat-prompt.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-chat-settings.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.test.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-completion-settings.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-error.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-image-language-model.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-image-settings.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-provider-options.test.ts create mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-provider.ts create mode 100644 packages/ai-sdk-provider-2/src/index.ts create mode 100644 packages/ai-sdk-provider-2/src/internal/index.ts create mode 100644 packages/ai-sdk-provider-2/src/map-hyperbolic-chat-logprobs.ts create mode 100644 
packages/ai-sdk-provider-2/src/map-hyperbolic-completion-logprobs.ts create mode 100644 packages/ai-sdk-provider-2/src/map-hyperbolic-finish-reason.ts create mode 100644 packages/ai-sdk-provider-2/src/scripts/templates/models.ts.hbs create mode 100644 packages/ai-sdk-provider-2/src/scripts/update-models-list.ts create mode 100644 packages/ai-sdk-provider-2/src/types.ts create mode 100644 packages/ai-sdk-provider-2/tsconfig.json create mode 100644 packages/ai-sdk-provider-2/tsup.config.ts create mode 100644 packages/ai-sdk-provider-2/turbo.json create mode 100644 packages/ai-sdk-provider-2/vitest.config.mts create mode 100644 packages/ai-sdk-provider-2/vitest.edge.config.ts create mode 100644 packages/ai-sdk-provider-2/vitest.node.config.ts diff --git a/packages/ai-sdk-provider-2/.prettierignore b/packages/ai-sdk-provider-2/.prettierignore new file mode 100644 index 0000000..523438f --- /dev/null +++ b/packages/ai-sdk-provider-2/.prettierignore @@ -0,0 +1,3 @@ +# Ignore Handlebars template files +**/*.hbs +**/*.ts.hbs \ No newline at end of file diff --git a/packages/ai-sdk-provider-2/CHANGELOG.md b/packages/ai-sdk-provider-2/CHANGELOG.md new file mode 100644 index 0000000..6a00837 --- /dev/null +++ b/packages/ai-sdk-provider-2/CHANGELOG.md @@ -0,0 +1,41 @@ +# @hyperbolic/ai-sdk-provider + +## 0.1.3 + +### Patch Changes + +- [#19](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/19) [`825eb5f6be2f1d9ee959b62b79573a65f56362a5`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/825eb5f6be2f1d9ee959b62b79573a65f56362a5) Thanks [@connorch](https://github.com/connorch)! - bump deps and tweak readme + +## 0.1.2 + +### Patch Changes + +- [#16](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/16) [`78fde27c775553b84af8a1c90b1c08ba6821513c`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/78fde27c775553b84af8a1c90b1c08ba6821513c) Thanks [@connorch](https://github.com/connorch)! 
- handle error parsing when the termination sequence is appended to the… + +## 0.1.1 + +### Patch Changes + +- [#13](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/13) [`6d53d2982a58215344fa66b3cf45d553e713d7f6`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/6d53d2982a58215344fa66b3cf45d553e713d7f6) Thanks [@connorch](https://github.com/connorch)! - Add links to repo in package.json + +## 0.1.0 + +### Minor Changes + +- [`21c60cbb9aff7d7256bbbf39007c824a604eea00`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/21c60cbb9aff7d7256bbbf39007c824a604eea00) Thanks [@connorch](https://github.com/connorch)! - Initial release for the ai-sdk-provider + +### Patch Changes + +- [`21c60cbb9aff7d7256bbbf39007c824a604eea00`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/21c60cbb9aff7d7256bbbf39007c824a604eea00) Thanks [@connorch](https://github.com/connorch)! - Add documentation + +## 0.0.3 + +### Patch Changes + +- [#4](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/4) [`3abde9c43edd10d4cc2b65e036298fdad5a21c96`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/3abde9c43edd10d4cc2b65e036298fdad5a21c96) Thanks [@connorch](https://github.com/connorch)! - update npm token in release + +## 0.0.2 + +### Patch Changes + +- [#1](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/1) [`dd3804f79603b4d6876b866efb6fd17cf72e99a2`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/dd3804f79603b4d6876b866efb6fd17cf72e99a2) Thanks [@connorch](https://github.com/connorch)! - fix: test changeset diff --git a/packages/ai-sdk-provider-2/LICENSE b/packages/ai-sdk-provider-2/LICENSE new file mode 100644 index 0000000..1bceb99 --- /dev/null +++ b/packages/ai-sdk-provider-2/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
 + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 OpenRouter Inc, + Copyright 2025 Hyperbolic Labs Inc, + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/packages/ai-sdk-provider-2/README.md b/packages/ai-sdk-provider-2/README.md new file mode 100644 index 0000000..3e99fbc --- /dev/null +++ b/packages/ai-sdk-provider-2/README.md @@ -0,0 +1,85 @@ +# Hyperbolic Provider for Vercel AI SDK + +The [Hyperbolic](https://hyperbolic.xyz/) provider for the [Vercel AI SDK](https://sdk.vercel.ai/docs) gives access to any model found at <https://app.hyperbolic.xyz/models>. + +This is based on the [OpenRouter](https://openrouter.ai/) provider for the Vercel AI SDK, with a number of changes to support the Hyperbolic API and add image generation support. 
+ +## Setup + +```bash +# For pnpm +pnpm add @hyperbolic/ai-sdk-provider + +# For npm +npm install @hyperbolic/ai-sdk-provider + +# For yarn +yarn add @hyperbolic/ai-sdk-provider +``` + +## Provider Instance + +You can create a provider instance with `createHyperbolic` from `@hyperbolic/ai-sdk-provider`: + +```ts +import { createHyperbolic } from "@hyperbolic/ai-sdk-provider"; +``` + +## Example + +```ts +import { generateText } from "ai"; + +import { createHyperbolic } from "@hyperbolic/ai-sdk-provider"; + +const hyperbolic = createHyperbolic({ + apiKey: process.env.HYPERBOLIC_API_KEY, // Found in settings after logging in at https://app.hyperbolic.xyz +}); + +const { text } = await generateText({ + model: hyperbolic.chat("deepseek-ai/DeepSeek-R1"), + prompt: "Write a vegetarian lasagna recipe for 4 people.", +}); +``` + +## Supported models + +This list is not a definitive list of models supported by Hyperbolic, as it constantly changes as we add new models (and deprecate old ones) to our system. +You can find the latest list of models supported by Hyperbolic [here](https://app.hyperbolic.xyz/models). 
+ +## Using Models + +### Language Models + +```ts +const { text } = await generateText({ + model: hyperbolic.chat("deepseek-ai/DeepSeek-R1"), + prompt: "Write a vegetarian lasagna recipe for 4 people.", +}); + +const { text } = await generateText({ + model: hyperbolic.completion("deepseek-ai/DeepSeek-R1"), + prompt: "The capital of France is", +}); +``` + +### Image Generation Models + +```ts +import { experimental_generateImage as generateImage } from "ai"; + +// Text to Image +const { images } = await generateImage({ + model: hyperbolic.image("SDXL1.0-base"), + prompt: "A beautiful sunset over a calm ocean", + size: "1024x1024", + providerOptions: { + hyperbolic: { + cfgScale: 5, + steps: 30, + negativePrompt: "low quality, blurry, distorted", + enableRefiner: false, + } satisfies HyperbolicImageProviderOptions, + }, +}); +``` diff --git a/packages/ai-sdk-provider-2/eslint.config.js b/packages/ai-sdk-provider-2/eslint.config.js new file mode 100644 index 0000000..5b13b2e --- /dev/null +++ b/packages/ai-sdk-provider-2/eslint.config.js @@ -0,0 +1,9 @@ +import baseConfig from "@hyperbolic/eslint-config/base"; + +/** @type {import('typescript-eslint').Config} */ +export default [ + { + ignores: [], + }, + ...baseConfig, +]; diff --git a/packages/ai-sdk-provider-2/package.json b/packages/ai-sdk-provider-2/package.json new file mode 100644 index 0000000..0a53dc6 --- /dev/null +++ b/packages/ai-sdk-provider-2/package.json @@ -0,0 +1,70 @@ +{ + "name": "@hyperbolic/ai-sdk-provider-2", + "private": false, + "version": "0.1.3", + "type": "module", + "main": "./dist/index.js", + "module": "./dist/index.js", + "types": "./dist/index.d.ts", + "repository": { + "type": "git", + "url": "https://github.com/HyperbolicLabs/hyperbolic-ts.git", + "directory": "packages/ai-sdk-provider" + }, + "homepage": "https://github.com/HyperbolicLabs/hyperbolic-ts/tree/main/packages/ai-sdk-provider", + "bugs": { + "url": "https://github.com/HyperbolicLabs/hyperbolic-ts/issues" + }, + 
"exports": { + "./package.json": "./package.json", + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js", + "require": "./dist/index.cjs" + }, + "./internal": { + "types": "./dist/internal/index.d.ts", + "import": "./dist/internal/index.js", + "module": "./dist/internal/index.js", + "require": "./dist/internal/index.cjs" + } + }, + "files": [ + "dist/**/*", + "CHANGELOG.md" + ], + "license": "Apache-2.0", + "scripts": { + "build": "tsup", + "clean": "git clean -xdf .cache .turbo dist node_modules", + "dev": "pnpm with-env tsup", + "format": "prettier --check . --ignore-path ../../.gitignore --ignore-path .prettierignore", + "lint": "eslint", + "typecheck": "tsc --noEmit --emitDeclarationOnly false", + "with-env": "dotenv -e ../../.env -c --", + "codegen:update-models": "pnpm with-env npx tsx src/scripts/update-models-list.ts", + "test": "pnpm test:node && pnpm test:edge", + "test:edge": "pnpm with-env vitest --config vitest.edge.config.ts --run", + "test:node": "pnpm with-env vitest --config vitest.node.config.ts --run" + }, + "prettier": "@hyperbolic/prettier-config", + "devDependencies": { + "@edge-runtime/vm": "^5.0.0", + "@hyperbolic/api": "workspace:*", + "@hyperbolic/eslint-config": "workspace:*", + "@hyperbolic/prettier-config": "workspace:*", + "@hyperbolic/tsconfig": "workspace:*", + "eslint": "catalog:", + "handlebars": "^4.7.8", + "prettier": "catalog:", + "tsup": "8.4.0", + "type-fest": "^4.37.0", + "typescript": "catalog:" + }, + "dependencies": { + "@ai-sdk/provider": "^1.1.3", + "@ai-sdk/provider-utils": "^2.2.8", + "ai": "^4.3.16", + "zod": "^3.24.2" + } +} diff --git a/packages/ai-sdk-provider-2/src/__generated__/models.gen.ts b/packages/ai-sdk-provider-2/src/__generated__/models.gen.ts new file mode 100644 index 0000000..08a1ae7 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/__generated__/models.gen.ts @@ -0,0 +1,41 @@ +// prettier-ignore + +// This file is auto-generated by the pnpm codegen:update-models script. 
Do not edit manually. + +const _models = [ + "Qwen/Qwen2.5-72B-Instruct", + "Qwen/Qwen2.5-VL-72B-Instruct", + "meta-llama/Meta-Llama-3-70B-Instruct", + "meta-llama/Meta-Llama-3.1-70B-Instruct", + "meta-llama/Meta-Llama-3.1-405B-FP8", + "Qwen/Qwen2.5-VL-7B-Instruct", + "meta-llama/Meta-Llama-3.1-405B-Instruct", + "Qwen/QwQ-32B", + "deepseek-ai/DeepSeek-V3", + "Qwen/QwQ-32B-Preview", + "meta-llama/Llama-3.3-70B-Instruct", + "NousResearch/Hermes-3-Llama-3.1-70B", + "meta-llama/Meta-Llama-3.1-405B", + "meta-llama/Llama-3.2-3B-Instruct", + "FLUX.1-dev", + "mistralai/Pixtral-12B-2409", + "StableDiffusion", + "meta-llama/Meta-Llama-3.1-8B-Instruct", + "Qwen/Qwen2.5-Coder-32B-Instruct", + "TTS", + "deepseek-ai/DeepSeek-R1", +] as const; + +const _imageModels = [] as const; + +const _chatModels = [] as const; + +const _completionModels = [] as const; + +export type HyperbolicImageModelId = (typeof _imageModels)[number] | string; +export type HyperbolicChatModelId = (typeof _chatModels)[number] | string; +export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | string; +export type HyperbolicModelId = + | HyperbolicImageModelId + | HyperbolicChatModelId + | HyperbolicCompletionModelId; diff --git a/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.test.ts b/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.test.ts new file mode 100644 index 0000000..af2e058 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.test.ts @@ -0,0 +1,437 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import { describe, expect, it } from "vitest"; + +import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; + +describe("user messages", () => { + it("should convert messages with image parts to multiple parts", async () => { + const result = convertToHyperbolicChatMessages([ + { + role: "user", + content: [ + { type: "text", text: "Hello" }, + { + type: "image", + image: new Uint8Array([0, 1, 2, 3]), + mimeType: "image/png", + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: "user", + content: [ + { type: "text", text: "Hello" }, + { + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, + }, + ], + }, + ]); + }); + + it("should convert messages with only a text part to a string content", async () => { + const result = convertToHyperbolicChatMessages([ + { + role: "user", + content: [{ type: "text", text: "Hello" }], + }, + ]); + + expect(result).toEqual([{ role: "user", content: "Hello" }]); + }); +}); + +describe("cache control", () => { + it("should pass cache control from system message provider metadata", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "system", + content: "System prompt", + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: "system", + content: "System prompt", + cache_control: { type: "ephemeral" }, + }, + ]); + }); + + it("should pass cache control from user message provider metadata (single text part)", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "user", + content: [{ type: "text", text: "Hello" }], + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: "user", + content: "Hello", + cache_control: { type: "ephemeral" }, + }, + ]); + }); + + it("should pass cache control from user message provider metadata 
(multiple parts)", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "user", + content: [ + { type: "text", text: "Hello" }, + { + type: "image", + image: new Uint8Array([0, 1, 2, 3]), + mimeType: "image/png", + }, + ], + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: "user", + content: [ + { + type: "text", + text: "Hello", + cache_control: undefined, + }, + { + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, + cache_control: { type: "ephemeral" }, + }, + ], + }, + ]); + }); + + it("should pass cache control to multiple image parts from user message provider metadata", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "user", + content: [ + { type: "text", text: "Hello" }, + { + type: "image", + image: new Uint8Array([0, 1, 2, 3]), + mimeType: "image/png", + }, + { + type: "image", + image: new Uint8Array([4, 5, 6, 7]), + mimeType: "image/jpeg", + }, + ], + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: "user", + content: [ + { + type: "text", + text: "Hello", + cache_control: undefined, + }, + { + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, + cache_control: { type: "ephemeral" }, + }, + { + type: "image_url", + image_url: { url: "data:image/jpeg;base64,BAUGBw==" }, + cache_control: { type: "ephemeral" }, + }, + ], + }, + ]); + }); + + it("should pass cache control to file parts from user message provider metadata", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "user", + content: [ + { type: "text", text: "Hello" }, + { + type: "file", + data: "file content", + mimeType: "text/plain", + }, + ], + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: "user", + content: [ + { + type: 
"text", + text: "Hello", + cache_control: undefined, + }, + { + type: "text", + text: "file content", + cache_control: { type: "ephemeral" }, + }, + ], + }, + ]); + }); + + it("should handle mixed part-specific and message-level cache control for multiple parts", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "user", + content: [ + { + type: "text", + text: "Hello", + // No part-specific provider metadata + }, + { + type: "image", + image: new Uint8Array([0, 1, 2, 3]), + mimeType: "image/png", + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + { + type: "file", + data: "file content", + mimeType: "text/plain", + // No part-specific provider metadata + }, + ], + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: "user", + content: [ + { + type: "text", + text: "Hello", + cache_control: undefined, + }, + { + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, + cache_control: { type: "ephemeral" }, + }, + { + type: "text", + text: "file content", + cache_control: { type: "ephemeral" }, + }, + ], + }, + ]); + }); + + it("should pass cache control from individual content part provider metadata", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "user", + content: [ + { + type: "text", + text: "Hello", + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + { + type: "image", + image: new Uint8Array([0, 1, 2, 3]), + mimeType: "image/png", + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: "user", + content: [ + { + type: "text", + text: "Hello", + cache_control: { type: "ephemeral" }, + }, + { + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, + }, + ], + }, + ]); + }); + + it("should pass cache control from assistant message provider metadata", () => { + const result = convertToHyperbolicChatMessages([ + 
{ + role: "assistant", + content: [{ type: "text", text: "Assistant response" }], + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: "assistant", + content: "Assistant response", + cache_control: { type: "ephemeral" }, + }, + ]); + }); + + it("should pass cache control from tool message provider metadata", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "tool", + content: [ + { + type: "tool-result", + toolCallId: "call-123", + toolName: "calculator", + result: { answer: 42 }, + isError: false, + }, + ], + providerMetadata: { + anthropic: { + cacheControl: { type: "ephemeral" }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: "tool", + tool_call_id: "call-123", + content: JSON.stringify({ answer: 42 }), + cache_control: { type: "ephemeral" }, + }, + ]); + }); + + it("should support the alias cache_control field", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "system", + content: "System prompt", + providerMetadata: { + anthropic: { + cache_control: { type: "ephemeral" }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: "system", + content: "System prompt", + cache_control: { type: "ephemeral" }, + }, + ]); + }); + + it("should support cache control on last message in content array", () => { + const result = convertToHyperbolicChatMessages([ + { + role: "system", + content: "System prompt", + }, + { + role: "user", + content: [ + { type: "text", text: "User prompt" }, + { + type: "text", + text: "User prompt 2", + providerMetadata: { + anthropic: { cacheControl: { type: "ephemeral" } }, + }, + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: "system", + content: "System prompt", + }, + { + role: "user", + content: [ + { type: "text", text: "User prompt" }, + { + type: "text", + text: "User prompt 2", + cache_control: { type: "ephemeral" }, + }, + ], + }, + ]); + }); +}); diff --git 
a/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.ts b/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.ts new file mode 100644 index 0000000..30bae0b --- /dev/null +++ b/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.ts @@ -0,0 +1,165 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +import type { LanguageModelV1Prompt, LanguageModelV1ProviderMetadata } from "@ai-sdk/provider"; +import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils"; + +import type { ChatCompletionContentPart, HyperbolicChatPrompt } from "./hyperbolic-chat-prompt"; + +// Type for Hyperbolic Cache Control following Anthropic's pattern +export type HyperbolicCacheControl = { type: "ephemeral" }; + +export function convertToHyperbolicChatMessages( + prompt: LanguageModelV1Prompt, +): HyperbolicChatPrompt { + const messages: HyperbolicChatPrompt = []; + + function getCacheControl( + providerMetadata: LanguageModelV1ProviderMetadata | undefined, + ): HyperbolicCacheControl | undefined { + const anthropic = providerMetadata?.anthropic; + + // Allow both cacheControl and cache_control: + const cacheControlValue = anthropic?.cacheControl ?? anthropic?.cache_control; + + // Return the cache control object if it exists + return cacheControlValue as HyperbolicCacheControl | undefined; + } + + for (const { role, content, providerMetadata } of prompt) { + switch (role) { + case "system": { + messages.push({ + role: "system", + content, + cache_control: getCacheControl(providerMetadata), + }); + break; + } + + case "user": { + if (content.length === 1 && content[0]?.type === "text") { + messages.push({ + role: "user", + content: content[0].text, + cache_control: + getCacheControl(providerMetadata) ?? 
getCacheControl(content[0].providerMetadata), + }); + break; + } + + // Get message level cache control + const messageCacheControl = getCacheControl(providerMetadata); + + const contentParts: ChatCompletionContentPart[] = content.map((part) => { + switch (part.type) { + case "text": + return { + type: "text" as const, + text: part.text, + // For text parts, only use part-specific cache control + cache_control: getCacheControl(part.providerMetadata), + }; + case "image": + return { + type: "image_url" as const, + image_url: { + url: + part.image instanceof URL + ? part.image.toString() + : `data:${part.mimeType ?? "image/jpeg"};base64,${convertUint8ArrayToBase64( + part.image, + )}`, + }, + // For image parts, use part-specific or message-level cache control + cache_control: getCacheControl(part.providerMetadata) ?? messageCacheControl, + }; + case "file": + return { + type: "text" as const, + text: part.data instanceof URL ? part.data.toString() : part.data, + cache_control: getCacheControl(part.providerMetadata) ?? 
messageCacheControl, + }; + default: { + const _exhaustiveCheck: never = part; + throw new Error(`Unsupported content part type: ${_exhaustiveCheck}`); + } + } + }); + + // For multi-part messages, don't add cache_control at the root level + messages.push({ + role: "user", + content: contentParts, + }); + + break; + } + + case "assistant": { + let text = ""; + const toolCalls: Array<{ + id: string; + type: "function"; + function: { name: string; arguments: string }; + }> = []; + + for (const part of content) { + switch (part.type) { + case "text": { + text += part.text; + break; + } + case "tool-call": { + toolCalls.push({ + id: part.toolCallId, + type: "function", + function: { + name: part.toolName, + arguments: JSON.stringify(part.args), + }, + }); + break; + } + // TODO: Handle reasoning and redacted-reasoning + case "reasoning": + case "redacted-reasoning": + break; + default: { + throw new Error(`Unsupported part: ${part}`); + } + } + } + + messages.push({ + role: "assistant", + content: text, + tool_calls: toolCalls.length > 0 ? toolCalls : undefined, + cache_control: getCacheControl(providerMetadata), + }); + + break; + } + + case "tool": { + for (const toolResponse of content) { + messages.push({ + role: "tool", + tool_call_id: toolResponse.toolCallId, + content: JSON.stringify(toolResponse.result), + cache_control: + getCacheControl(providerMetadata) ?? getCacheControl(toolResponse.providerMetadata), + }); + } + break; + } + + default: { + throw new Error(`Unsupported role: ${role}`); + } + } + } + + return messages; +} diff --git a/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-completion-prompt.ts b/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-completion-prompt.ts new file mode 100644 index 0000000..c241b77 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-completion-prompt.ts @@ -0,0 +1,134 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; +import { InvalidPromptError, UnsupportedFunctionalityError } from "@ai-sdk/provider"; + +export function convertToHyperbolicCompletionPrompt({ + prompt, + inputFormat, + user = "user", + assistant = "assistant", +}: { + prompt: LanguageModelV1Prompt; + inputFormat: "prompt" | "messages"; + user?: string; + assistant?: string; +}): { + prompt: string; +} { + // When the user supplied a prompt input, we don't transform it: + if ( + inputFormat === "prompt" && + prompt.length === 1 && + prompt[0] && + prompt[0].role === "user" && + prompt[0].content.length === 1 && + prompt[0].content[0] && + prompt[0].content[0].type === "text" + ) { + return { prompt: prompt[0].content[0].text }; + } + + // otherwise transform to a chat message format: + let text = ""; + + // if first message is a system message, add it to the text: + if (prompt[0] && prompt[0].role === "system") { + text += `${prompt[0].content}\n\n`; + prompt = prompt.slice(1); + } + + for (const { role, content } of prompt) { + switch (role) { + case "system": { + throw new InvalidPromptError({ + message: "Unexpected system message in prompt: ${content}", + prompt, + }); + } + + case "user": { + const userMessage = content + .map((part) => { + switch (part.type) { + case "text": { + return part.text; + } + case "image": { + throw new UnsupportedFunctionalityError({ + functionality: "images", + }); + } + case "file": { + throw new UnsupportedFunctionalityError({ + functionality: "file attachments", + }); + } + default: { + const _exhaustiveCheck: never = part; + throw new Error(`Unsupported content type: ${_exhaustiveCheck}`); + } + } + }) + .join(""); + + text += `${user}:\n${userMessage}\n\n`; + break; + } + + case "assistant": { + const assistantMessage = content + .map((part) => { + switch (part.type) { + case "text": { + return part.text; + } + case "tool-call": { + throw new 
UnsupportedFunctionalityError({ + functionality: "tool-call messages", + }); + } + case "reasoning": { + throw new UnsupportedFunctionalityError({ + functionality: "reasoning messages", + }); + } + + case "redacted-reasoning": { + throw new UnsupportedFunctionalityError({ + functionality: "redacted reasoning messages", + }); + } + + default: { + throw new Error(`Unsupported content type: ${part}`); + } + } + }) + .join(""); + + text += `${assistant}:\n${assistantMessage}\n\n`; + break; + } + + case "tool": { + throw new UnsupportedFunctionalityError({ + functionality: "tool messages", + }); + } + + default: { + throw new Error(`Unsupported role: ${role}`); + } + } + } + + // Assistant message prefix: + text += `${assistant}:\n`; + + return { + prompt: text, + }; +} diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.test.ts b/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.test.ts new file mode 100644 index 0000000..944ccce --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.test.ts @@ -0,0 +1,990 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; +import { + convertReadableStreamToArray, + JsonTestServer, + StreamingTestServer, +} from "@ai-sdk/provider-utils/test"; +import { describe, expect, it } from "vitest"; + +import { createHyperbolic } from "./hyperbolic-provider"; +import { mapHyperbolicChatLogProbsOutput } from "./map-hyperbolic-chat-logprobs"; + +const TEST_PROMPT: LanguageModelV1Prompt = [ + { role: "user", content: [{ type: "text", text: "Hello" }] }, +]; + +const TEST_LOGPROBS = { + content: [ + { + token: "Hello", + logprob: -0.0009994634, + top_logprobs: [ + { + token: "Hello", + logprob: -0.0009994634, + }, + ], + }, + { + token: "!", + logprob: -0.13410144, + top_logprobs: [ + { + token: "!", + logprob: -0.13410144, + }, + ], + }, + { + token: " How", + logprob: -0.0009250381, + top_logprobs: [ + { + token: " How", + logprob: -0.0009250381, + }, + ], + }, + { + token: " can", + logprob: -0.047709424, + top_logprobs: [ + { + token: " can", + logprob: -0.047709424, + }, + ], + }, + { + token: " I", + logprob: -0.000009014684, + top_logprobs: [ + { + token: " I", + logprob: -0.000009014684, + }, + ], + }, + { + token: " assist", + logprob: -0.009125131, + top_logprobs: [ + { + token: " assist", + logprob: -0.009125131, + }, + ], + }, + { + token: " you", + logprob: -0.0000066306106, + top_logprobs: [ + { + token: " you", + logprob: -0.0000066306106, + }, + ], + }, + { + token: " today", + logprob: -0.00011093382, + top_logprobs: [ + { + token: " today", + logprob: -0.00011093382, + }, + ], + }, + { + token: "?", + logprob: -0.00004596782, + top_logprobs: [ + { + token: "?", + logprob: -0.00004596782, + }, + ], + }, + ], +}; + +const provider = createHyperbolic({ + apiKey: "test-api-key", + compatibility: "strict", +}); + +const model = provider.chat("anthropic/claude-3.5-sonnet"); + +describe("doGenerate", () => { + const server = new 
JsonTestServer("https://api.hyperbolic.xyz/v1/chat/completions"); + + server.setupTestEnvironment(); + + function prepareJsonResponse({ + content = "", + usage = { + prompt_tokens: 4, + total_tokens: 34, + completion_tokens: 30, + }, + logprobs = null, + finish_reason = "stop", + }: { + content?: string; + usage?: { + prompt_tokens: number; + total_tokens: number; + completion_tokens: number; + }; + logprobs?: { + content: + | { + token: string; + logprob: number; + top_logprobs: { token: string; logprob: number }[]; + }[] + | null; + } | null; + finish_reason?: string; + } = {}) { + server.responseBodyJson = { + id: "chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd", + object: "chat.completion", + created: 1711115037, + model: "gpt-3.5-turbo-0125", + choices: [ + { + index: 0, + message: { + role: "assistant", + content, + }, + logprobs, + finish_reason, + }, + ], + usage, + system_fingerprint: "fp_3bc1b5746c", + }; + } + + it("should extract text response", async () => { + prepareJsonResponse({ content: "Hello, World!" 
}); + + const { text } = await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(text).toStrictEqual("Hello, World!"); + }); + + it("should extract usage", async () => { + prepareJsonResponse({ + content: "", + usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, + }); + + const { usage } = await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(usage).toStrictEqual({ + promptTokens: 20, + completionTokens: 5, + }); + }); + + it("should extract logprobs", async () => { + prepareJsonResponse({ + logprobs: TEST_LOGPROBS, + }); + + const response = await provider.chat("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + expect(response.logprobs).toStrictEqual(mapHyperbolicChatLogProbsOutput(TEST_LOGPROBS)); + }); + + it("should extract finish reason", async () => { + prepareJsonResponse({ + content: "", + finish_reason: "stop", + }); + + const response = await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(response.finishReason).toStrictEqual("stop"); + }); + + it("should support unknown finish reason", async () => { + prepareJsonResponse({ + content: "", + finish_reason: "eos", + }); + + const response = await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(response.finishReason).toStrictEqual("unknown"); + }); + + it("should expose the raw response headers", async () => { + prepareJsonResponse({ content: "" }); + + server.responseHeaders = { + "test-header": "test-value", + }; + + const { rawResponse } = await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(rawResponse?.headers).toStrictEqual({ + // default headers: + "content-length": "337", + 
"content-type": "application/json", + + // custom header + "test-header": "test-value", + }); + }); + + it("should pass the model and the messages", async () => { + prepareJsonResponse({ content: "" }); + + await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(await server.getRequestBodyJson()).toStrictEqual({ + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], + }); + }); + + it("should pass the models array when provided", async () => { + prepareJsonResponse({ content: "" }); + + const customModel = provider.chat("anthropic/claude-3.5-sonnet", { + models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], + }); + + await customModel.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(await server.getRequestBodyJson()).toStrictEqual({ + model: "anthropic/claude-3.5-sonnet", + models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], + messages: [{ role: "user", content: "Hello" }], + }); + }); + + it("should pass settings", async () => { + prepareJsonResponse(); + + await provider + .chat("openai/gpt-3.5-turbo", { + logitBias: { 50256: -100 }, + logprobs: 2, + parallelToolCalls: false, + user: "test-user-id", + }) + .doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(await server.getRequestBodyJson()).toStrictEqual({ + model: "openai/gpt-3.5-turbo", + messages: [{ role: "user", content: "Hello" }], + logprobs: true, + top_logprobs: 2, + logit_bias: { 50256: -100 }, + parallel_tool_calls: false, + user: "test-user-id", + }); + }); + + it("should pass tools and toolChoice", async () => { + prepareJsonResponse({ content: "" }); + + await model.doGenerate({ + inputFormat: "prompt", + mode: { + type: "regular", + tools: [ + { + type: "function", + name: "test-tool", + parameters: { + type: "object", + properties: { value: { type: "string" } }, + required: 
["value"], + additionalProperties: false, + $schema: "http://json-schema.org/draft-07/schema#", + }, + }, + ], + toolChoice: { + type: "tool", + toolName: "test-tool", + }, + }, + prompt: TEST_PROMPT, + }); + + expect(await server.getRequestBodyJson()).toStrictEqual({ + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], + tools: [ + { + type: "function", + function: { + name: "test-tool", + parameters: { + type: "object", + properties: { value: { type: "string" } }, + required: ["value"], + additionalProperties: false, + $schema: "http://json-schema.org/draft-07/schema#", + }, + }, + }, + ], + tool_choice: { + type: "function", + function: { name: "test-tool" }, + }, + }); + }); + + it("should pass headers", async () => { + prepareJsonResponse({ content: "" }); + + const provider = createHyperbolic({ + apiKey: "test-api-key", + headers: { + "Custom-Provider-Header": "provider-header-value", + }, + }); + + await provider.chat("openai/gpt-3.5-turbo").doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + headers: { + "Custom-Request-Header": "request-header-value", + }, + }); + + const requestHeaders = await server.getRequestHeaders(); + + expect(requestHeaders).toStrictEqual({ + authorization: "Bearer test-api-key", + "content-type": "application/json", + "custom-provider-header": "provider-header-value", + "custom-request-header": "request-header-value", + }); + }); +}); + +describe("doStream", () => { + const server = new StreamingTestServer("https://api.hyperbolic.xyz/v1/chat/completions"); + + server.setupTestEnvironment(); + + function prepareStreamResponse({ + content, + usage = { + prompt_tokens: 17, + total_tokens: 244, + completion_tokens: 227, + }, + logprobs = null, + finish_reason = "stop", + }: { + content: string[]; + usage?: { + prompt_tokens: number; + total_tokens: number; + completion_tokens: number; + }; + logprobs?: { + content: + | { + token: string; + logprob: number; + 
top_logprobs: { token: string; logprob: number }[]; + }[] + | null; + } | null; + finish_reason?: string; + }) { + server.responseChunks = [ + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + + `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`, + ...content.flatMap((text) => { + return ( + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + + `"system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"${text}"},"finish_reason":null}]}\n\n` + ); + }), + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + + `"system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"${finish_reason}","logprobs":${JSON.stringify( + logprobs, + )}}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":${JSON.stringify(usage)}}\n\n`, + "data: [DONE]\n\n", + ]; + } + + it("should stream text deltas", async () => { + prepareStreamResponse({ + content: ["Hello", ", ", "World!"], + finish_reason: "stop", + usage: { + prompt_tokens: 17, + total_tokens: 244, + completion_tokens: 227, + }, + logprobs: TEST_LOGPROBS, + }); + + const { stream } = await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + // note: space moved to last chunk bc of trimming + expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", + }, + { type: "text-delta", textDelta: "" }, + { + type: 
"response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", + }, + { type: "text-delta", textDelta: "Hello" }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", + }, + { type: "text-delta", textDelta: ", " }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", + }, + { type: "text-delta", textDelta: "World!" }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", + }, + { + type: "finish", + finishReason: "stop", + logprobs: mapHyperbolicChatLogProbsOutput(TEST_LOGPROBS), + usage: { promptTokens: 17, completionTokens: 227 }, + }, + ]); + }); + + it("should stream tool deltas", async () => { + server.responseChunks = [ + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + + `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":""}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\\""}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"value"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\":\\""}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Spark"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"le"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Day"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\"}"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, + "data: [DONE]\n\n", + ]; + + const { stream } = await model.doStream({ + inputFormat: "prompt", + mode: { + type: "regular", + tools: [ + { + type: "function", + name: "test-tool", + parameters: { + type: "object", + properties: { value: { type: "string" } }, + required: ["value"], + additionalProperties: false, + $schema: "http://json-schema.org/draft-07/schema#", + }, + }, + ], + }, + prompt: TEST_PROMPT, + }); + + expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "tool-call-delta", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + argsTextDelta: '{"', + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "tool-call-delta", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + argsTextDelta: "value", + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: 
"tool-call-delta", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + argsTextDelta: '":"', + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "tool-call-delta", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + argsTextDelta: "Spark", + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "tool-call-delta", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + argsTextDelta: "le", + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "tool-call-delta", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + argsTextDelta: " Day", + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "tool-call-delta", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + argsTextDelta: '"}', + }, + { + type: "tool-call", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + args: '{"value":"Sparkle Day"}', + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "finish", + finishReason: "tool-calls", + logprobs: undefined, + usage: { promptTokens: 53, 
completionTokens: 17 }, + }, + ]); + }); + + it("should stream tool call that is sent in one chunk", async () => { + server.responseChunks = [ + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + + `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":"{\\"value\\":\\"Sparkle Day\\"}"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, + "data: [DONE]\n\n", + ]; + + const { stream } = await model.doStream({ + inputFormat: "prompt", + mode: { + type: "regular", + tools: [ + { + type: "function", + name: "test-tool", + parameters: { + type: "object", + properties: { value: { type: "string" } }, + required: ["value"], + additionalProperties: false, + $schema: "http://json-schema.org/draft-07/schema#", + }, + }, + ], + }, + prompt: TEST_PROMPT, + }); + + expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "tool-call-delta", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + argsTextDelta: '{"value":"Sparkle Day"}', + }, + { + type: 
"tool-call", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolCallType: "function", + toolName: "test-tool", + args: '{"value":"Sparkle Day"}', + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", + }, + { + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", + }, + { + type: "finish", + finishReason: "tool-calls", + logprobs: undefined, + usage: { promptTokens: 53, completionTokens: 17 }, + }, + ]); + }); + + it("should handle error stream parts", async () => { + server.responseChunks = [ + `data: {"object": "error", "message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + + `help center at app.hyperbolic.xyz/support if you keep seeing this error.","type":"server_error","param":null,"code":null}\n\n`, + "data: [DONE]\n\n", + ]; + + const { stream } = await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: "error", + error: { + object: "error", + message: + "The server had an error processing your request. Sorry about that! 
" + + "You can retry your request, or contact us through our help center at " + + "app.hyperbolic.xyz/support if you keep seeing this error.", + type: "server_error", + code: null, + param: null, + }, + }, + { + finishReason: "error", + logprobs: undefined, + type: "finish", + usage: { + completionTokens: NaN, + promptTokens: NaN, + }, + }, + ]); + }); + + it("should handle unparsable stream parts", async () => { + server.responseChunks = [`data: {unparsable}\n\n`, "data: [DONE]\n\n"]; + + const { stream } = await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + const elements = await convertReadableStreamToArray(stream); + + expect(elements.length).toBe(2); + expect(elements[0]?.type).toBe("error"); + expect(elements[1]).toStrictEqual({ + finishReason: "error", + logprobs: undefined, + type: "finish", + usage: { + completionTokens: NaN, + promptTokens: NaN, + }, + }); + }); + + it("should expose the raw response headers", async () => { + prepareStreamResponse({ content: [] }); + + server.responseHeaders = { + "test-header": "test-value", + }; + + const { rawResponse } = await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(rawResponse?.headers).toStrictEqual({ + // default headers: + "content-type": "text/event-stream", + "cache-control": "no-cache", + connection: "keep-alive", + + // custom header + "test-header": "test-value", + }); + }); + + it("should pass the messages and the model", async () => { + prepareStreamResponse({ content: [] }); + + await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(await server.getRequestBodyJson()).toStrictEqual({ + stream: true, + stream_options: { include_usage: true }, + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], + }); + }); + + it("should pass headers", async () => { + prepareStreamResponse({ content: 
[] }); + + const provider = createHyperbolic({ + apiKey: "test-api-key", + headers: { + "Custom-Provider-Header": "provider-header-value", + }, + }); + + await provider.chat("openai/gpt-3.5-turbo").doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + headers: { + "Custom-Request-Header": "request-header-value", + }, + }); + + const requestHeaders = await server.getRequestHeaders(); + + expect(requestHeaders).toStrictEqual({ + authorization: "Bearer test-api-key", + "content-type": "application/json", + "custom-provider-header": "provider-header-value", + "custom-request-header": "request-header-value", + }); + }); + + it("should pass extra body", async () => { + prepareStreamResponse({ content: [] }); + + const provider = createHyperbolic({ + apiKey: "test-api-key", + extraBody: { + custom_field: "custom_value", + providers: { + anthropic: { + custom_field: "custom_value", + }, + }, + }, + }); + + await provider.chat("anthropic/claude-3.5-sonnet").doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + const requestBody = await server.getRequestBodyJson(); + + expect(requestBody).toHaveProperty("custom_field", "custom_value"); + expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.ts b/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.ts new file mode 100644 index 0000000..29d16b2 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.ts @@ -0,0 +1,659 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import type { + LanguageModelV1, + LanguageModelV1FinishReason, + LanguageModelV1FunctionTool, + LanguageModelV1LogProbs, + LanguageModelV1ProviderDefinedTool, + LanguageModelV1StreamPart, +} from "@ai-sdk/provider"; +import type { ParseResult } from "@ai-sdk/provider-utils"; +import { InvalidResponseDataError, UnsupportedFunctionalityError } from "@ai-sdk/provider"; +import { + combineHeaders, + createEventSourceResponseHandler, + createJsonResponseHandler, + generateId, + isParsableJson, + postJsonToApi, +} from "@ai-sdk/provider-utils"; +import { z } from "zod"; + +import type { HyperbolicChatModelId, HyperbolicChatSettings } from "./hyperbolic-chat-settings"; +import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; +import { + HyperbolicErrorResponseSchema, + hyperbolicFailedResponseHandler, + isHyperbolicError, + tryParsingHyperbolicError, +} from "./hyperbolic-error"; +import { mapHyperbolicChatLogProbsOutput } from "./map-hyperbolic-chat-logprobs"; +import { mapHyperbolicFinishReason } from "./map-hyperbolic-finish-reason"; + +function isFunctionTool( + tool: LanguageModelV1FunctionTool | LanguageModelV1ProviderDefinedTool, +): tool is LanguageModelV1FunctionTool { + return "parameters" in tool; +} + +type HyperbolicChatConfig = { + provider: string; + compatibility: "strict" | "compatible"; + headers: () => Record<string, string | undefined>; + url: (options: { modelId: string; path: string }) => string; + fetch?: typeof fetch; + extraBody?: Record<string, unknown>; +}; + +export class HyperbolicChatLanguageModel implements LanguageModelV1 { + readonly specificationVersion = "v1"; + readonly defaultObjectGenerationMode = "tool"; + + readonly modelId: HyperbolicChatModelId; + readonly settings: HyperbolicChatSettings; + + private readonly config: HyperbolicChatConfig; + + constructor( + modelId: HyperbolicChatModelId, + settings: HyperbolicChatSettings, + config: HyperbolicChatConfig, + ) { + this.modelId = modelId;
+ this.settings = settings; + this.config = config; + } + + get provider(): string { + return this.config.provider; + } + + private getArgs({ + mode, + prompt, + maxTokens, + temperature, + topP, + frequencyPenalty, + presencePenalty, + seed, + stopSequences, + responseFormat, + topK, + providerMetadata, + }: Parameters[0]) { + const type = mode.type; + const extraCallingBody = providerMetadata?.["hyperbolic"] ?? {}; + + const baseArgs = { + // model id: + model: this.modelId, + models: this.settings.models, + + // model specific settings: + logit_bias: this.settings.logitBias, + logprobs: + this.settings.logprobs === true || typeof this.settings.logprobs === "number" + ? true + : undefined, + top_logprobs: + typeof this.settings.logprobs === "number" + ? this.settings.logprobs + : typeof this.settings.logprobs === "boolean" + ? this.settings.logprobs + ? 0 + : undefined + : undefined, + user: this.settings.user, + parallel_tool_calls: this.settings.parallelToolCalls, + + // standardized settings: + max_tokens: maxTokens, + temperature, + top_p: topP, + frequency_penalty: frequencyPenalty, + presence_penalty: presencePenalty, + seed, + + stop: stopSequences, + response_format: responseFormat, + top_k: topK, + + // messages: + messages: convertToHyperbolicChatMessages(prompt), + + // Hyperbolic specific settings: + include_reasoning: this.settings.includeReasoning, + reasoning: this.settings.reasoning, + + // extra body: + ...this.config.extraBody, + ...this.settings.extraBody, + ...extraCallingBody, + }; + + switch (type) { + case "regular": { + return { ...baseArgs, ...prepareToolsAndToolChoice(mode) }; + } + + case "object-json": { + return { + ...baseArgs, + response_format: { type: "json_object" }, + }; + } + + case "object-tool": { + return { + ...baseArgs, + tool_choice: { type: "function", function: { name: mode.tool.name } }, + tools: [ + { + type: "function", + function: { + name: mode.tool.name, + description: mode.tool.description, + parameters: 
mode.tool.parameters, + }, + }, + ], + }; + } + + // Handle all non-text types with a single default case + default: { + const _exhaustiveCheck: never = type; + throw new UnsupportedFunctionalityError({ + functionality: `${_exhaustiveCheck} mode`, + }); + } + } + } + async doGenerate( + options: Parameters[0], + ): Promise>> { + const args = this.getArgs(options); + + const { responseHeaders, value: response } = await postJsonToApi({ + url: this.config.url({ + path: "/chat/completions", + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + body: args, + failedResponseHandler: hyperbolicFailedResponseHandler, + successfulResponseHandler: createJsonResponseHandler( + HyperbolicNonStreamChatCompletionResponseSchema, + ), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + const { messages: rawPrompt, ...rawSettings } = args; + const choice = response.choices[0]; + + if (!choice) { + throw new Error("No choice in response"); + } + + return { + response: { + id: response.id, + modelId: response.model, + }, + text: choice.message.content ?? undefined, + reasoning: choice.message.reasoning ?? undefined, + toolCalls: choice.message.tool_calls?.map((toolCall) => ({ + toolCallType: "function", + toolCallId: toolCall.id ?? generateId(), + toolName: toolCall.function.name, + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + args: toolCall.function.arguments!, + })), + finishReason: mapHyperbolicFinishReason(choice.finish_reason), + usage: { + promptTokens: response.usage?.prompt_tokens ?? 0, + completionTokens: response.usage?.completion_tokens ?? 
0, + }, + rawCall: { rawPrompt, rawSettings }, + rawResponse: { headers: responseHeaders }, + warnings: [], + logprobs: mapHyperbolicChatLogProbsOutput(choice.logprobs), + }; + } + + async doStream( + options: Parameters[0], + ): Promise>> { + const args = this.getArgs(options); + + const { responseHeaders, value: response } = await postJsonToApi({ + url: this.config.url({ + path: "/chat/completions", + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + body: { + ...args, + stream: true, + + // only include stream_options when in strict compatibility mode: + stream_options: + this.config.compatibility === "strict" ? { include_usage: true } : undefined, + }, + failedResponseHandler: hyperbolicFailedResponseHandler, + successfulResponseHandler: createEventSourceResponseHandler( + HyperbolicStreamChatCompletionChunkSchema, + ), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + const { messages: rawPrompt, ...rawSettings } = args; + + const toolCalls: Array<{ + id: string; + type: "function"; + function: { + name: string; + arguments: string; + }; + }> = []; + + let finishReason: LanguageModelV1FinishReason = "other"; + let usage: { promptTokens: number; completionTokens: number } = { + promptTokens: Number.NaN, + completionTokens: Number.NaN, + }; + let logprobs: LanguageModelV1LogProbs; + + return { + stream: response.pipeThrough( + new TransformStream< + ParseResult>, + LanguageModelV1StreamPart + >({ + transform(chunk, controller) { + // handle failed chunk parsing / validation: + if (!chunk.success) { + finishReason = "error"; + + // Error messages from the API are sometimes an ugly combo of text and JSON in a single chunk, so attempt to parse it as a hyperbolic error. 
+ const maybeHyperbolicError = tryParsingHyperbolicError(chunk.error); + if (maybeHyperbolicError) { + controller.enqueue({ type: "error", error: maybeHyperbolicError }); + return; + } + + controller.enqueue({ + type: "error", + error: chunk.error, + }); + return; + } + + const value = chunk.value; + + // handle error chunks: + if (isHyperbolicError(value)) { + finishReason = "error"; + controller.enqueue({ type: "error", error: value }); + return; + } + + if (value.id) { + controller.enqueue({ + type: "response-metadata", + id: value.id, + }); + } + + if (value.model) { + controller.enqueue({ + type: "response-metadata", + modelId: value.model, + }); + } + + if (value.usage != null) { + usage = { + promptTokens: value.usage.prompt_tokens, + completionTokens: value.usage.completion_tokens, + }; + } + + const choice = value.choices[0]; + + if (choice?.finish_reason != null) { + finishReason = mapHyperbolicFinishReason(choice.finish_reason); + } + + if (choice?.delta == null) { + return; + } + + const delta = choice.delta; + + if (delta.content != null) { + controller.enqueue({ + type: "text-delta", + textDelta: delta.content, + }); + } + + if (delta.reasoning != null) { + controller.enqueue({ + type: "reasoning", + textDelta: delta.reasoning, + }); + } + + const mappedLogprobs = mapHyperbolicChatLogProbsOutput(choice?.logprobs); + if (mappedLogprobs?.length) { + if (logprobs === undefined) logprobs = []; + logprobs.push(...mappedLogprobs); + } + + if (delta.tool_calls != null) { + for (const toolCallDelta of delta.tool_calls) { + const index = toolCallDelta.index; + + // Tool call start. Hyperbolic returns all information except the arguments in the first chunk. 
+ if (toolCalls[index] == null) { + if (toolCallDelta.type !== "function") { + throw new InvalidResponseDataError({ + data: toolCallDelta, + message: `Expected 'function' type.`, + }); + } + + if (toolCallDelta.id == null) { + throw new InvalidResponseDataError({ + data: toolCallDelta, + message: `Expected 'id' to be a string.`, + }); + } + + if (toolCallDelta.function?.name == null) { + throw new InvalidResponseDataError({ + data: toolCallDelta, + message: `Expected 'function.name' to be a string.`, + }); + } + + toolCalls[index] = { + id: toolCallDelta.id, + type: "function", + function: { + name: toolCallDelta.function.name, + arguments: toolCallDelta.function.arguments ?? "", + }, + }; + + const toolCall = toolCalls[index]; + + if (toolCall == null) { + throw new Error("Tool call is missing"); + } + + // check if tool call is complete (some providers send the full tool call in one chunk) + if ( + toolCall.function?.name != null && + toolCall.function?.arguments != null && + isParsableJson(toolCall.function.arguments) + ) { + // send delta + controller.enqueue({ + type: "tool-call-delta", + toolCallType: "function", + toolCallId: toolCall.id, + toolName: toolCall.function.name, + argsTextDelta: toolCall.function.arguments, + }); + + // send tool call + controller.enqueue({ + type: "tool-call", + toolCallType: "function", + toolCallId: toolCall.id ?? generateId(), + toolName: toolCall.function.name, + args: toolCall.function.arguments, + }); + } + + continue; + } + + // existing tool call, merge + const toolCall = toolCalls[index]; + + if (toolCall == null) { + throw new Error("Tool call is missing"); + } + + if (toolCallDelta.function?.arguments != null) { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + toolCall.function!.arguments += toolCallDelta.function?.arguments ?? 
""; + } + + // send delta + controller.enqueue({ + type: "tool-call-delta", + toolCallType: "function", + toolCallId: toolCall.id, + toolName: toolCall.function.name, + argsTextDelta: toolCallDelta.function.arguments ?? "", + }); + + // check if tool call is complete + if ( + toolCall.function?.name != null && + toolCall.function?.arguments != null && + isParsableJson(toolCall.function.arguments) + ) { + controller.enqueue({ + type: "tool-call", + toolCallType: "function", + toolCallId: toolCall.id ?? generateId(), + toolName: toolCall.function.name, + args: toolCall.function.arguments, + }); + } + } + } + }, + + flush(controller) { + controller.enqueue({ + type: "finish", + finishReason, + logprobs, + usage, + }); + }, + }), + ), + rawCall: { rawPrompt, rawSettings }, + rawResponse: { headers: responseHeaders }, + warnings: [], + }; + } +} + +const HyperbolicChatCompletionBaseResponseSchema = z.object({ + id: z.string().optional(), + model: z.string().optional(), + usage: z + .object({ + prompt_tokens: z.number(), + completion_tokens: z.number(), + total_tokens: z.number(), + }) + .nullish(), +}); + +// limited version of the schema, focussed on what is needed for the implementation +// this approach limits breakages when the API changes and increases efficiency +const HyperbolicNonStreamChatCompletionResponseSchema = + HyperbolicChatCompletionBaseResponseSchema.extend({ + choices: z.array( + z.object({ + message: z.object({ + role: z.literal("assistant"), + content: z.string().nullable().optional(), + reasoning: z.string().nullable().optional(), + tool_calls: z + .array( + z.object({ + id: z.string().optional().nullable(), + type: z.literal("function"), + function: z.object({ + name: z.string(), + arguments: z.string(), + }), + }), + ) + .optional(), + }), + index: z.number(), + logprobs: z + .object({ + content: z + .array( + z.object({ + token: z.string(), + logprob: z.number(), + top_logprobs: z.array( + z.object({ + token: z.string(), + logprob: z.number(), + 
}), + ), + }), + ) + .nullable(), + }) + .nullable() + .optional(), + finish_reason: z.string().optional().nullable(), + }), + ), + }); + +// limited version of the schema, focussed on what is needed for the implementation +// this approach limits breakages when the API changes and increases efficiency +const HyperbolicStreamChatCompletionChunkSchema = z.union([ + HyperbolicChatCompletionBaseResponseSchema.extend({ + choices: z.array( + z.object({ + delta: z + .object({ + role: z.enum(["assistant"]).optional(), + content: z.string().nullish(), + reasoning: z.string().nullish().optional(), + tool_calls: z + .array( + z.object({ + index: z.number(), + id: z.string().nullish(), + type: z.literal("function").optional(), + function: z.object({ + name: z.string().nullish(), + arguments: z.string().nullish(), + }), + }), + ) + .nullish(), + }) + .nullish(), + logprobs: z + .object({ + content: z + .array( + z.object({ + token: z.string(), + logprob: z.number(), + top_logprobs: z.array( + z.object({ + token: z.string(), + logprob: z.number(), + }), + ), + }), + ) + .nullable(), + }) + .nullish(), + finish_reason: z.string().nullable().optional(), + index: z.number(), + }), + ), + }), + HyperbolicErrorResponseSchema, +]); + +function prepareToolsAndToolChoice( + mode: Parameters[0]["mode"] & { + type: "regular"; + }, +) { + // when the tools array is empty, change it to undefined to prevent errors: + const tools = mode.tools?.length ? 
mode.tools : undefined; + + if (tools == null) { + return { tools: undefined, tool_choice: undefined }; + } + + const mappedTools = tools.map((tool) => { + if (isFunctionTool(tool)) { + return { + type: "function" as const, + function: { + name: tool.name, + description: tool.description, + parameters: tool.parameters, + }, + }; + } else { + return { + type: "function" as const, + function: { + name: tool.name, + }, + }; + } + }); + + const toolChoice = mode.toolChoice; + + if (toolChoice == null) { + return { tools: mappedTools, tool_choice: undefined }; + } + + const type = toolChoice.type; + + switch (type) { + case "auto": + case "none": + case "required": + return { tools: mappedTools, tool_choice: type }; + case "tool": + return { + tools: mappedTools, + tool_choice: { + type: "function", + function: { + name: toolChoice.toolName, + }, + }, + }; + default: { + const _exhaustiveCheck: never = type; + throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`); + } + } +} diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-chat-prompt.ts b/packages/ai-sdk-provider-2/src/hyperbolic-chat-prompt.ts new file mode 100644 index 0000000..f1c2a1e --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-chat-prompt.ts @@ -0,0 +1,67 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +// Type for Hyperbolic Cache Control following Anthropic's pattern +export type HyperbolicCacheControl = { type: "ephemeral" }; + +export type HyperbolicChatPrompt = Array; + +export type ChatCompletionMessageParam = + | ChatCompletionSystemMessageParam + | ChatCompletionUserMessageParam + | ChatCompletionAssistantMessageParam + | ChatCompletionToolMessageParam; + +export interface ChatCompletionSystemMessageParam { + role: "system"; + content: string; + cache_control?: HyperbolicCacheControl; +} + +export interface ChatCompletionUserMessageParam { + role: "user"; + content: string | Array; + cache_control?: HyperbolicCacheControl; +} + +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage; + +export interface ChatCompletionContentPartImage { + type: "image_url"; + image_url: { + url: string; + }; + cache_control?: HyperbolicCacheControl; +} + +export interface ChatCompletionContentPartText { + type: "text"; + text: string; + cache_control?: HyperbolicCacheControl; +} + +export interface ChatCompletionAssistantMessageParam { + role: "assistant"; + content?: string | null; + tool_calls?: Array; + cache_control?: HyperbolicCacheControl; +} + +export interface ChatCompletionMessageToolCall { + type: "function"; + id: string; + function: { + arguments: string; + name: string; + }; +} + +export interface ChatCompletionToolMessageParam { + role: "tool"; + content: string; + tool_call_id: string; + cache_control?: HyperbolicCacheControl; +} diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider-2/src/hyperbolic-chat-settings.ts new file mode 100644 index 0000000..76c8fc9 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-chat-settings.ts @@ -0,0 +1,50 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import type { HyperbolicSharedSettings } from "./types"; + +// https://app.hyperbolic.xyz/models +export type HyperbolicChatModelId = string; + +export type HyperbolicChatSettings = { + /** + * Modify the likelihood of specified tokens appearing in the completion. + * + * Accepts a JSON object that maps tokens (specified by their token ID in + * the GPT tokenizer) to an associated bias value from -100 to 100. You + * can use this tokenizer tool to convert text to token IDs. Mathematically, + * the bias is added to the logits generated by the model prior to sampling. + * The exact effect will vary per model, but values between -1 and 1 should + * decrease or increase likelihood of selection; values like -100 or 100 + * should result in a ban or exclusive selection of the relevant token. + * + * As an example, you can pass {"50256": -100} to prevent the <|endoftext|> + * token from being generated. + */ + logitBias?: Record; + + /** + * Return the log probabilities of the tokens. Including logprobs will increase + * the response size and can slow down response times. However, it can + * be useful to better understand how the model is behaving. + * + * Setting to true will return the log probabilities of the tokens that + * were generated. + * + * Setting to a number will return the log probabilities of the top n + * tokens that were generated. + */ + logprobs?: boolean | number; + + /** + * Whether to enable parallel function calling during tool use. Default to true. + */ + parallelToolCalls?: boolean; + + /** + * A unique identifier representing your end-user, which can help Hyperbolic + * to monitor and detect abuse. Learn more. 
+ */ + user?: string; +} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.test.ts b/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.test.ts new file mode 100644 index 0000000..9fdaec6 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.test.ts @@ -0,0 +1,496 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; +import { + convertReadableStreamToArray, + JsonTestServer, + StreamingTestServer, +} from "@ai-sdk/provider-utils/test"; +import { describe, expect, it } from "vitest"; + +import { createHyperbolic } from "./hyperbolic-provider"; +import { mapHyperbolicCompletionLogProbs } from "./map-hyperbolic-completion-logprobs"; + +const TEST_PROMPT: LanguageModelV1Prompt = [ + { role: "user", content: [{ type: "text", text: "Hello" }] }, +]; + +const TEST_LOGPROBS = { + tokens: [" ever", " after", ".\n\n", "The", " end", "."], + token_logprobs: [-0.0664508, -0.014520033, -1.3820221, -0.7890417, -0.5323165, -0.10247037], + top_logprobs: [ + { + " ever": -0.0664508, + }, + { + " after": -0.014520033, + }, + { + ".\n\n": -1.3820221, + }, + { + The: -0.7890417, + }, + { + " end": -0.5323165, + }, + { + ".": -0.10247037, + }, + ] as Record[], +}; + +const provider = createHyperbolic({ + apiKey: "test-api-key", + compatibility: "strict", +}); + +const model = provider.completion("meta-llama/Llama-3.1-405B-FP8"); + +describe("doGenerate", () => { + const server = new JsonTestServer("https://api.hyperbolic.xyz/v1/completions"); + server.setupTestEnvironment(); + + function prepareJsonResponse({ + content = "", + usage = { + prompt_tokens: 4, + total_tokens: 34, + completion_tokens: 30, + }, + logprobs = null, + finish_reason = "stop", + }: { + content?: string; + usage?: { + prompt_tokens: number; 
+ total_tokens: number; + completion_tokens: number; + }; + logprobs?: { + tokens: string[]; + token_logprobs: number[]; + top_logprobs: Record[]; + } | null; + finish_reason?: string; + }) { + server.responseBodyJson = { + id: "cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB", + object: "text_completion", + created: 1711363706, + model: "meta-llama/Llama-3.1-405B-FP8", + choices: [ + { + text: content, + index: 0, + logprobs, + finish_reason, + }, + ], + usage, + }; + } + + it("should extract text response", async () => { + prepareJsonResponse({ content: "Hello, World!" }); + + const { text } = await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(text).toStrictEqual("Hello, World!"); + }); + + it("should extract usage", async () => { + prepareJsonResponse({ + content: "", + usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, + }); + + const { usage } = await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(usage).toStrictEqual({ + promptTokens: 20, + completionTokens: 5, + }); + }); + + it("should extract logprobs", async () => { + prepareJsonResponse({ logprobs: TEST_LOGPROBS }); + + const provider = createHyperbolic({ apiKey: "test-api-key" }); + + const response = await provider.completion("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + expect(response.logprobs).toStrictEqual(mapHyperbolicCompletionLogProbs(TEST_LOGPROBS)); + }); + + it("should extract finish reason", async () => { + prepareJsonResponse({ + content: "", + finish_reason: "stop", + }); + + const { finishReason } = await provider.completion("meta-llama/Llama-3.1-405B-FP8").doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(finishReason).toStrictEqual("stop"); + }); + + it("should support unknown finish reason", async () 
=> { + prepareJsonResponse({ + content: "", + finish_reason: "eos", + }); + + const { finishReason } = await provider.completion("meta-llama/Llama-3.1-405B-FP8").doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(finishReason).toStrictEqual("unknown"); + }); + + it("should expose the raw response headers", async () => { + prepareJsonResponse({ content: "" }); + + server.responseHeaders = { + "test-header": "test-value", + }; + + const { rawResponse } = await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(rawResponse?.headers).toStrictEqual({ + // default headers: + "content-length": "273", + "content-type": "application/json", + + // custom header + "test-header": "test-value", + }); + }); + + it("should pass the model and the prompt", async () => { + prepareJsonResponse({ content: "" }); + + await model.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(await server.getRequestBodyJson()).toStrictEqual({ + model: "meta-llama/Llama-3.1-405B-FP8", + prompt: "Hello", + }); + }); + + it("should pass the models array when provided", async () => { + prepareJsonResponse({ content: "" }); + + const customModel = provider.completion("meta-llama/Llama-3.1-405B-FP8", { + models: ["openai/gpt-4", "anthropic/claude-2"], + }); + + await customModel.doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(await server.getRequestBodyJson()).toStrictEqual({ + model: "meta-llama/Llama-3.1-405B-FP8", + models: ["openai/gpt-4", "anthropic/claude-2"], + prompt: "Hello", + }); + }); + + it("should pass headers", async () => { + prepareJsonResponse({ content: "" }); + + const provider = createHyperbolic({ + apiKey: "test-api-key", + headers: { + "Custom-Provider-Header": "provider-header-value", + }, + }); + + await 
provider.completion("meta-llama/Llama-3.1-405B-FP8").doGenerate({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + headers: { + "Custom-Request-Header": "request-header-value", + }, + }); + + const requestHeaders = await server.getRequestHeaders(); + + expect(requestHeaders).toStrictEqual({ + authorization: "Bearer test-api-key", + "content-type": "application/json", + "custom-provider-header": "provider-header-value", + "custom-request-header": "request-header-value", + }); + }); +}); + +describe("doStream", () => { + const server = new StreamingTestServer("https://api.hyperbolic.xyz/v1/completions"); + + server.setupTestEnvironment(); + + function prepareStreamResponse({ + content, + finish_reason = "stop", + usage = { + prompt_tokens: 10, + total_tokens: 372, + completion_tokens: 362, + }, + logprobs = null, + }: { + content: string[]; + usage?: { + prompt_tokens: number; + total_tokens: number; + completion_tokens: number; + }; + logprobs?: { + tokens: string[]; + token_logprobs: number[]; + top_logprobs: Record[]; + } | null; + finish_reason?: string; + }) { + server.responseChunks = [ + ...content.map((text) => { + return ( + `data: {"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,` + + `"choices":[{"text":"${text}","index":0,"logprobs":null,"finish_reason":null}],"model":"meta-llama/Llama-3.1-405B-FP8"}\n\n` + ); + }), + `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,` + + `"choices":[{"text":"","index":0,"logprobs":${JSON.stringify( + logprobs, + )},"finish_reason":"${finish_reason}"}],"model":"meta-llama/Llama-3.1-405B-FP8"}\n\n`, + `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,` + + `"model":"meta-llama/Llama-3.1-405B-FP8","usage":${JSON.stringify( + usage, + )},"choices":[]}\n\n`, + "data: [DONE]\n\n", + ]; + } + + it("should stream text deltas", async () => { + 
prepareStreamResponse({ + content: ["Hello", ", ", "World!"], + finish_reason: "stop", + usage: { + prompt_tokens: 10, + total_tokens: 372, + completion_tokens: 362, + }, + logprobs: TEST_LOGPROBS, + }); + + const { stream } = await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + // note: space moved to last chunk bc of trimming + expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { type: "text-delta", textDelta: "Hello" }, + { type: "text-delta", textDelta: ", " }, + { type: "text-delta", textDelta: "World!" }, + { type: "text-delta", textDelta: "" }, + { + type: "finish", + finishReason: "stop", + logprobs: mapHyperbolicCompletionLogProbs(TEST_LOGPROBS), + usage: { promptTokens: 10, completionTokens: 362 }, + }, + ]); + }); + + it("should handle error stream parts", async () => { + server.responseChunks = [ + `data: {"object": "error", "message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + + `help center at app.hyperbolic.xyz/support if you keep seeing this error.","type":"server_error","param":null,"code":null}\n\n`, + "data: [DONE]\n\n", + ]; + + const { stream } = await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: "error", + error: { + object: "error", + message: + "The server had an error processing your request. Sorry about that! 
" + + "You can retry your request, or contact us through our help center at " + + "app.hyperbolic.xyz/support if you keep seeing this error.", + type: "server_error", + code: null, + param: null, + }, + }, + { + finishReason: "error", + logprobs: undefined, + type: "finish", + usage: { + completionTokens: NaN, + promptTokens: NaN, + }, + }, + ]); + }); + + it("should handle unparsable stream parts", async () => { + server.responseChunks = [`data: {unparsable}\n\n`, "data: [DONE]\n\n"]; + + const { stream } = await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + const elements = await convertReadableStreamToArray(stream); + + expect(elements.length).toBe(2); + expect(elements[0]?.type).toBe("error"); + expect(elements[1]).toStrictEqual({ + finishReason: "error", + logprobs: undefined, + type: "finish", + usage: { + completionTokens: NaN, + promptTokens: NaN, + }, + }); + }); + + it("should expose the raw response headers", async () => { + prepareStreamResponse({ content: [] }); + + server.responseHeaders = { + "test-header": "test-value", + }; + + const { rawResponse } = await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(rawResponse?.headers).toStrictEqual({ + // default headers: + "content-type": "text/event-stream", + "cache-control": "no-cache", + connection: "keep-alive", + + // custom header + "test-header": "test-value", + }); + }); + + it("should pass the model and the prompt", async () => { + prepareStreamResponse({ content: [] }); + + await model.doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + expect(await server.getRequestBodyJson()).toStrictEqual({ + stream: true, + stream_options: { include_usage: true }, + model: "meta-llama/Llama-3.1-405B-FP8", + prompt: "Hello", + }); + }); + + it("should pass headers", async () => { + prepareStreamResponse({ content: [] }); + + const provider = 
createHyperbolic({ + apiKey: "test-api-key", + headers: { + "Custom-Provider-Header": "provider-header-value", + }, + }); + + await provider.completion("meta-llama/Llama-3.1-405B-FP8").doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + headers: { + "Custom-Request-Header": "request-header-value", + }, + }); + + const requestHeaders = await server.getRequestHeaders(); + + expect(requestHeaders).toStrictEqual({ + authorization: "Bearer test-api-key", + "content-type": "application/json", + "custom-provider-header": "provider-header-value", + "custom-request-header": "request-header-value", + }); + }); + + it("should pass extra body", async () => { + prepareStreamResponse({ content: [] }); + + const provider = createHyperbolic({ + apiKey: "test-api-key", + extraBody: { + custom_field: "custom_value", + providers: { + anthropic: { + custom_field: "custom_value", + }, + }, + }, + }); + + await provider.completion("openai/gpt-4o").doStream({ + inputFormat: "prompt", + mode: { type: "regular" }, + prompt: TEST_PROMPT, + }); + + const requestBody = await server.getRequestBodyJson(); + + expect(requestBody).toHaveProperty("custom_field", "custom_value"); + expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.ts b/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.ts new file mode 100644 index 0000000..a37c22c --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.ts @@ -0,0 +1,352 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import type { + LanguageModelV1, + LanguageModelV1FinishReason, + LanguageModelV1LogProbs, + LanguageModelV1StreamPart, +} from "@ai-sdk/provider"; +import type { ParseResult } from "@ai-sdk/provider-utils"; +import { UnsupportedFunctionalityError } from "@ai-sdk/provider"; +import { + combineHeaders, + createEventSourceResponseHandler, + createJsonResponseHandler, + postJsonToApi, +} from "@ai-sdk/provider-utils"; +import { z } from "zod"; + +import type { + HyperbolicCompletionModelId, + HyperbolicCompletionSettings, +} from "./hyperbolic-completion-settings"; +import { convertToHyperbolicCompletionPrompt } from "./convert-to-hyperbolic-completion-prompt"; +import { + HyperbolicErrorResponseSchema, + hyperbolicFailedResponseHandler, + isHyperbolicError, +} from "./hyperbolic-error"; +import { mapHyperbolicCompletionLogProbs } from "./map-hyperbolic-completion-logprobs"; +import { mapHyperbolicFinishReason } from "./map-hyperbolic-finish-reason"; + +type HyperbolicCompletionConfig = { + provider: string; + compatibility: "strict" | "compatible"; + headers: () => Record; + url: (options: { modelId: string; path: string }) => string; + fetch?: typeof fetch; + extraBody?: Record; +}; + +export class HyperbolicCompletionLanguageModel implements LanguageModelV1 { + readonly specificationVersion = "v1"; + readonly defaultObjectGenerationMode = undefined; + + readonly modelId: HyperbolicCompletionModelId; + readonly settings: HyperbolicCompletionSettings; + + private readonly config: HyperbolicCompletionConfig; + + constructor( + modelId: HyperbolicCompletionModelId, + settings: HyperbolicCompletionSettings, + config: HyperbolicCompletionConfig, + ) { + this.modelId = modelId; + this.settings = settings; + this.config = config; + } + + get provider(): string { + return this.config.provider; + } + + private getArgs({ + mode, + inputFormat, + prompt, + maxTokens, + temperature, + topP, + frequencyPenalty, + 
presencePenalty, + seed, + responseFormat, + topK, + stopSequences, + providerMetadata, + }: Parameters[0]) { + const type = mode.type; + + const extraCallingBody = providerMetadata?.["hyperbolic"] ?? {}; + + const { prompt: completionPrompt } = convertToHyperbolicCompletionPrompt({ + prompt, + inputFormat, + }); + + const baseArgs = { + // model id: + model: this.modelId, + models: this.settings.models, + + // model specific settings: + logit_bias: this.settings.logitBias, + logprobs: + typeof this.settings.logprobs === "number" + ? this.settings.logprobs + : typeof this.settings.logprobs === "boolean" + ? this.settings.logprobs + ? 0 + : undefined + : undefined, + suffix: this.settings.suffix, + user: this.settings.user, + + // standardized settings: + max_tokens: maxTokens, + temperature, + top_p: topP, + frequency_penalty: frequencyPenalty, + presence_penalty: presencePenalty, + seed, + + stop: stopSequences, + response_format: responseFormat, + top_k: topK, + + // prompt: + prompt: completionPrompt, + + // Hyperbolic specific settings: + include_reasoning: this.settings.includeReasoning, + reasoning: this.settings.reasoning, + + // extra body: + ...this.config.extraBody, + ...this.settings.extraBody, + ...extraCallingBody, + }; + + switch (type) { + case "regular": { + if (mode.tools?.length) { + throw new UnsupportedFunctionalityError({ + functionality: "tools", + }); + } + + if (mode.toolChoice) { + throw new UnsupportedFunctionalityError({ + functionality: "toolChoice", + }); + } + + return baseArgs; + } + + case "object-json": { + throw new UnsupportedFunctionalityError({ + functionality: "object-json mode", + }); + } + + case "object-tool": { + throw new UnsupportedFunctionalityError({ + functionality: "object-tool mode", + }); + } + + // Handle all non-text types with a single default case + default: { + const _exhaustiveCheck: never = type; + throw new UnsupportedFunctionalityError({ + functionality: `${_exhaustiveCheck} mode`, + }); + } + } + } + + 
async doGenerate( + options: Parameters[0], + ): Promise>> { + const args = this.getArgs(options); + + const { responseHeaders, value: response } = await postJsonToApi({ + url: this.config.url({ + path: "/completions", + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + body: args, + failedResponseHandler: hyperbolicFailedResponseHandler, + successfulResponseHandler: createJsonResponseHandler(HyperbolicCompletionChunkSchema), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + const { prompt: rawPrompt, ...rawSettings } = args; + if (isHyperbolicError(response)) { + throw new Error(`${response.message}`); + } + + const choice = response.choices[0]; + + if (!choice) { + throw new Error("No choice in Hyperbolic completion response"); + } + + return { + response: { + id: response.id, + modelId: response.model, + }, + text: choice.text ?? "", + reasoning: choice.reasoning || undefined, + usage: { + promptTokens: response.usage?.prompt_tokens ?? 0, + completionTokens: response.usage?.completion_tokens ?? 0, + }, + finishReason: mapHyperbolicFinishReason(choice.finish_reason), + logprobs: mapHyperbolicCompletionLogProbs(choice.logprobs), + rawCall: { rawPrompt, rawSettings }, + rawResponse: { headers: responseHeaders }, + warnings: [], + }; + } + + async doStream( + options: Parameters[0], + ): Promise>> { + const args = this.getArgs(options); + + const { responseHeaders, value: response } = await postJsonToApi({ + url: this.config.url({ + path: "/completions", + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + body: { + ...this.getArgs(options), + stream: true, + + // only include stream_options when in strict compatibility mode: + stream_options: + this.config.compatibility === "strict" ? 
{ include_usage: true } : undefined, + }, + failedResponseHandler: hyperbolicFailedResponseHandler, + successfulResponseHandler: createEventSourceResponseHandler(HyperbolicCompletionChunkSchema), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + const { prompt: rawPrompt, ...rawSettings } = args; + + let finishReason: LanguageModelV1FinishReason = "other"; + let usage: { promptTokens: number; completionTokens: number } = { + promptTokens: Number.NaN, + completionTokens: Number.NaN, + }; + let logprobs: LanguageModelV1LogProbs; + + return { + stream: response.pipeThrough( + new TransformStream< + ParseResult>, + LanguageModelV1StreamPart + >({ + transform(chunk, controller) { + // handle failed chunk parsing / validation: + if (!chunk.success) { + finishReason = "error"; + controller.enqueue({ type: "error", error: chunk.error }); + return; + } + + const value = chunk.value; + + // handle error chunks: + if (isHyperbolicError(value)) { + finishReason = "error"; + controller.enqueue({ type: "error", error: value }); + return; + } + + if (value.usage != null) { + usage = { + promptTokens: value.usage.prompt_tokens, + completionTokens: value.usage.completion_tokens, + }; + } + + const choice = value.choices[0]; + + if (choice?.finish_reason != null) { + finishReason = mapHyperbolicFinishReason(choice.finish_reason); + } + + if (choice?.text != null) { + controller.enqueue({ + type: "text-delta", + textDelta: choice.text, + }); + } + + const mappedLogprobs = mapHyperbolicCompletionLogProbs(choice?.logprobs); + if (mappedLogprobs?.length) { + if (logprobs === undefined) logprobs = []; + logprobs.push(...mappedLogprobs); + } + }, + + flush(controller) { + controller.enqueue({ + type: "finish", + finishReason, + logprobs, + usage, + }); + }, + }), + ), + rawCall: { rawPrompt, rawSettings }, + rawResponse: { headers: responseHeaders }, + warnings: [], + }; + } +} + +// limited version of the schema, focussed on what is needed for the implementation 
+// this approach limits breakages when the API changes and increases efficiency +const HyperbolicCompletionChunkSchema = z.union([ + z.object({ + id: z.string().optional(), + model: z.string().optional(), + choices: z.array( + z.object({ + text: z.string(), + reasoning: z.string().nullish().optional(), + finish_reason: z.string().nullish(), + index: z.number(), + logprobs: z + .object({ + tokens: z.array(z.string()), + token_logprobs: z.array(z.number()), + top_logprobs: z.array(z.record(z.string(), z.number())).nullable(), + }) + .nullable() + .optional(), + }), + ), + usage: z + .object({ + prompt_tokens: z.number(), + completion_tokens: z.number(), + }) + .optional() + .nullable(), + }), + HyperbolicErrorResponseSchema, +]); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-completion-settings.ts b/packages/ai-sdk-provider-2/src/hyperbolic-completion-settings.ts new file mode 100644 index 0000000..efc31a0 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-completion-settings.ts @@ -0,0 +1,42 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +import type { HyperbolicSharedSettings } from "./types"; + +export type HyperbolicCompletionModelId = string; + +export type HyperbolicCompletionSettings = { + /** + * Modify the likelihood of specified tokens appearing in the completion. + * + * Accepts a JSON object that maps tokens (specified by their token ID in + * the GPT tokenizer) to an associated bias value from -100 to 100. You + * can use this tokenizer tool to convert text to token IDs. Mathematically, + * the bias is added to the logits generated by the model prior to sampling. + * The exact effect will vary per model, but values between -1 and 1 should + * decrease or increase likelihood of selection; values like -100 or 100 + * should result in a ban or exclusive selection of the relevant token. 
+ * As an example, you can pass {"50256": -100} to prevent the <|endoftext|> + * token from being generated. + */ + logitBias?: Record; + + /** + * Return the log probabilities of the tokens. Including logprobs will increase + * the response size and can slow down response times. However, it can + * be useful to better understand how the model is behaving. + * + * Setting to true will return the log probabilities of the tokens that + * were generated. + * + * Setting to a number will return the log probabilities of the top n + * tokens that were generated. + */ + logprobs?: boolean | number; + + /** + * The suffix that comes after a completion of inserted text. + */ + suffix?: string; +} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-error.ts b/packages/ai-sdk-provider-2/src/hyperbolic-error.ts new file mode 100644 index 0000000..60eed7e --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-error.ts @@ -0,0 +1,49 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import type { TypeValidationError } from "ai"; +import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"; +import { JSONParseError } from "ai"; +import { z } from "zod"; + +export const HyperbolicErrorResponseSchema = z.object({ + object: z.literal("error"), + message: z.string(), + type: z.string(), + param: z.any().nullable(), + code: z.coerce.number().nullable(), +}); + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export const isHyperbolicError = (data: any): data is HyperbolicErrorData => { + return "object" in data && data.object === "error"; +}; + +export type HyperbolicErrorData = z.infer; + +export const hyperbolicFailedResponseHandler = createJsonErrorResponseHandler({ + errorSchema: HyperbolicErrorResponseSchema, + errorToMessage: (data) => data.message, +}); + +/** + * Error messages from the API are sometimes an ugly combo of text and JSON in a single chunk. Extract data from error message if it contains JSON + */ +export const tryParsingHyperbolicError = (error: JSONParseError | TypeValidationError) => { + if (!JSONParseError.isInstance(error)) { + return undefined; + } + + const jsonMatch = error.text.match(/\{.*\}/); // Match between brackets + if (jsonMatch) { + try { + const parsedErrorJson = JSON.parse(jsonMatch[0]); + if (parsedErrorJson.message) { + return HyperbolicErrorResponseSchema.parse(parsedErrorJson); + } + } catch { + return undefined; + } + } +}; diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-image-language-model.ts b/packages/ai-sdk-provider-2/src/hyperbolic-image-language-model.ts new file mode 100644 index 0000000..da5501d --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-image-language-model.ts @@ -0,0 +1,130 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import type { ImageModelV1, ImageModelV1CallWarning } from "@ai-sdk/provider"; +import { combineHeaders, createJsonResponseHandler, postJsonToApi } from "@ai-sdk/provider-utils"; +import { z } from "zod"; + +import type { + HyperbolicImageModelId, + HyperbolicImageProviderOptions, + HyperbolicImageProviderResponseMetadata, + HyperbolicImageSettings, +} from "./hyperbolic-image-settings"; +import { hyperbolicFailedResponseHandler } from "./hyperbolic-error"; + +type HyperbolicImageModelConfig = { + provider: string; + compatibility: "strict" | "compatible"; + headers: () => Record; + url: (options: { modelId: string; path: string }) => string; + fetch?: typeof fetch; + extraBody?: Record; +}; + +export class HyperbolicImageModel implements ImageModelV1 { + readonly specificationVersion = "v1"; + readonly provider = "hyperbolic.image"; + + get maxImagesPerCall(): number { + return this.settings.maxImagesPerCall ?? 1; + } + + constructor( + readonly modelId: HyperbolicImageModelId, + private readonly settings: HyperbolicImageSettings, + private readonly config: HyperbolicImageModelConfig, + ) {} + + async doGenerate( + options: Omit[0], "providerOptions"> & { + providerOptions: { + hyperbolic?: HyperbolicImageProviderOptions; + }; + }, + ): Promise< + Omit>, "response"> & { + response: Awaited>["response"] & { + hyperbolic: HyperbolicImageProviderResponseMetadata; + }; + } + > { + const warnings: Array = []; + const [width, height] = options.size ? 
options.size.split("x").map(Number) : []; + + const args = { + prompt: options.prompt, + height, + width, + cfg_scale: options.providerOptions?.hyperbolic?.cfgScale, + enable_refiner: options.providerOptions?.hyperbolic?.enableRefiner, + model_name: this.modelId, + negative_prompt: options.providerOptions?.hyperbolic?.negativePrompt, + steps: options.providerOptions?.hyperbolic?.steps, + strength: options.providerOptions?.hyperbolic?.strength, + image: options.providerOptions?.hyperbolic?.image, + }; + + if (options.aspectRatio != undefined) { + warnings.push({ + type: "unsupported-setting", + setting: "aspectRatio", + details: "This model does not support `aspectRatio`. Use `size` instead.", + }); + } + if (options.seed != undefined) { + warnings.push({ + type: "unsupported-setting", + setting: "seed", + details: "This model does not support `seed`.", + }); + } + if (options.n != undefined) { + warnings.push({ + type: "unsupported-setting", + setting: "n", + details: "This model does not support `n`.", + }); + } + + const { value: response, responseHeaders } = await postJsonToApi({ + url: this.config.url({ + path: "/image/generation", + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + body: args, + failedResponseHandler: hyperbolicFailedResponseHandler, + successfulResponseHandler: createJsonResponseHandler(hyperbolicImageResponseSchema), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + return { + images: response.images.map((image) => image.image), + warnings, + response: { + timestamp: new Date(), + modelId: this.modelId, + headers: responseHeaders, + hyperbolic: { + inferenceTime: response.inference_time, + randomSeeds: response.images.map((image) => image.random_seed), + }, + }, + }; + } +} + +// minimal version of the schema, focussed on what is needed for the implementation to avoid breaking changes +const hyperbolicImageResponseSchema = z.object({ + images: z.array( + z.object({ + 
image: z.string(), + index: z.number(), + random_seed: z.number(), + }), + ), + inference_time: z.number(), +}); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-image-settings.ts b/packages/ai-sdk-provider-2/src/hyperbolic-image-settings.ts new file mode 100644 index 0000000..9399faf --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-image-settings.ts @@ -0,0 +1,40 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +import type { Experimental_GenerateImageResult } from "ai"; + +import type { HyperbolicSharedSettings } from "./types"; + +export type HyperbolicImageModelId = string; + +export type HyperbolicImageSettings = { + /** + * Override the maximum number of images per call (default is dependent on the + * model, or 1 for an unknown model). + */ + maxImagesPerCall?: number; +} & HyperbolicSharedSettings; + +export type HyperbolicImageProviderOptions = { + cfgScale?: number; + negativePrompt?: string; + steps?: number; + strength?: number; + enableRefiner?: boolean; + image?: string; +}; + +export type HyperbolicImageProviderResponseMetadata = { + inferenceTime: number; + randomSeeds: number[]; +}; + +export type Experimental_HyperbolicGenerateImageResult = Omit< + Experimental_GenerateImageResult, + "responses" +> & { + responses: (Experimental_GenerateImageResult["responses"][number] & { + hyperbolic: HyperbolicImageProviderResponseMetadata; + })[]; +}; diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-provider-options.test.ts b/packages/ai-sdk-provider-2/src/hyperbolic-provider-options.test.ts new file mode 100644 index 0000000..b1db769 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-provider-options.test.ts @@ -0,0 +1,64 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; +import { createTestServer } from "@ai-sdk/provider-utils/test"; +import { streamText } from "ai"; +import { describe, expect, it, vi } from "vitest"; + +import { createHyperbolic } from "./hyperbolic-provider"; + +// Add type assertions for the mocked classes +const TEST_MESSAGES: LanguageModelV1Prompt = [ + { role: "user", content: [{ type: "text", text: "Hello" }] }, +]; + +describe("providerOptions", () => { + const server = createTestServer({ + "https://api.hyperbolic.xyz/v1/chat/completions": { + response: { + type: "stream-chunks", + chunks: [], + }, + }, + }); + + beforeEach(() => { + vi.clearAllMocks(); + }); + + it("should set providerOptions hyperbolic to extra body", async () => { + const hyperbolic = createHyperbolic({ + apiKey: "test", + }); + const model = hyperbolic("Qwen/Qwen2.5-72B-Instruct"); + + await streamText({ + model, + messages: TEST_MESSAGES, + providerOptions: { + hyperbolic: { + reasoning: { + max_tokens: 1000, + }, + }, + }, + }).consumeStream(); + + expect(await server.calls[0]?.requestBody).toStrictEqual({ + messages: [ + { + content: "Hello", + role: "user", + }, + ], + reasoning: { + max_tokens: 1000, + }, + temperature: 0, + model: "Qwen/Qwen2.5-72B-Instruct", + stream: true, + }); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-provider.ts b/packages/ai-sdk-provider-2/src/hyperbolic-provider.ts new file mode 100644 index 0000000..e878dcf --- /dev/null +++ b/packages/ai-sdk-provider-2/src/hyperbolic-provider.ts @@ -0,0 +1,180 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; + +import type { HyperbolicChatModelId, HyperbolicChatSettings } from "./hyperbolic-chat-settings"; +import type { + HyperbolicCompletionModelId, + HyperbolicCompletionSettings, +} from "./hyperbolic-completion-settings"; +import type { HyperbolicImageModelId, HyperbolicImageSettings } from "./hyperbolic-image-settings"; +import { HyperbolicChatLanguageModel } from "./hyperbolic-chat-language-model"; +import { HyperbolicCompletionLanguageModel } from "./hyperbolic-completion-language-model"; +import { HyperbolicImageModel } from "./hyperbolic-image-language-model"; + +export type { HyperbolicCompletionSettings }; + +export interface HyperbolicProvider { + ( + modelId: HyperbolicChatModelId, + settings?: HyperbolicCompletionSettings, + ): HyperbolicCompletionLanguageModel; + (modelId: HyperbolicChatModelId, settings?: HyperbolicChatSettings): HyperbolicChatLanguageModel; + + languageModel( + modelId: HyperbolicChatModelId, + settings?: HyperbolicCompletionSettings, + ): HyperbolicCompletionLanguageModel; + languageModel( + modelId: HyperbolicChatModelId, + settings?: HyperbolicChatSettings, + ): HyperbolicChatLanguageModel; + + /** + * Creates a Hyperbolic chat model for text generation. + */ + chat( + modelId: HyperbolicChatModelId, + settings?: HyperbolicChatSettings, + ): HyperbolicChatLanguageModel; + + /** + * Creates a Hyperbolic completion model for text generation. + */ + completion( + modelId: HyperbolicCompletionModelId, + settings?: HyperbolicCompletionSettings, + ): HyperbolicCompletionLanguageModel; + + /** + * Creates a Hyperbolic image model for image generation. + */ + image(modelId: HyperbolicImageModelId, settings?: HyperbolicImageSettings): HyperbolicImageModel; +} + +export interface HyperbolicProviderSettings { + /** + * Base URL for the Hyperbolic API calls. 
+ */ + baseURL?: string; + + /** + * @deprecated Use `baseURL` instead. + */ + baseUrl?: string; + + /** + * API key for authenticating requests. + */ + apiKey?: string; + + /** + * Custom headers to include in the requests. + */ + headers?: Record; + + /** + * Hyperbolic compatibility mode. Should be set to `strict` when using the Hyperbolic API, + * and `compatible` when using 3rd party providers. In `compatible` mode, newer + * information such as streamOptions are not being sent. Defaults to 'compatible'. + */ + compatibility?: "strict" | "compatible"; + + /** + * Custom fetch implementation. You can use it as a middleware to intercept requests, + * or to provide a custom fetch implementation for e.g. testing. + */ + fetch?: typeof fetch; + + /** + * A JSON object to send as the request body to access Hyperbolic features & upstream provider features. + */ + extraBody?: Record; +} + +/** + * Create an Hyperbolic provider instance. + */ +export function createHyperbolic(options: HyperbolicProviderSettings = {}): HyperbolicProvider { + const baseURL = + withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? "https://api.hyperbolic.xyz/v1"; + + // we default to compatible, because strict breaks providers like Groq: + const compatibility = options.compatibility ?? 
"compatible";

  // Resolve the API key lazily so the env-var lookup happens when headers are
  // built, not when the provider is created.
  const getHeaders = () => ({
    Authorization: `Bearer ${loadApiKey({
      apiKey: options.apiKey,
      environmentVariableName: "HYPERBOLIC_API_KEY",
      description: "Hyperbolic",
    })}`,
    ...options.headers,
  });

  // Factory: chat (messages-based) model.
  const createChatModel = (modelId: HyperbolicChatModelId, settings: HyperbolicChatSettings = {}) =>
    new HyperbolicChatLanguageModel(modelId, settings, {
      provider: "hyperbolic.chat",
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      compatibility,
      fetch: options.fetch,
      extraBody: options.extraBody,
    });

  // Factory: legacy prompt-based completion model.
  const createCompletionModel = (
    modelId: HyperbolicCompletionModelId,
    settings: HyperbolicCompletionSettings = {},
  ) =>
    new HyperbolicCompletionLanguageModel(modelId, settings, {
      provider: "hyperbolic.completion",
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      compatibility,
      fetch: options.fetch,
      extraBody: options.extraBody,
    });

  // Factory: image generation model.
  const createImageModel = (
    modelId: HyperbolicImageModelId,
    settings: HyperbolicImageSettings = {},
  ) =>
    new HyperbolicImageModel(modelId, settings, {
      provider: "hyperbolic.image",
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      compatibility,
      fetch: options.fetch,
      extraBody: options.extraBody,
    });

  // Routes a model id to the right implementation: the single
  // instruct-tuned model uses the completions endpoint, everything else chat.
  const createLanguageModel = (
    modelId: HyperbolicChatModelId | HyperbolicCompletionModelId,
    settings?: HyperbolicChatSettings | HyperbolicCompletionSettings,
  ) => {
    if (modelId === "openai/gpt-3.5-turbo-instruct") {
      return createCompletionModel(modelId, settings as HyperbolicCompletionSettings);
    }

    return createChatModel(modelId, settings as HyperbolicChatSettings);
  };

  const provider = function (
    modelId: HyperbolicChatModelId | HyperbolicCompletionModelId,
    settings?: HyperbolicChatSettings | HyperbolicCompletionSettings,
  ) {
    // FIX: this guard previously lived inside the arrow function
    // `createLanguageModel`. Arrow functions have no `new.target` of their
    // own — it lexically resolves to the enclosing `createHyperbolic`
    // invocation — so the check could never observe `new provider(...)`.
    // A function *expression* has its own `new.target`, so the guard
    // actually works here.
    if (new.target) {
      throw new Error("The Hyperbolic model function cannot be called with the new keyword.");
    }

    return createLanguageModel(modelId, settings);
+ }; + + provider.languageModel = createLanguageModel; + provider.chat = createChatModel; + provider.completion = createCompletionModel; + provider.image = createImageModel; + + return provider as HyperbolicProvider; +} diff --git a/packages/ai-sdk-provider-2/src/index.ts b/packages/ai-sdk-provider-2/src/index.ts new file mode 100644 index 0000000..0180b06 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/index.ts @@ -0,0 +1,3 @@ +export * from "./hyperbolic-provider"; +export * from "./types"; +export * from "./hyperbolic-error"; diff --git a/packages/ai-sdk-provider-2/src/internal/index.ts b/packages/ai-sdk-provider-2/src/internal/index.ts new file mode 100644 index 0000000..c9936d2 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/internal/index.ts @@ -0,0 +1,7 @@ +export * from "../hyperbolic-chat-language-model"; +export * from "../hyperbolic-chat-settings"; +export * from "../hyperbolic-completion-language-model"; +export * from "../hyperbolic-completion-settings"; +export * from "../hyperbolic-image-language-model"; +export * from "../hyperbolic-image-settings"; +export * from "../types"; diff --git a/packages/ai-sdk-provider-2/src/map-hyperbolic-chat-logprobs.ts b/packages/ai-sdk-provider-2/src/map-hyperbolic-chat-logprobs.ts new file mode 100644 index 0000000..f325b88 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/map-hyperbolic-chat-logprobs.ts @@ -0,0 +1,37 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + +import type { LanguageModelV1LogProbs } from "@ai-sdk/provider"; + +type HyperbolicChatLogProbs = { + content: + | { + token: string; + logprob: number; + top_logprobs: + | { + token: string; + logprob: number; + }[] + | null; + }[] + | null; +}; + +export function mapHyperbolicChatLogProbsOutput( + logprobs: HyperbolicChatLogProbs | null | undefined, +): LanguageModelV1LogProbs | undefined { + return ( + logprobs?.content?.map(({ token, logprob, top_logprobs }) => ({ + token, + logprob, + topLogprobs: top_logprobs + ? top_logprobs.map(({ token, logprob }) => ({ + token, + logprob, + })) + : [], + })) ?? undefined + ); +} diff --git a/packages/ai-sdk-provider-2/src/map-hyperbolic-completion-logprobs.ts b/packages/ai-sdk-provider-2/src/map-hyperbolic-completion-logprobs.ts new file mode 100644 index 0000000..121d731 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/map-hyperbolic-completion-logprobs.ts @@ -0,0 +1,24 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +type HyperbolicCompletionLogProps = { + tokens: string[]; + token_logprobs: number[]; + top_logprobs: Record[] | null; +}; + +export function mapHyperbolicCompletionLogProbs( + logprobs: HyperbolicCompletionLogProps | null | undefined, +) { + return logprobs?.tokens.map((token, index) => ({ + token, + logprob: logprobs.token_logprobs[index] ?? 0, + topLogprobs: logprobs.top_logprobs + ? Object.entries(logprobs.top_logprobs[index] ?? {}).map(([token, logprob]) => ({ + token, + logprob, + })) + : [], + })); +} diff --git a/packages/ai-sdk-provider-2/src/map-hyperbolic-finish-reason.ts b/packages/ai-sdk-provider-2/src/map-hyperbolic-finish-reason.ts new file mode 100644 index 0000000..5763ff8 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/map-hyperbolic-finish-reason.ts @@ -0,0 +1,23 @@ +// Modified by Hyperbolic Labs, Inc. 
on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +import type { LanguageModelV1FinishReason } from "@ai-sdk/provider"; + +export function mapHyperbolicFinishReason( + finishReason: string | null | undefined, +): LanguageModelV1FinishReason { + switch (finishReason) { + case "stop": + return "stop"; + case "length": + return "length"; + case "content_filter": + return "content-filter"; + case "function_call": + case "tool_calls": + return "tool-calls"; + default: + return "unknown"; + } +} diff --git a/packages/ai-sdk-provider-2/src/scripts/templates/models.ts.hbs b/packages/ai-sdk-provider-2/src/scripts/templates/models.ts.hbs new file mode 100644 index 0000000..4eefd94 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/scripts/templates/models.ts.hbs @@ -0,0 +1,32 @@ +// prettier-ignore + +// This file is auto-generated by the pnpm codegen:update-models script. Do not edit manually. + +const _models = [ +{{#each modelId}} + "{{this}}", +{{/each}} +] as const; + +const _imageModels = [ +{{#each imageModelId}} + "{{this}}", +{{/each}} +] as const; + +const _chatModels = [ +{{#each chatModelId}} + "{{this}}", +{{/each}} +] as const; + +const _completionModels = [ +{{#each completionModelId}} + "{{this}}", +{{/each}} +] as const; + +export type HyperbolicImageModelId = (typeof _imageModels)[number] | (string & {}); +export type HyperbolicChatModelId = (typeof _chatModels)[number] | (string & {}); +export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | (string & {}); +export type HyperbolicModelId = HyperbolicImageModelId | HyperbolicChatModelId | HyperbolicCompletionModelId; diff --git a/packages/ai-sdk-provider-2/src/scripts/update-models-list.ts b/packages/ai-sdk-provider-2/src/scripts/update-models-list.ts new file mode 100644 index 0000000..52aea76 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/scripts/update-models-list.ts @@ -0,0 +1,31 @@ +import "@hyperbolic/api"; + 
+import { readFileSync, writeFileSync } from "fs"; +import path from "path"; +import { fileURLToPath } from "url"; +import Handlebars from "handlebars"; + +import { hyperbolicClient, showModelsV1ModelsGet } from "@hyperbolic/api"; + +/** + * Generates the list of models supported by Hyperbolic for the AI SDK Provider. + */ +const main = async () => { + const { + data: { data }, + } = await showModelsV1ModelsGet({ client: hyperbolicClient, throwOnError: true }); + + const models = data as { id: string; [key: string]: unknown }[]; + const modelIds = models.map((model) => model.id); + + const __dirname = path.dirname(fileURLToPath(import.meta.url)); + const templatePath = path.join(__dirname, "templates", "models.ts.hbs"); + const templateContent = readFileSync(templatePath, "utf-8"); + const template = Handlebars.compile(templateContent); + + const output = template({ modelId: modelIds }); + + writeFileSync(new URL("../__generated__/models.gen.ts", import.meta.url), output); +}; + +main(); diff --git a/packages/ai-sdk-provider-2/src/types.ts b/packages/ai-sdk-provider-2/src/types.ts new file mode 100644 index 0000000..d0d0c8a --- /dev/null +++ b/packages/ai-sdk-provider-2/src/types.ts @@ -0,0 +1,47 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +import type { LanguageModelV1 } from "@ai-sdk/provider"; + +// Re-export the LanguageModelV1 type to ensure proper type compatibility +export type { LanguageModelV1 }; + +// Export our model types with explicit type constraints +export type HyperbolicLanguageModel = LanguageModelV1; + +export type HyperbolicProviderOptions = { + models?: string[]; + + /** + * https://openrouter.ai/docs/use-cases/reasoning-tokens + * One of `max_tokens` or `effort` is required. + * If `exclude` is true, reasoning will be removed from the response. Default is false. 
+ */ + reasoning?: { + exclude?: boolean; + } & ( + | { + max_tokens: number; + } + | { + effort: "high" | "medium" | "low"; + } + ); + + /** + * A unique identifier representing your end-user, which can + * help Hyperbolic to monitor and detect abuse. + */ + user?: string; +}; + +export type HyperbolicSharedSettings = HyperbolicProviderOptions & { + /** + * @deprecated use `reasoning` instead + */ + includeReasoning?: boolean; + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + extraBody?: Record; +}; diff --git a/packages/ai-sdk-provider-2/tsconfig.json b/packages/ai-sdk-provider-2/tsconfig.json new file mode 100644 index 0000000..61a17f8 --- /dev/null +++ b/packages/ai-sdk-provider-2/tsconfig.json @@ -0,0 +1,5 @@ +{ + "extends": "@hyperbolic/tsconfig/external-package.json", + "include": ["."], + "exclude": ["*/dist", "dist", "build", "node_modules"] +} diff --git a/packages/ai-sdk-provider-2/tsup.config.ts b/packages/ai-sdk-provider-2/tsup.config.ts new file mode 100644 index 0000000..cded0e2 --- /dev/null +++ b/packages/ai-sdk-provider-2/tsup.config.ts @@ -0,0 +1,17 @@ +import { defineConfig } from "tsup"; + +export default defineConfig([ + { + entry: ["src/index.ts"], + format: ["cjs", "esm"], + dts: true, + sourcemap: true, + }, + { + entry: ["src/internal/index.ts"], + outDir: "dist/internal", + format: ["cjs", "esm"], + dts: true, + sourcemap: true, + }, +]); diff --git a/packages/ai-sdk-provider-2/turbo.json b/packages/ai-sdk-provider-2/turbo.json new file mode 100644 index 0000000..7dbd533 --- /dev/null +++ b/packages/ai-sdk-provider-2/turbo.json @@ -0,0 +1,10 @@ +{ + "$schema": "https://turborepo.org/schema.json", + "extends": ["//"], + "tasks": { + "build": { + "dependsOn": ["^build"], + "outputs": ["dist/**"] + } + } +} diff --git a/packages/ai-sdk-provider-2/vitest.config.mts b/packages/ai-sdk-provider-2/vitest.config.mts new file mode 100644 index 0000000..47e4ab5 --- /dev/null +++ b/packages/ai-sdk-provider-2/vitest.config.mts @@ 
-0,0 +1,10 @@ +import tsconfigPaths from "vite-tsconfig-paths"; +import { configDefaults, defineConfig } from "vitest/config"; + +export default defineConfig({ + test: { + exclude: [...configDefaults.exclude, "**/node_modules/**", "**/fixtures/**", "**/templates/**"], + globals: true, + include: ["**/*.test.ts", "**/*.test.tsx"], + }, +}); diff --git a/packages/ai-sdk-provider-2/vitest.edge.config.ts b/packages/ai-sdk-provider-2/vitest.edge.config.ts new file mode 100644 index 0000000..3f8327a --- /dev/null +++ b/packages/ai-sdk-provider-2/vitest.edge.config.ts @@ -0,0 +1,10 @@ +import { defineConfig } from "vitest/config"; + +// https://vitejs.dev/config/ +export default defineConfig({ + test: { + environment: "edge-runtime", + globals: true, + include: ["**/*.test.ts", "**/*.test.tsx"], + }, +}); diff --git a/packages/ai-sdk-provider-2/vitest.node.config.ts b/packages/ai-sdk-provider-2/vitest.node.config.ts new file mode 100644 index 0000000..da67c22 --- /dev/null +++ b/packages/ai-sdk-provider-2/vitest.node.config.ts @@ -0,0 +1,10 @@ +import { defineConfig } from "vitest/config"; + +// https://vitejs.dev/config/ +export default defineConfig({ + test: { + environment: "node", + globals: true, + include: ["**/*.test.ts", "**/*.test.tsx"], + }, +}); From da9fcda03c417b63f7eb031d04981a98a7428d62 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Thu, 22 Jan 2026 17:07:59 -0800 Subject: [PATCH 02/22] update deps --- package.json | 2 +- packages/ai-sdk-provider-2/package.json | 19 +- ...onvert-to-openrouter-chat-messages.test.ts | 947 +++++++ .../convert-to-openrouter-chat-messages.ts | 303 +++ .../ai-sdk-provider-2/src/chat/errors.test.ts | 93 + .../src/chat/file-parser-schema.test.ts | 112 + .../src/chat/file-url-utils.ts | 150 ++ .../src/chat/get-tool-choice.ts | 42 + .../ai-sdk-provider-2/src/chat/index.test.ts | 2280 +++++++++++++++++ packages/ai-sdk-provider-2/src/chat/index.ts | 1100 ++++++++ packages/ai-sdk-provider-2/src/chat/is-url.ts | 15 + 
.../src/chat/large-pdf-response.test.ts | 104 + .../src/chat/payload-comparison.test.ts | 154 ++ .../ai-sdk-provider-2/src/chat/schemas.ts | 285 +++ ...convert-to-openrouter-completion-prompt.ts | 150 ++ .../src/completion/index.test.ts | 598 +++++ .../ai-sdk-provider-2/src/completion/index.ts | 374 +++ .../src/completion/schemas.ts | 65 + ...onvert-to-hyperbolic-chat-messages.test.ts | 437 ---- .../convert-to-hyperbolic-chat-messages.ts | 165 -- ...convert-to-hyperbolic-completion-prompt.ts | 134 - .../src/embedding/index.test.ts | 253 ++ .../ai-sdk-provider-2/src/embedding/index.ts | 108 + .../src/embedding/schemas.ts | 25 + packages/ai-sdk-provider-2/src/facade.ts | 117 + .../hyperbolic-chat-language-model.test.ts | 990 ------- .../src/hyperbolic-chat-language-model.ts | 659 ----- .../src/hyperbolic-chat-prompt.ts | 67 - .../src/hyperbolic-chat-settings.ts | 50 - ...perbolic-completion-language-model.test.ts | 496 ---- .../hyperbolic-completion-language-model.ts | 352 --- .../src/hyperbolic-completion-settings.ts | 42 - .../ai-sdk-provider-2/src/hyperbolic-error.ts | 49 - .../src/hyperbolic-image-language-model.ts | 130 - .../src/hyperbolic-image-settings.ts | 40 - .../src/hyperbolic-provider-options.test.ts | 64 - .../src/hyperbolic-provider.ts | 180 -- packages/ai-sdk-provider-2/src/index.ts | 6 +- .../src/internal copy/index.ts | 5 + .../ai-sdk-provider-2/src/internal/index.ts | 7 - .../src/map-hyperbolic-chat-logprobs.ts | 37 - .../src/map-hyperbolic-completion-logprobs.ts | 24 - .../src/map-hyperbolic-finish-reason.ts | 23 - packages/ai-sdk-provider-2/src/provider.ts | 230 ++ .../src/schemas/error-response.test.ts | 52 + .../src/schemas/error-response.ts | 32 + .../ai-sdk-provider-2/src/schemas/format.ts | 11 + .../ai-sdk-provider-2/src/schemas/image.ts | 23 + .../src/schemas/provider-metadata.ts | 84 + .../src/schemas/reasoning-details.ts | 92 + .../src/test-utils/test-server.ts | 148 ++ .../src/tests/provider-options.test.ts | 59 + 
.../src/tests/stream-usage-accounting.test.ts | 177 ++ .../src/tests/usage-accounting.test.ts | 326 +++ packages/ai-sdk-provider-2/src/types.ts | 47 - packages/ai-sdk-provider-2/src/types/index.ts | 72 + .../openrouter-chat-completions-input.ts | 106 + .../src/types/openrouter-chat-settings.ts | 158 ++ .../types/openrouter-completion-settings.ts | 39 + .../types/openrouter-embedding-settings.ts | 56 + .../src/utils/map-finish-reason.ts | 43 + .../src/utils/remove-undefined.ts | 12 + .../src/utils/type-guards.ts | 6 + .../src/utils/with-user-agent-suffix.ts | 30 + packages/ai-sdk-provider-2/src/version.ts | 4 + packages/ai-sdk-provider-2/tsup.config.ts | 9 + packages/ai-sdk-provider/package.json | 10 +- pnpm-lock.yaml | 1095 ++++---- pnpm-workspace.yaml | 2 +- 69 files changed, 9501 insertions(+), 4675 deletions(-) create mode 100644 packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/errors.test.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/file-url-utils.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/index.test.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/index.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/is-url.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts create mode 100644 packages/ai-sdk-provider-2/src/chat/schemas.ts create mode 100644 packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts create mode 100644 packages/ai-sdk-provider-2/src/completion/index.test.ts create mode 100644 packages/ai-sdk-provider-2/src/completion/index.ts 
create mode 100644 packages/ai-sdk-provider-2/src/completion/schemas.ts delete mode 100644 packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.test.ts delete mode 100644 packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.ts delete mode 100644 packages/ai-sdk-provider-2/src/convert-to-hyperbolic-completion-prompt.ts create mode 100644 packages/ai-sdk-provider-2/src/embedding/index.test.ts create mode 100644 packages/ai-sdk-provider-2/src/embedding/index.ts create mode 100644 packages/ai-sdk-provider-2/src/embedding/schemas.ts create mode 100644 packages/ai-sdk-provider-2/src/facade.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.test.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-chat-prompt.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-chat-settings.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.test.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-completion-settings.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-error.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-image-language-model.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-image-settings.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-provider-options.test.ts delete mode 100644 packages/ai-sdk-provider-2/src/hyperbolic-provider.ts create mode 100644 packages/ai-sdk-provider-2/src/internal copy/index.ts delete mode 100644 packages/ai-sdk-provider-2/src/internal/index.ts delete mode 100644 packages/ai-sdk-provider-2/src/map-hyperbolic-chat-logprobs.ts delete mode 100644 packages/ai-sdk-provider-2/src/map-hyperbolic-completion-logprobs.ts delete mode 100644 
packages/ai-sdk-provider-2/src/map-hyperbolic-finish-reason.ts create mode 100644 packages/ai-sdk-provider-2/src/provider.ts create mode 100644 packages/ai-sdk-provider-2/src/schemas/error-response.test.ts create mode 100644 packages/ai-sdk-provider-2/src/schemas/error-response.ts create mode 100644 packages/ai-sdk-provider-2/src/schemas/format.ts create mode 100644 packages/ai-sdk-provider-2/src/schemas/image.ts create mode 100644 packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts create mode 100644 packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts create mode 100644 packages/ai-sdk-provider-2/src/test-utils/test-server.ts create mode 100644 packages/ai-sdk-provider-2/src/tests/provider-options.test.ts create mode 100644 packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts create mode 100644 packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts delete mode 100644 packages/ai-sdk-provider-2/src/types.ts create mode 100644 packages/ai-sdk-provider-2/src/types/index.ts create mode 100644 packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts create mode 100644 packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts create mode 100644 packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts create mode 100644 packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts create mode 100644 packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts create mode 100644 packages/ai-sdk-provider-2/src/utils/remove-undefined.ts create mode 100644 packages/ai-sdk-provider-2/src/utils/type-guards.ts create mode 100644 packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts create mode 100644 packages/ai-sdk-provider-2/src/version.ts diff --git a/package.json b/package.json index a507b92..005fe6b 100644 --- a/package.json +++ b/package.json @@ -43,7 +43,7 @@ "turbo": "^2.3.4", "typescript": "catalog:", "vite-tsconfig-paths": "^5.1.4", - "vitest": "^2.0.5" + "vitest": "3.2.4" }, 
"packageManager": "pnpm@9.15.4", "engines": { diff --git a/packages/ai-sdk-provider-2/package.json b/packages/ai-sdk-provider-2/package.json index 0a53dc6..ffdd6b5 100644 --- a/packages/ai-sdk-provider-2/package.json +++ b/packages/ai-sdk-provider-2/package.json @@ -54,17 +54,24 @@ "@hyperbolic/eslint-config": "workspace:*", "@hyperbolic/prettier-config": "workspace:*", "@hyperbolic/tsconfig": "workspace:*", + "ai": "^6.0.48", "eslint": "catalog:", "handlebars": "^4.7.8", + "msw": "2.12.4", "prettier": "catalog:", - "tsup": "8.4.0", + "tsup": "8.5.0", "type-fest": "^4.37.0", - "typescript": "catalog:" + "typescript": "catalog:", + "vite-tsconfig-paths": "^5.1.4", + "vitest": "3.2.4", + "zod": "^4.0.0" }, "dependencies": { - "@ai-sdk/provider": "^1.1.3", - "@ai-sdk/provider-utils": "^2.2.8", - "ai": "^4.3.16", - "zod": "^3.24.2" + "@ai-sdk/provider": "^3.0.5", + "@ai-sdk/provider-utils": "^4.0.9" + }, + "peerDependencies": { + "ai": "^6.0.48", + "zod": "^3.25.0 || ^4.0.0" } } diff --git a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts b/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts new file mode 100644 index 0000000..b90f1ec --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts @@ -0,0 +1,947 @@ +import { ReasoningDetailType } from '../schemas/reasoning-details'; +import { convertToOpenRouterChatMessages } from './convert-to-openrouter-chat-messages'; +import { MIME_TO_FORMAT } from './file-url-utils'; + +describe('user messages', () => { + it('should convert image Uint8Array', async () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'file', + data: new Uint8Array([0, 1, 2, 3]), + mediaType: 'image/png', + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'image_url', + image_url: { url: 
'data:image/png;base64,AAECAw==' }, + }, + ], + }, + ]); + }); + + it('should convert image urls', async () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'file', + data: 'https://example.com/image.png', + mediaType: 'image/png', + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'image_url', + image_url: { url: 'https://example.com/image.png' }, + }, + ], + }, + ]); + }); + + it('should convert messages with image base64', async () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'file', + data: 'data:image/png;base64,AAECAw==', + mediaType: 'image/png', + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'image_url', + image_url: { url: 'data:image/png;base64,AAECAw==' }, + }, + ], + }, + ]); + }); + + it('should convert messages with only a text part to a string content', async () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + ]); + + expect(result).toEqual([{ role: 'user', content: 'Hello' }]); + }); + + it.each( + Object.entries(MIME_TO_FORMAT).map(([mimeSubtype, format]) => [ + `audio/${mimeSubtype}`, + format, + ]), + )('should convert %s to input_audio with %s format', (mediaType, expectedFormat) => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { + type: 'file', + data: new Uint8Array([0, 1, 2, 3]), + mediaType, + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'input_audio', + input_audio: { + data: 'AAECAw==', + format: expectedFormat, + }, + }, + ], + }, + ]); + }); + + it('should convert audio base64 data URL to input_audio', async () => { + const result = 
convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { + type: 'file', + data: 'data:audio/mpeg;base64,AAECAw==', + mediaType: 'audio/mpeg', + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'input_audio', + input_audio: { + data: 'AAECAw==', + format: 'mp3', + }, + }, + ], + }, + ]); + }); + + it('should convert raw audio base64 string to input_audio', async () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { + type: 'file', + data: 'AAECAw==', + mediaType: 'audio/mpeg', + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'input_audio', + input_audio: { + data: 'AAECAw==', + format: 'mp3', + }, + }, + ], + }, + ]); + }); + + it('should throw error for audio URLs', async () => { + expect(() => + convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { + type: 'file', + data: 'https://example.com/audio.mp3', + mediaType: 'audio/mpeg', + }, + ], + }, + ]), + ).toThrow(/Audio files cannot be provided as URLs/); + }); + + it('should throw error for unsupported audio formats', async () => { + expect(() => + convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { + type: 'file', + data: new Uint8Array([0, 1, 2, 3]), + mediaType: 'audio/webm', + }, + ], + }, + ]), + ).toThrow(/Unsupported audio format: "audio\/webm"/); + }); +}); + +describe('cache control', () => { + it('should pass cache control from system message provider metadata', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'system', + content: 'System prompt', + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'system', + content: 'System prompt', + cache_control: { type: 'ephemeral' }, + }, + ]); + }); + + it('should pass cache control from user message provider metadata (single text part)', () => { + const result = 
convertToOpenRouterChatMessages([ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ]); + }); + + it('should pass cache control from content part provider metadata (single text part)', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ]); + }); + + it('should pass cache control from user message provider metadata (multiple parts)', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'file', + data: new Uint8Array([0, 1, 2, 3]), + mediaType: 'image/png', + }, + ], + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + cache_control: { type: 'ephemeral' }, + }, + { + type: 'image_url', + image_url: { url: 'data:image/png;base64,AAECAw==' }, + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ]); + }); + + it('should pass cache control from user message provider metadata without cache control (single text part)', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: 'Hello', + }, + ]); + }); + + it('should pass cache control to multiple image parts from user message provider metadata', () => { + const 
result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'file', + data: new Uint8Array([0, 1, 2, 3]), + mediaType: 'image/png', + }, + { + type: 'file', + data: new Uint8Array([4, 5, 6, 7]), + mediaType: 'image/jpeg', + }, + ], + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + cache_control: { type: 'ephemeral' }, + }, + { + type: 'image_url', + image_url: { url: 'data:image/png;base64,AAECAw==' }, + cache_control: { type: 'ephemeral' }, + }, + { + type: 'image_url', + image_url: { url: 'data:image/jpeg;base64,BAUGBw==' }, + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ]); + }); + + it('should pass cache control to file parts from user message provider metadata', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { + type: 'file', + data: 'ZmlsZSBjb250ZW50', + mediaType: 'text/plain', + providerOptions: { + openrouter: { + filename: 'file.txt', + }, + }, + }, + ], + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + cache_control: { type: 'ephemeral' }, + }, + { + type: 'file', + file: { + filename: 'file.txt', + file_data: 'data:text/plain;base64,ZmlsZSBjb250ZW50', + }, + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ]); + }); + + it('should handle mixed part-specific and message-level cache control for multiple parts', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + // No part-specific provider metadata + }, + { + type: 'file', + data: new Uint8Array([0, 1, 2, 3]), + mediaType: 'image/png', + providerOptions: { + anthropic: { + 
cacheControl: { type: 'ephemeral' }, + }, + }, + }, + { + type: 'file', + data: 'ZmlsZSBjb250ZW50', + mediaType: 'text/plain', + providerOptions: { + openrouter: { + filename: 'file.txt', + }, + }, + // No part-specific provider metadata + }, + ], + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + cache_control: { type: 'ephemeral' }, + }, + { + type: 'image_url', + image_url: { url: 'data:image/png;base64,AAECAw==' }, + cache_control: { type: 'ephemeral' }, + }, + { + type: 'file', + file: { + filename: 'file.txt', + file_data: 'data:text/plain;base64,ZmlsZSBjb250ZW50', + }, + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ]); + }); + + it('should pass cache control from individual content part provider metadata', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + { + type: 'file', + data: new Uint8Array([0, 1, 2, 3]), + mediaType: 'image/png', + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Hello', + cache_control: { type: 'ephemeral' }, + }, + { + type: 'image_url', + image_url: { url: 'data:image/png;base64,AAECAw==' }, + }, + ], + }, + ]); + }); + + it('should pass cache control from assistant message provider metadata', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'assistant', + content: [{ type: 'text', text: 'Assistant response' }], + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'assistant', + content: 'Assistant response', + cache_control: { type: 'ephemeral' }, + }, + ]); + }); + + it('should pass cache control from tool message provider metadata', () => { + 
const result = convertToOpenRouterChatMessages([ + { + role: 'tool', + content: [ + { + type: 'tool-result', + toolCallId: 'call-123', + toolName: 'calculator', + output: { + type: 'json', + value: { answer: 42 }, + }, + }, + ], + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'tool', + tool_call_id: 'call-123', + content: JSON.stringify({ answer: 42 }), + cache_control: { type: 'ephemeral' }, + }, + ]); + }); + + it('should support the alias cache_control field', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'system', + content: 'System prompt', + providerOptions: { + anthropic: { + cache_control: { type: 'ephemeral' }, + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'system', + content: 'System prompt', + cache_control: { type: 'ephemeral' }, + }, + ]); + }); + + it('should support cache control on last message in content array', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'system', + content: 'System prompt', + }, + { + role: 'user', + content: [ + { type: 'text', text: 'User prompt' }, + { + type: 'text', + text: 'User prompt 2', + providerOptions: { + anthropic: { cacheControl: { type: 'ephemeral' } }, + }, + }, + ], + }, + ]); + + expect(result).toEqual([ + { + role: 'system', + content: 'System prompt', + }, + { + role: 'user', + content: [ + { type: 'text', text: 'User prompt' }, + { + type: 'text', + text: 'User prompt 2', + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ]); + }); + + it('should pass cache control to audio input parts from user message provider metadata', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'user', + content: [ + { type: 'text', text: 'Listen to this' }, + { + type: 'file', + data: new Uint8Array([0, 1, 2, 3]), + mediaType: 'audio/mpeg', + }, + ], + providerOptions: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ]); + + 
expect(result).toEqual([ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Listen to this', + cache_control: { type: 'ephemeral' }, + }, + { + type: 'input_audio', + input_audio: { + data: 'AAECAw==', + format: 'mp3', + }, + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ]); + }); +}); + +describe('reasoning_details accumulation', () => { + it('should accumulate reasoning_details from reasoning part providerOptions', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'assistant', + content: [ + { + type: 'reasoning', + text: 'First reasoning chunk', + providerOptions: { + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'First reasoning chunk', + }, + ], + }, + }, + }, + { + type: 'reasoning', + text: 'Second reasoning chunk', + providerOptions: { + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'Second reasoning chunk', + }, + ], + }, + }, + }, + { + type: 'text', + text: 'Final response', + }, + ], + providerOptions: { + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'First reasoning chunk', + }, + { + type: ReasoningDetailType.Text, + text: 'Second reasoning chunk', + }, + ], + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'assistant', + content: 'Final response', + reasoning: 'First reasoning chunkSecond reasoning chunk', + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'First reasoning chunk', + }, + { + type: ReasoningDetailType.Text, + text: 'Second reasoning chunk', + }, + ], + }, + ]); + }); + + it('should use preserved reasoning_details from message-level providerOptions when available', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'assistant', + content: [ + { + type: 'reasoning', + text: 'Reasoning text', + // No providerOptions on part + }, + { + type: 'text', + text: 'Response', + }, + ], + providerOptions: { + openrouter: { + reasoning_details: [ + 
{ + type: ReasoningDetailType.Text, + text: 'Preserved reasoning detail', + }, + { + type: ReasoningDetailType.Summary, + summary: 'Preserved summary', + }, + ], + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'assistant', + content: 'Response', + reasoning: 'Reasoning text', + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'Preserved reasoning detail', + }, + { + type: ReasoningDetailType.Summary, + summary: 'Preserved summary', + }, + ], + }, + ]); + }); + + it('should not include reasoning_details when not present in providerOptions', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'assistant', + content: [ + { + type: 'reasoning', + text: 'Reasoning text', + // No providerOptions + }, + { + type: 'text', + text: 'Response', + }, + ], + // No providerOptions + }, + ]); + + expect(result).toEqual([ + { + role: 'assistant', + content: 'Response', + reasoning: 'Reasoning text', + // reasoning_details should be undefined when not preserved + reasoning_details: undefined, + }, + ]); + }); + + it('should handle mixed reasoning parts with and without providerOptions', () => { + const result = convertToOpenRouterChatMessages([ + { + role: 'assistant', + content: [ + { + type: 'reasoning', + text: 'First chunk', + providerOptions: { + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'First chunk', + }, + ], + }, + }, + }, + { + type: 'reasoning', + text: 'Second chunk', + // No providerOptions + }, + { + type: 'text', + text: 'Response', + }, + ], + providerOptions: { + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'First chunk', + }, + ], + }, + }, + }, + ]); + + expect(result).toEqual([ + { + role: 'assistant', + content: 'Response', + reasoning: 'First chunkSecond chunk', + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'First chunk', + }, + ], + }, + ]); + }); +}); diff --git 
a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts b/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts new file mode 100644 index 0000000..1309f1f --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts @@ -0,0 +1,303 @@ +import type { + LanguageModelV3FilePart, + LanguageModelV3Prompt, + LanguageModelV3TextPart, + LanguageModelV3ToolResultPart, + SharedV3ProviderMetadata, +} from '@ai-sdk/provider'; +import type { ReasoningDetailUnion } from '../schemas/reasoning-details'; +import type { + ChatCompletionContentPart, + OpenRouterChatCompletionsInput, +} from '../types/openrouter-chat-completions-input'; + +import { OpenRouterProviderOptionsSchema } from '../schemas/provider-metadata'; +import { getFileUrl, getInputAudioData } from './file-url-utils'; +import { isUrl } from './is-url'; + +// Type for OpenRouter Cache Control following Anthropic's pattern +export type OpenRouterCacheControl = { type: 'ephemeral' }; + +function getCacheControl( + providerMetadata: SharedV3ProviderMetadata | undefined, +): OpenRouterCacheControl | undefined { + const anthropic = providerMetadata?.anthropic; + const openrouter = providerMetadata?.openrouter; + + // Allow both cacheControl and cache_control: + return (openrouter?.cacheControl ?? + openrouter?.cache_control ?? + anthropic?.cacheControl ?? + anthropic?.cache_control) as OpenRouterCacheControl | undefined; +} + +export function convertToOpenRouterChatMessages( + prompt: LanguageModelV3Prompt, +): OpenRouterChatCompletionsInput { + const messages: OpenRouterChatCompletionsInput = []; + for (const { role, content, providerOptions } of prompt) { + switch (role) { + case 'system': { + messages.push({ + role: 'system', + content, + cache_control: getCacheControl(providerOptions), + }); + break; + } + + case 'user': { + if (content.length === 1 && content[0]?.type === 'text') { + const cacheControl = + getCacheControl(providerOptions) ?? 
+ getCacheControl(content[0].providerOptions); + const contentWithCacheControl: string | ChatCompletionContentPart[] = + cacheControl + ? [ + { + type: 'text', + text: content[0].text, + cache_control: cacheControl, + }, + ] + : content[0].text; + messages.push({ + role: 'user', + content: contentWithCacheControl, + }); + break; + } + + // Get message level cache control + const messageCacheControl = getCacheControl(providerOptions); + const contentParts: ChatCompletionContentPart[] = content.map( + (part: LanguageModelV3TextPart | LanguageModelV3FilePart) => { + const cacheControl = + getCacheControl(part.providerOptions) ?? messageCacheControl; + + switch (part.type) { + case 'text': + return { + type: 'text' as const, + text: part.text, + // For text parts, only use part-specific cache control + cache_control: cacheControl, + }; + case 'file': { + if (part.mediaType?.startsWith('image/')) { + const url = getFileUrl({ + part, + defaultMediaType: 'image/jpeg', + }); + return { + type: 'image_url' as const, + image_url: { + url, + }, + // For image parts, use part-specific or message-level cache control + cache_control: cacheControl, + }; + } + + // Handle audio files for input_audio format + if (part.mediaType?.startsWith('audio/')) { + return { + type: 'input_audio' as const, + input_audio: getInputAudioData(part), + cache_control: cacheControl, + }; + } + + const fileName = String( + part.providerOptions?.openrouter?.filename ?? + part.filename ?? 
+ '', + ); + + const fileData = getFileUrl({ + part, + defaultMediaType: 'application/pdf', + }); + + if ( + isUrl({ + url: fileData, + protocols: new Set(['http:', 'https:'] as const), + }) + ) { + return { + type: 'file' as const, + file: { + filename: fileName, + file_data: fileData, + }, + } satisfies ChatCompletionContentPart; + } + + return { + type: 'file' as const, + file: { + filename: fileName, + file_data: fileData, + }, + cache_control: cacheControl, + } satisfies ChatCompletionContentPart; + } + default: { + return { + type: 'text' as const, + text: '', + cache_control: cacheControl, + }; + } + } + }, + ); + + // For multi-part messages, don't add cache_control at the root level + messages.push({ + role: 'user', + content: contentParts, + }); + + break; + } + + case 'assistant': { + let text = ''; + let reasoning = ''; + const toolCalls: Array<{ + id: string; + type: 'function'; + function: { name: string; arguments: string }; + }> = []; + const accumulatedReasoningDetails: ReasoningDetailUnion[] = []; + + for (const part of content) { + switch (part.type) { + case 'text': { + text += part.text; + + break; + } + case 'tool-call': { + const partReasoningDetails = ( + part.providerOptions as Record + )?.openrouter as Record | undefined; + if ( + partReasoningDetails?.reasoning_details && + Array.isArray(partReasoningDetails.reasoning_details) + ) { + accumulatedReasoningDetails.push( + ...(partReasoningDetails.reasoning_details as ReasoningDetailUnion[]), + ); + } + toolCalls.push({ + id: part.toolCallId, + type: 'function', + function: { + name: part.toolName, + arguments: JSON.stringify(part.input), + }, + }); + break; + } + case 'reasoning': { + reasoning += part.text; + const parsedPartProviderOptions = + OpenRouterProviderOptionsSchema.safeParse(part.providerOptions); + if ( + parsedPartProviderOptions.success && + parsedPartProviderOptions.data?.openrouter?.reasoning_details + ) { + accumulatedReasoningDetails.push( + 
...parsedPartProviderOptions.data.openrouter + .reasoning_details, + ); + } + break; + } + + case 'file': + break; + default: { + break; + } + } + } + + // Check message-level providerOptions for preserved reasoning_details and annotations + const parsedProviderOptions = + OpenRouterProviderOptionsSchema.safeParse(providerOptions); + const messageReasoningDetails = parsedProviderOptions.success + ? parsedProviderOptions.data?.openrouter?.reasoning_details + : undefined; + const messageAnnotations = parsedProviderOptions.success + ? parsedProviderOptions.data?.openrouter?.annotations + : undefined; + + // Use message-level reasoning_details if available, otherwise use accumulated from parts + const finalReasoningDetails = + messageReasoningDetails && + Array.isArray(messageReasoningDetails) && + messageReasoningDetails.length > 0 + ? messageReasoningDetails + : accumulatedReasoningDetails.length > 0 + ? accumulatedReasoningDetails + : undefined; + + messages.push({ + role: 'assistant', + content: text, + tool_calls: toolCalls.length > 0 ? toolCalls : undefined, + reasoning: reasoning || undefined, + reasoning_details: finalReasoningDetails, + annotations: messageAnnotations, + cache_control: getCacheControl(providerOptions), + }); + + break; + } + + case 'tool': { + for (const toolResponse of content) { + // Skip tool approval responses - only process tool results + if (toolResponse.type === 'tool-approval-response') { + continue; + } + const content = getToolResultContent(toolResponse); + + messages.push({ + role: 'tool', + tool_call_id: toolResponse.toolCallId, + content, + cache_control: + getCacheControl(providerOptions) ?? 
+ getCacheControl(toolResponse.providerOptions), + }); + } + break; + } + + default: { + break; + } + } + } + + return messages; +} + +function getToolResultContent(input: LanguageModelV3ToolResultPart): string { + switch (input.output.type) { + case 'text': + case 'error-text': + return input.output.value; + case 'json': + case 'error-json': + case 'content': + return JSON.stringify(input.output.value); + case 'execution-denied': + return input.output.reason ?? 'Tool execution denied'; + } +} diff --git a/packages/ai-sdk-provider-2/src/chat/errors.test.ts b/packages/ai-sdk-provider-2/src/chat/errors.test.ts new file mode 100644 index 0000000..9589191 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/errors.test.ts @@ -0,0 +1,93 @@ +import type { LanguageModelV3Prompt } from '@ai-sdk/provider'; + +import { describe, expect, it } from 'vitest'; +import { createOpenRouter } from '../provider'; +import { createTestServer } from '../test-utils/test-server'; + +const TEST_PROMPT: LanguageModelV3Prompt = [ + { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, +]; + +const provider = createOpenRouter({ + baseURL: 'https://test.openrouter.ai/api/v1', + apiKey: 'test-api-key', +}); + +const server = createTestServer({ + 'https://test.openrouter.ai/api/v1/chat/completions': {}, +}); + +describe('HTTP 200 Error Response Handling', () => { + describe('doGenerate', () => { + it('should throw APICallError for HTTP 200 responses with error payloads', async () => { + // OpenRouter sometimes returns HTTP 200 with an error object instead of choices + // This can occur for various server errors (e.g., internal errors, processing failures) + server.urls[ + 'https://test.openrouter.ai/api/v1/chat/completions' + ]!.response = { + type: 'json-value', + body: { + error: { + message: 'Internal Server Error', + code: 500, + }, + user_id: 'org_abc123', + }, + }; + + const model = provider('anthropic/claude-3.5-sonnet'); + + await expect( + model.doGenerate({ + prompt: 
TEST_PROMPT, + }), + ).rejects.toThrow('Internal Server Error'); + }); + + it('should parse successful responses normally when no error present', async () => { + // Normal successful response without error + server.urls[ + 'https://test.openrouter.ai/api/v1/chat/completions' + ]!.response = { + type: 'json-value', + body: { + id: 'gen-123', + model: 'anthropic/claude-3.5-sonnet', + provider: 'Anthropic', + choices: [ + { + index: 0, + message: { + role: 'assistant', + content: 'Hello! How can I help you?', + }, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 8, + total_tokens: 18, + }, + }, + }; + + const model = provider('anthropic/claude-3.5-sonnet'); + + const result = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(result.content).toMatchObject([ + { + type: 'text', + text: 'Hello! How can I help you?', + }, + ]); + expect( + (result.usage.inputTokens?.total ?? 0) + + (result.usage.outputTokens?.total ?? 0), + ).toBe(18); + }); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts b/packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts new file mode 100644 index 0000000..084eabc --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts @@ -0,0 +1,112 @@ +import { describe, expect, it } from 'vitest'; +import { OpenRouterNonStreamChatCompletionResponseSchema } from './schemas'; + +describe('FileParser annotation schema', () => { + it('should parse response with all real API fields', () => { + // This is based on actual API response structure (anonymized) + const response = { + id: 'gen-xxx', + provider: 'Amazon Bedrock', + model: 'anthropic/claude-3.5-sonnet', + object: 'chat.completion', + created: 1763157299, + choices: [ + { + logprobs: null, + finish_reason: 'stop', + native_finish_reason: 'stop', + index: 0, + message: { + role: 'assistant' as const, + content: 'Test response content', + refusal: null, + reasoning: null, + annotations: 
[ + { + type: 'file' as const, + file: { + hash: 'abc123', + name: '', + content: [ + { + type: 'text', + text: '', + }, + ], + }, + }, + ], + }, + }, + ], + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + }, + }; + + const result = + OpenRouterNonStreamChatCompletionResponseSchema.parse(response); + expect(result).toBeDefined(); + }); + + it('should parse file annotation with content array and extra fields', () => { + const response = { + id: 'gen-test', + provider: 'Amazon Bedrock', + model: 'anthropic/claude-3.5-sonnet', + object: 'chat.completion', + created: 1763157061, + choices: [ + { + logprobs: null, + finish_reason: 'stop', + native_finish_reason: 'stop', // Extra field from API + index: 0, + message: { + role: 'assistant' as const, + content: 'Test response', + refusal: null, // Extra field from API + reasoning: null, + annotations: [ + { + type: 'file' as const, + file: { + hash: '85bd49b97b7ff5be002d9f654776119f253c1cae333b49ba8f4a53da346284ba', + name: '', + content: [ + { + type: 'text', + text: '', + }, + { + type: 'text', + text: 'Some file content', + }, + ], + }, + }, + ], + }, + }, + ], + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + }, + }; + + const result = + OpenRouterNonStreamChatCompletionResponseSchema.parse(response); + + // Check that parsing succeeded + expect(result).toBeDefined(); + // The schema uses passthrough so we can't strictly type check, but we can verify structure + // @ts-expect-error test intentionally inspects passthrough data + const firstChoice = result.choices?.[0]; + expect(firstChoice?.message.annotations).toBeDefined(); + expect(firstChoice?.message.annotations?.[0]?.type).toBe('file'); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts b/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts new file mode 100644 index 0000000..d1ae6ae --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts @@ -0,0 +1,150 @@ 
+import type { LanguageModelV3FilePart } from '@ai-sdk/provider'; +import type { OpenRouterAudioFormat } from '../types/openrouter-chat-completions-input'; + +import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; +import { OPENROUTER_AUDIO_FORMATS } from '../types/openrouter-chat-completions-input'; +import { isUrl } from './is-url'; + +export function getFileUrl({ + part, + defaultMediaType, +}: { + part: LanguageModelV3FilePart; + defaultMediaType: string; +}) { + if (part.data instanceof Uint8Array) { + const base64 = convertUint8ArrayToBase64(part.data); + return `data:${part.mediaType ?? defaultMediaType};base64,${base64}`; + } + + const stringUrl = part.data.toString(); + + if ( + isUrl({ + url: stringUrl, + protocols: new Set(['http:', 'https:'] as const), + }) + ) { + return stringUrl; + } + + return stringUrl.startsWith('data:') + ? stringUrl + : `data:${part.mediaType ?? defaultMediaType};base64,${stringUrl}`; +} + +export function getMediaType( + dataUrl: string, + defaultMediaType: string, +): string { + const match = dataUrl.match(/^data:([^;]+)/); + return match ? (match[1] ?? defaultMediaType) : defaultMediaType; +} + +export function getBase64FromDataUrl(dataUrl: string): string { + const match = dataUrl.match(/^data:[^;]*;base64,(.+)$/); + return match ? match[1]! : dataUrl; +} + +/** MIME type to format mapping for normalization */ +export const MIME_TO_FORMAT: Record = { + // MP3 variants + mpeg: 'mp3', + mp3: 'mp3', + // WAV variants + 'x-wav': 'wav', + wave: 'wav', + wav: 'wav', + // OGG variants + ogg: 'ogg', + vorbis: 'ogg', + // AAC variants + aac: 'aac', + 'x-aac': 'aac', + // M4A variants + m4a: 'm4a', + 'x-m4a': 'm4a', + mp4: 'm4a', + // AIFF variants + aiff: 'aiff', + 'x-aiff': 'aiff', + // FLAC + flac: 'flac', + 'x-flac': 'flac', + // PCM variants + pcm16: 'pcm16', + pcm24: 'pcm24', +}; + +/** + * Converts an audio file part to OpenRouter's input_audio data format. 
+ * + * This function extracts base64-encoded audio data from a file part and + * normalizes the format to one of the supported OpenRouter audio formats. + * + * @param part - The file part containing audio data. Must have a mediaType + * starting with "audio/" and contain either base64 data or a data URL. + * + * @returns An object with `data` (base64-encoded audio) and `format` + * suitable for use in OpenRouter's `input_audio` field. + * + * @throws {Error} When audio is provided as an HTTP/HTTPS URL. OpenRouter requires + * audio to be base64-encoded inline. The error message includes instructions for + * downloading and encoding the audio locally. + * + * @throws {Error} When the audio format is not supported. + * + * @example + * ```ts + * const audioData = getInputAudioData(filePart); + * // Returns: { data: "base64string...", format: "mp3" } + * ``` + */ +export function getInputAudioData(part: LanguageModelV3FilePart): { + data: string; + format: OpenRouterAudioFormat; +} { + const fileData = getFileUrl({ + part, + defaultMediaType: 'audio/mpeg', + }); + + // OpenRouter's input_audio doesn't support URLs directly + if ( + isUrl({ + url: fileData, + protocols: new Set(['http:', 'https:'] as const), + }) + ) { + throw new Error( + `Audio files cannot be provided as URLs.\n\n` + + `OpenRouter requires audio to be base64-encoded. Please:\n` + + `1. Download the audio file locally\n` + + `2. Read it as a Buffer or Uint8Array\n` + + `3. 
Pass it as the data parameter\n\n` + + `The AI SDK will automatically handle base64 encoding.\n\n` + + `Learn more: https://openrouter.ai/docs/features/multimodal/audio`, + ); + } + + // Extract base64 data (handles both data URLs and raw base64) + const data = getBase64FromDataUrl(fileData); + + // Map media type to format + const mediaType = part.mediaType || 'audio/mpeg'; + const rawFormat = mediaType.replace('audio/', ''); + + // Normalize format names for OpenRouter using MIME type mapping + const format = MIME_TO_FORMAT[rawFormat]; + + if (format === undefined) { + const supportedList = OPENROUTER_AUDIO_FORMATS.join(', '); + throw new Error( + `Unsupported audio format: "${mediaType}"\n\n` + + `OpenRouter supports the following audio formats: ${supportedList}\n\n` + + `Learn more: https://openrouter.ai/docs/features/multimodal/audio`, + ); + } + + return { data, format }; +} diff --git a/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts b/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts new file mode 100644 index 0000000..bba13d8 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts @@ -0,0 +1,42 @@ +import type { LanguageModelV3ToolChoice } from '@ai-sdk/provider'; + +import { InvalidArgumentError } from '@ai-sdk/provider'; +import { z } from 'zod/v4'; + +const ChatCompletionToolChoiceSchema = z.union([ + z.literal('auto'), + z.literal('none'), + z.literal('required'), + z.object({ + type: z.literal('function'), + function: z.object({ + name: z.string(), + }), + }), +]); + +type ChatCompletionToolChoice = z.infer; + +export function getChatCompletionToolChoice( + toolChoice: LanguageModelV3ToolChoice, +): ChatCompletionToolChoice { + switch (toolChoice.type) { + case 'auto': + case 'none': + case 'required': + return toolChoice.type; + case 'tool': { + return { + type: 'function', + function: { name: toolChoice.toolName }, + }; + } + default: { + toolChoice satisfies never; + throw new InvalidArgumentError({ + argument: 
'toolChoice', + message: `Invalid tool choice type: ${JSON.stringify(toolChoice)}`, + }); + } + } +} diff --git a/packages/ai-sdk-provider-2/src/chat/index.test.ts b/packages/ai-sdk-provider-2/src/chat/index.test.ts new file mode 100644 index 0000000..14e1658 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/index.test.ts @@ -0,0 +1,2280 @@ +import type { + LanguageModelV3Prompt, + LanguageModelV3StreamPart, +} from '@ai-sdk/provider'; +import type { JSONSchema7 } from 'json-schema'; +import type { ImageResponse } from '../schemas/image'; +import type { ReasoningDetailUnion } from '../schemas/reasoning-details'; + +import { vi } from 'vitest'; +import { createOpenRouter } from '../provider'; +import { ReasoningDetailType } from '../schemas/reasoning-details'; +import { + convertReadableStreamToArray, + createTestServer, +} from '../test-utils/test-server'; + +vi.mock('@/src/version', () => ({ + VERSION: '0.0.0-test', +})); + +const TEST_PROMPT: LanguageModelV3Prompt = [ + { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, +]; + +const TEST_LOGPROBS = { + content: [ + { + token: 'Hello', + logprob: -0.0009994634, + top_logprobs: [ + { + token: 'Hello', + logprob: -0.0009994634, + }, + ], + }, + { + token: '!', + logprob: -0.13410144, + top_logprobs: [ + { + token: '!', + logprob: -0.13410144, + }, + ], + }, + { + token: ' How', + logprob: -0.0009250381, + top_logprobs: [ + { + token: ' How', + logprob: -0.0009250381, + }, + ], + }, + { + token: ' can', + logprob: -0.047709424, + top_logprobs: [ + { + token: ' can', + logprob: -0.047709424, + }, + ], + }, + { + token: ' I', + logprob: -0.000009014684, + top_logprobs: [ + { + token: ' I', + logprob: -0.000009014684, + }, + ], + }, + { + token: ' assist', + logprob: -0.009125131, + top_logprobs: [ + { + token: ' assist', + logprob: -0.009125131, + }, + ], + }, + { + token: ' you', + logprob: -0.0000066306106, + top_logprobs: [ + { + token: ' you', + logprob: -0.0000066306106, + }, + ], + }, + { + 
token: ' today', + logprob: -0.00011093382, + top_logprobs: [ + { + token: ' today', + logprob: -0.00011093382, + }, + ], + }, + { + token: '?', + logprob: -0.00004596782, + top_logprobs: [ + { + token: '?', + logprob: -0.00004596782, + }, + ], + }, + ], +}; + +const TEST_IMAGE_URL = `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAAiXpUWHRSYXcgcHJvZmlsZSB0eXBlIGlwdGMAAAiZTYwxDgIxDAT7vOKekDjrtV1T0VHwgbtcIiEhgfh/QaDgmGlWW0w6X66n5fl6jNu9p+ULkapDENgzpj+Kl5aFfa6KnYWgSjZjGOiSYRxTY/v8KIijI==`; + +const TEST_IMAGE_BASE64 = TEST_IMAGE_URL.split(',')[1]!; + +const provider = createOpenRouter({ + apiKey: 'test-api-key', + compatibility: 'strict', +}); + +const model = provider.chat('anthropic/claude-3.5-sonnet'); + +function isReasoningDeltaPart(part: LanguageModelV3StreamPart): part is Extract< + LanguageModelV3StreamPart, + { + type: 'reasoning-delta'; + } +> { + return part.type === 'reasoning-delta'; +} + +function isReasoningStartPart(part: LanguageModelV3StreamPart): part is Extract< + LanguageModelV3StreamPart, + { + type: 'reasoning-start'; + } +> { + return part.type === 'reasoning-start'; +} + +function isTextDeltaPart(part: LanguageModelV3StreamPart): part is Extract< + LanguageModelV3StreamPart, + { + type: 'text-delta'; + } +> { + return part.type === 'text-delta'; +} + +describe('doGenerate', () => { + const server = createTestServer({ + 'https://openrouter.ai/api/v1/chat/completions': { + response: { type: 'json-value', body: {} }, + }, + }); + + function prepareJsonResponse({ + content = '', + reasoning, + reasoning_details, + images, + tool_calls, + usage = { + prompt_tokens: 4, + total_tokens: 34, + completion_tokens: 30, + }, + logprobs = null, + finish_reason = 'stop', + }: { + content?: string; + reasoning?: string; + reasoning_details?: Array; + images?: Array; + tool_calls?: Array<{ + id: string; + type: 'function'; + function: { name: string; arguments: string }; + }>; + usage?: { + prompt_tokens: number; + total_tokens: number; + 
completion_tokens: number; + }; + logprobs?: { + content: + | { + token: string; + logprob: number; + top_logprobs: { token: string; logprob: number }[]; + }[] + | null; + } | null; + finish_reason?: string; + } = {}) { + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'json-value', + body: { + id: 'chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd', + object: 'chat.completion', + created: 1711115037, + model: 'gpt-3.5-turbo-0125', + choices: [ + { + index: 0, + message: { + role: 'assistant', + content, + reasoning, + reasoning_details, + images, + tool_calls, + }, + logprobs, + finish_reason, + }, + ], + usage, + system_fingerprint: 'fp_3bc1b5746c', + }, + }; + } + + it('should extract text response', async () => { + prepareJsonResponse({ content: 'Hello, World!' }); + + const result = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(result.content[0]).toStrictEqual({ + type: 'text', + text: 'Hello, World!', + }); + }); + + it('should extract usage', async () => { + prepareJsonResponse({ + content: '', + usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, + }); + + const { usage } = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(usage).toStrictEqual({ + inputTokens: { + total: 20, + noCache: undefined, + cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: 5, + text: undefined, + reasoning: undefined, + }, + }); + }); + + it('should extract logprobs', async () => { + prepareJsonResponse({ + logprobs: TEST_LOGPROBS, + }); + + await provider.chat('openai/gpt-3.5-turbo', { logprobs: 1 }).doGenerate({ + prompt: TEST_PROMPT, + }); + }); + + it('should extract finish reason', async () => { + prepareJsonResponse({ + content: '', + finish_reason: 'stop', + }); + + const response = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(response.finishReason).toStrictEqual({ + unified: 'stop', + raw: 'stop', + }); + }); + + it('should support unknown finish 
reason', async () => { + prepareJsonResponse({ + content: '', + finish_reason: 'eos', + }); + + const response = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(response.finishReason).toStrictEqual({ + unified: 'other', + raw: 'eos', + }); + }); + + it('should extract reasoning content from reasoning field', async () => { + prepareJsonResponse({ + content: 'Hello!', + reasoning: + 'I need to think about this... The user said hello, so I should respond with a greeting.', + }); + + const result = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(result.content).toStrictEqual([ + { + type: 'reasoning', + text: 'I need to think about this... The user said hello, so I should respond with a greeting.', + }, + { + type: 'text', + text: 'Hello!', + }, + ]); + }); + + it('should extract reasoning content from reasoning_details', async () => { + prepareJsonResponse({ + content: 'Hello!', + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'Let me analyze this request...', + }, + { + type: ReasoningDetailType.Summary, + summary: 'The user wants a greeting response.', + }, + ], + }); + + const result = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(result.content).toStrictEqual([ + { + type: 'reasoning', + text: 'Let me analyze this request...', + providerMetadata: { + openrouter: { + reasoning_details: [ + { + type: 'reasoning.text', + text: 'Let me analyze this request...', + }, + ], + }, + }, + }, + { + type: 'reasoning', + text: 'The user wants a greeting response.', + providerMetadata: { + openrouter: { + reasoning_details: [ + { + type: 'reasoning.summary', + summary: 'The user wants a greeting response.', + }, + ], + }, + }, + }, + { + type: 'text', + text: 'Hello!', + }, + ]); + }); + + it('should handle encrypted reasoning details', async () => { + prepareJsonResponse({ + content: 'Hello!', + reasoning_details: [ + { + type: ReasoningDetailType.Encrypted, + data: 'encrypted_reasoning_data_here', + }, + 
], + }); + + const result = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(result.content).toStrictEqual([ + { + type: 'reasoning', + text: '[REDACTED]', + providerMetadata: { + openrouter: { + reasoning_details: [ + { + type: 'reasoning.encrypted', + data: 'encrypted_reasoning_data_here', + }, + ], + }, + }, + }, + { + type: 'text', + text: 'Hello!', + }, + ]); + }); + + it('should prioritize reasoning_details over reasoning when both are present', async () => { + prepareJsonResponse({ + content: 'Hello!', + reasoning: 'This should be ignored when reasoning_details is present', + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'Processing from reasoning_details...', + }, + { + type: ReasoningDetailType.Summary, + summary: 'Summary from reasoning_details', + }, + ], + }); + + const result = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(result.content).toStrictEqual([ + { + type: 'reasoning', + text: 'Processing from reasoning_details...', + providerMetadata: { + openrouter: { + reasoning_details: [ + { + type: 'reasoning.text', + text: 'Processing from reasoning_details...', + }, + ], + }, + }, + }, + { + type: 'reasoning', + text: 'Summary from reasoning_details', + providerMetadata: { + openrouter: { + reasoning_details: [ + { + type: 'reasoning.summary', + summary: 'Summary from reasoning_details', + }, + ], + }, + }, + }, + { + type: 'text', + text: 'Hello!', + }, + ]); + + // Verify that the reasoning field content is not included + expect(result.content).not.toContainEqual({ + type: 'reasoning', + text: 'This should be ignored when reasoning_details is present', + }); + }); + + it('should override finishReason to tool-calls when tool calls and encrypted reasoning are present', async () => { + prepareJsonResponse({ + content: '', + tool_calls: [ + { + id: 'call_123', + type: 'function', + function: { + name: 'get_weather', + arguments: '{"location":"San Francisco"}', + }, + }, + ], + reasoning_details: [ 
+ { + type: ReasoningDetailType.Encrypted, + data: 'encrypted_reasoning_data_here', + }, + ], + // Gemini 3 returns 'stop' instead of 'tool_calls' when using thoughtSignature + finish_reason: 'stop', + }); + + const result = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + // Should override to 'tool-calls' when encrypted reasoning + tool calls + stop + expect(result.finishReason).toStrictEqual({ + unified: 'tool-calls', + raw: 'stop', + }); + + // Should still have the tool call in content + expect(result.content).toContainEqual( + expect.objectContaining({ + type: 'tool-call', + toolCallId: 'call_123', + toolName: 'get_weather', + }), + ); + }); + + it('should pass the model and the messages', async () => { + prepareJsonResponse({ content: '' }); + + await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + model: 'anthropic/claude-3.5-sonnet', + messages: [{ role: 'user', content: 'Hello' }], + }); + }); + + it('should pass the models array when provided', async () => { + prepareJsonResponse({ content: '' }); + + const customModel = provider.chat('anthropic/claude-3.5-sonnet', { + models: ['anthropic/claude-2', 'gryphe/mythomax-l2-13b'], + }); + + await customModel.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + model: 'anthropic/claude-3.5-sonnet', + models: ['anthropic/claude-2', 'gryphe/mythomax-l2-13b'], + messages: [{ role: 'user', content: 'Hello' }], + }); + }); + + it('should pass settings', async () => { + prepareJsonResponse(); + + await provider + .chat('openai/gpt-3.5-turbo', { + logitBias: { 50256: -100 }, + logprobs: 2, + parallelToolCalls: false, + user: 'test-user-id', + }) + .doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + model: 'openai/gpt-3.5-turbo', + messages: [{ role: 'user', content: 'Hello' }], + logprobs: true, + top_logprobs: 2, + 
logit_bias: { 50256: -100 }, + parallel_tool_calls: false, + user: 'test-user-id', + }); + }); + + it('should pass tools and toolChoice', async () => { + prepareJsonResponse({ content: '' }); + + await model.doGenerate({ + prompt: TEST_PROMPT, + tools: [ + { + type: 'function', + name: 'test-tool', + description: 'Test tool', + inputSchema: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', + }, + }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', + }, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + model: 'anthropic/claude-3.5-sonnet', + messages: [{ role: 'user', content: 'Hello' }], + tools: [ + { + type: 'function', + function: { + name: 'test-tool', + description: 'Test tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', + }, + }, + }, + ], + tool_choice: { + type: 'function', + function: { name: 'test-tool' }, + }, + }); + }); + + it('should pass headers', async () => { + prepareJsonResponse({ content: '' }); + + const provider = createOpenRouter({ + apiKey: 'test-api-key', + headers: { + 'Custom-Provider-Header': 'provider-header-value', + }, + }); + + await provider.chat('openai/gpt-3.5-turbo').doGenerate({ + prompt: TEST_PROMPT, + headers: { + 'Custom-Request-Header': 'request-header-value', + }, + }); + + const requestHeaders = server.calls[0]!.requestHeaders; + + expect(requestHeaders).toMatchObject({ + authorization: 'Bearer test-api-key', + 'content-type': 'application/json', + 'custom-provider-header': 'provider-header-value', + 'custom-request-header': 'request-header-value', + }); + expect(requestHeaders['user-agent']).toContain( + 'ai-sdk/openrouter/0.0.0-test', + ); + }); + + it('should pass responseFormat for JSON schema structured outputs', async 
() => { + prepareJsonResponse({ content: '{"name": "John", "age": 30}' }); + + const testSchema: JSONSchema7 = { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + additionalProperties: false, + }; + + await model.doGenerate({ + prompt: TEST_PROMPT, + responseFormat: { + type: 'json', + schema: testSchema, + name: 'PersonResponse', + description: 'A person object', + }, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + model: 'anthropic/claude-3.5-sonnet', + messages: [{ role: 'user', content: 'Hello' }], + response_format: { + type: 'json_schema', + json_schema: { + schema: testSchema, + strict: true, + name: 'PersonResponse', + description: 'A person object', + }, + }, + }); + }); + + it('should use default name when name is not provided in responseFormat', async () => { + prepareJsonResponse({ content: '{"name": "John", "age": 30}' }); + + const testSchema: JSONSchema7 = { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + additionalProperties: false, + }; + + await model.doGenerate({ + prompt: TEST_PROMPT, + responseFormat: { + type: 'json', + schema: testSchema, + }, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + model: 'anthropic/claude-3.5-sonnet', + messages: [{ role: 'user', content: 'Hello' }], + response_format: { + type: 'json_schema', + json_schema: { + schema: testSchema, + strict: true, + name: 'response', + }, + }, + }); + }); + + it('should pass images', async () => { + prepareJsonResponse({ + content: '', + images: [ + { + type: 'image_url', + image_url: { url: TEST_IMAGE_URL }, + }, + ], + usage: { prompt_tokens: 53, total_tokens: 70, completion_tokens: 17 }, + }); + + const result = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(result.content).toStrictEqual([ + { + type: 'file', + mediaType: 'image/png', + data: 
TEST_IMAGE_BASE64, + }, + ]); + }); +}); + +describe('doStream', () => { + const server = createTestServer({ + 'https://openrouter.ai/api/v1/chat/completions': { + response: { type: 'json-value', body: {} }, + }, + }); + + function prepareStreamResponse({ + content, + usage = { + prompt_tokens: 17, + total_tokens: 244, + completion_tokens: 227, + }, + logprobs = null, + finish_reason = 'stop', + }: { + content: string[]; + usage?: { + prompt_tokens: number; + total_tokens: number; + completion_tokens: number; + prompt_tokens_details?: { + cached_tokens: number; + }; + completion_tokens_details?: { + reasoning_tokens: number; + }; + cost?: number; + cost_details?: { + upstream_inference_cost: number; + }; + }; + logprobs?: { + content: + | { + token: string; + logprob: number; + top_logprobs: { token: string; logprob: number }[]; + }[] + | null; + } | null; + finish_reason?: string; + }) { + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + + `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`, + ...content.flatMap((text) => { + return `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"${text}"},"finish_reason":null}]}\n\n`; + }), + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"${finish_reason}","logprobs":${JSON.stringify( + logprobs, + )}}]}\n\n`, + `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":${JSON.stringify( + usage, + )}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + } + + it('should stream text deltas', async () => { + prepareStreamResponse({ + content: ['Hello', ', ', 'World!'], + finish_reason: 'stop', + usage: { + prompt_tokens: 17, + total_tokens: 244, + completion_tokens: 227, + }, + logprobs: TEST_LOGPROBS, + }); + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + // note: space moved to last chunk bc of trimming + const elements = await convertReadableStreamToArray(stream); + expect(elements).toStrictEqual([ + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0613', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0613', + }, + { type: 'text-start', id: expect.any(String) }, + { type: 'text-delta', delta: 'Hello', id: expect.any(String) }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0613', + }, + { type: 'text-delta', delta: ', ', id: expect.any(String) }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0613', + }, + { type: 'text-delta', delta: 'World!', id: expect.any(String) }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0613', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0613', + }, + { + type: 'text-end', + id: expect.any(String), + }, + { + 
type: 'finish', + finishReason: { unified: 'stop', raw: 'stop' }, + + providerMetadata: { + openrouter: { + usage: { + completionTokens: 227, + promptTokens: 17, + totalTokens: 244, + cost: undefined, + }, + }, + }, + usage: { + inputTokens: { + total: 17, + noCache: undefined, + cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: 227, + text: undefined, + reasoning: undefined, + }, + }, + }, + ]); + }); + + it('should include upstream inference cost in finish metadata when provided', async () => { + prepareStreamResponse({ + content: ['Hello'], + usage: { + prompt_tokens: 17, + total_tokens: 244, + completion_tokens: 227, + cost_details: { + upstream_inference_cost: 0.0036, + }, + }, + }); + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = (await convertReadableStreamToArray( + stream, + )) as LanguageModelV3StreamPart[]; + const finishChunk = elements.find( + ( + chunk, + ): chunk is Extract => + chunk.type === 'finish', + ); + const openrouterUsage = ( + finishChunk?.providerMetadata?.openrouter as { + usage?: { + cost?: number; + costDetails?: { upstreamInferenceCost: number }; + }; + } + )?.usage; + expect(openrouterUsage?.costDetails).toStrictEqual({ + upstreamInferenceCost: 0.0036, + }); + }); + + it('should handle both normal cost and upstream inference cost in finish metadata when both are provided', async () => { + prepareStreamResponse({ + content: ['Hello'], + usage: { + prompt_tokens: 17, + total_tokens: 244, + completion_tokens: 227, + cost: 0.0042, + cost_details: { + upstream_inference_cost: 0.0036, + }, + }, + }); + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = (await convertReadableStreamToArray( + stream, + )) as LanguageModelV3StreamPart[]; + const finishChunk = elements.find( + ( + chunk, + ): chunk is Extract => + chunk.type === 'finish', + ); + const openrouterUsage = ( + finishChunk?.providerMetadata?.openrouter as { + 
usage?: { + cost?: number; + costDetails?: { upstreamInferenceCost: number }; + }; + } + )?.usage; + expect(openrouterUsage?.costDetails).toStrictEqual({ + upstreamInferenceCost: 0.0036, + }); + expect(openrouterUsage?.cost).toBe(0.0042); + }); + + it('should prioritize reasoning_details over reasoning when both are present in streaming', async () => { + // This test verifies that when the API returns both 'reasoning' and 'reasoning_details' fields, + // we prioritize reasoning_details and ignore the reasoning field to avoid duplicates. + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + // First chunk: both reasoning and reasoning_details with different content + `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"",` + + `"reasoning":"This should be ignored...",` + + `"reasoning_details":[{"type":"${ReasoningDetailType.Text}","text":"Let me think about this..."}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Second chunk: reasoning_details with multiple types + `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + + `"reasoning":"Also ignored",` + + `"reasoning_details":[{"type":"${ReasoningDetailType.Summary}","summary":"User wants a greeting"},{"type":"${ReasoningDetailType.Encrypted}","data":"secret"}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Third chunk: only reasoning field (should be processed) + `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + + `"reasoning":"This reasoning is used"},` + + 
`"logprobs":null,"finish_reason":null}]}\n\n`, + // Content chunk + `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"content":"Hello!"},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Finish chunk + `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + + `"logprobs":null,"finish_reason":"stop"}]}\n\n`, + `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":30,"total_tokens":47}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = await convertReadableStreamToArray(stream); + + // Filter for reasoning-related elements + const reasoningElements = elements.filter( + (el) => + el.type === 'reasoning-start' || + el.type === 'reasoning-delta' || + el.type === 'reasoning-end', + ); + + // Debug output to see what we're getting + // console.log('Reasoning elements count:', reasoningElements.length); + // console.log('Reasoning element types:', reasoningElements.map(el => el.type)); + + // We should get reasoning content from reasoning_details when present, not reasoning field + // start + 4 deltas (text, summary, encrypted, reasoning-only) + end = 6 + expect(reasoningElements).toHaveLength(6); + + // Verify the content comes from reasoning_details, not reasoning field + const reasoningDeltas = reasoningElements + .filter(isReasoningDeltaPart) + .map((el) => el.delta); + + expect(reasoningDeltas).toEqual([ + 'Let me think about this...', // from reasoning_details text + 'User wants a greeting', // from reasoning_details summary + 
'[REDACTED]', // from reasoning_details encrypted + 'This reasoning is used', // from reasoning field (no reasoning_details) + ]); + + // Verify that "This should be ignored..." and "Also ignored" are NOT in the output + expect(reasoningDeltas).not.toContain('This should be ignored...'); + expect(reasoningDeltas).not.toContain('Also ignored'); + + // Verify that reasoning-delta chunks include providerMetadata with reasoning_details + const reasoningDeltaElements = elements.filter(isReasoningDeltaPart); + + // First delta should have reasoning_details from first chunk + expect(reasoningDeltaElements[0]?.providerMetadata).toEqual({ + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'Let me think about this...', + }, + ], + }, + }); + + // Second and third deltas should have reasoning_details from second chunk + expect(reasoningDeltaElements[1]?.providerMetadata).toEqual({ + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Summary, + summary: 'User wants a greeting', + }, + { + type: ReasoningDetailType.Encrypted, + data: 'secret', + }, + ], + }, + }); + + expect(reasoningDeltaElements[2]?.providerMetadata).toEqual({ + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Summary, + summary: 'User wants a greeting', + }, + { + type: ReasoningDetailType.Encrypted, + data: 'secret', + }, + ], + }, + }); + + // Fourth delta (from reasoning field only) should not have providerMetadata + expect(reasoningDeltaElements[3]?.providerMetadata).toBeUndefined(); + }); + + it('should emit reasoning_details in providerMetadata for all reasoning delta chunks', async () => { + // This test verifies that reasoning_details are included in providerMetadata + // for all reasoning-delta chunks, enabling users to accumulate them for multi-turn conversations + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + // First chunk: reasoning_details with Text type + 
`data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"",` + + `"reasoning_details":[{"type":"${ReasoningDetailType.Text}","text":"First reasoning chunk"}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Second chunk: reasoning_details with Summary type + `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + + `"reasoning_details":[{"type":"${ReasoningDetailType.Summary}","summary":"Summary reasoning"}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Third chunk: reasoning_details with Encrypted type + `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + + `"reasoning_details":[{"type":"${ReasoningDetailType.Encrypted}","data":"encrypted_data"}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Finish chunk + `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + + `"logprobs":null,"finish_reason":"stop"}]}\n\n`, + `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":30,"total_tokens":47}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = await convertReadableStreamToArray(stream); + + const reasoningDeltaElements = elements.filter(isReasoningDeltaPart); + + 
expect(reasoningDeltaElements).toHaveLength(3); + + // Verify each delta has the correct reasoning_details in providerMetadata + expect(reasoningDeltaElements[0]?.providerMetadata).toEqual({ + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'First reasoning chunk', + }, + ], + }, + }); + + expect(reasoningDeltaElements[1]?.providerMetadata).toEqual({ + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Summary, + summary: 'Summary reasoning', + }, + ], + }, + }); + + expect(reasoningDeltaElements[2]?.providerMetadata).toEqual({ + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Encrypted, + data: 'encrypted_data', + }, + ], + }, + }); + + // Verify reasoning-start also has providerMetadata when first delta includes it + const reasoningStart = elements.find(isReasoningStartPart); + + expect(reasoningStart?.providerMetadata).toEqual({ + openrouter: { + reasoning_details: [ + { + type: ReasoningDetailType.Text, + text: 'First reasoning chunk', + }, + ], + }, + }); + }); + + it('should maintain correct reasoning order when content comes after reasoning (issue #7824)', async () => { + // This test reproduces the issue where reasoning appears first but then gets "pushed down" + // by content that comes later in the stream + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + // First chunk: Start with reasoning + `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant",` + + `"reasoning":"I need to think about this step by step..."},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Second chunk: More reasoning + `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + 
`"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + + `"reasoning":" First, I should analyze the request."},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Third chunk: Even more reasoning + `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + + `"reasoning":" Then I should provide a helpful response."},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Fourth chunk: Content starts + `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"content":"Hello! "},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Fifth chunk: More content + `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"content":"How can I help you today?"},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Finish chunk + `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + + `"logprobs":null,"finish_reason":"stop"}]}\n\n`, + `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":30,"total_tokens":47}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = await convertReadableStreamToArray(stream); + + // The expected order should be: + // 1. reasoning-start + // 2. reasoning-delta (3 times) + // 3. reasoning-end (when text starts) + // 4. 
text-start + // 5. text-delta (2 times) + // 6. text-end (when stream finishes) + + const streamOrder = elements.map((el) => el.type); + + // Find the positions of key events + const reasoningStartIndex = streamOrder.indexOf('reasoning-start'); + const reasoningEndIndex = streamOrder.indexOf('reasoning-end'); + const textStartIndex = streamOrder.indexOf('text-start'); + + // Reasoning should come before text and end before text starts + expect(reasoningStartIndex).toBeLessThan(textStartIndex); + expect(reasoningEndIndex).toBeLessThan(textStartIndex); + + // Verify reasoning content + const reasoningDeltas = elements + .filter(isReasoningDeltaPart) + .map((el) => el.delta); + + expect(reasoningDeltas).toEqual([ + 'I need to think about this step by step...', + ' First, I should analyze the request.', + ' Then I should provide a helpful response.', + ]); + + // Verify text content + const textDeltas = elements.filter(isTextDeltaPart).map((el) => el.delta); + + expect(textDeltas).toEqual(['Hello! 
', 'How can I help you today?']); + }); + + it('should stream tool deltas', async () => { + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + + `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":""}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\\""}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"value"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\":\\""}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Spark"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"le"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Day"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\"}"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + + const { stream } = await model.doStream({ + tools: [ + { + type: 'function', + name: 'test-tool', + inputSchema: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', + }, + }, + ], + prompt: TEST_PROMPT, + }); + + expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 
'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolName: 'test-tool', + type: 'tool-input-start', + }, + { + type: 'tool-input-delta', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + delta: '{"', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'tool-input-delta', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + delta: 'value', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'tool-input-delta', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + delta: '":"', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'tool-input-delta', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + delta: 'Spark', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'tool-input-delta', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + delta: 'le', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'tool-input-delta', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + delta: ' Day', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'tool-input-delta', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + delta: '"}', + }, + { + type: 'tool-call', + 
toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolName: 'test-tool', + input: '{"value":"Sparkle Day"}', + providerMetadata: { + openrouter: { + reasoning_details: [], + }, + }, + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'finish', + finishReason: { unified: 'tool-calls', raw: 'tool_calls' }, + providerMetadata: { + openrouter: { + usage: { + completionTokens: 17, + promptTokens: 53, + totalTokens: 70, + cost: undefined, + }, + }, + }, + usage: { + inputTokens: { + total: 53, + noCache: undefined, + cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: 17, + text: undefined, + reasoning: undefined, + }, + }, + }, + ]); + }); + + it('should stream tool call that is sent in one chunk', async () => { + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + + `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":"{\\"value\\":\\"Sparkle Day\\"}"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, + `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + + const { stream } = await model.doStream({ + tools: [ + { + type: 'function', + name: 'test-tool', + inputSchema: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', + }, + }, + ], + prompt: TEST_PROMPT, + }); + + const elements = await convertReadableStreamToArray(stream); + expect(elements).toStrictEqual([ + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'tool-input-start', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolName: 'test-tool', + }, + { + type: 'tool-input-delta', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + delta: '{"value":"Sparkle Day"}', + }, + { + type: 'tool-input-end', + id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + }, + { + type: 'tool-call', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolName: 'test-tool', + input: '{"value":"Sparkle Day"}', + providerMetadata: { + openrouter: { + reasoning_details: [], + }, + }, + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + }, + { + type: 'response-metadata', + modelId: 'gpt-3.5-turbo-0125', + }, + { + type: 'finish', + finishReason: { unified: 'tool-calls', raw: 'tool_calls' }, + providerMetadata: { + openrouter: { + usage: { + completionTokens: 17, + promptTokens: 53, + totalTokens: 70, + cost: undefined, + }, + }, + }, + usage: { + inputTokens: { + total: 53, + noCache: undefined, + 
cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: 17, + text: undefined, + reasoning: undefined, + }, + }, + }, + ]); + }); + + it('should override finishReason to tool-calls in streaming when tool calls and encrypted reasoning are present', async () => { + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + // First chunk: reasoning_details with encrypted data + `data: {"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + + `"system_fingerprint":"fp_gemini3","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + + `"reasoning_details":[{"type":"reasoning.encrypted","data":"encrypted_thoughtsig_data"}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Second chunk: tool call + `data: {"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + + `"system_fingerprint":"fp_gemini3","choices":[{"index":0,"delta":{` + + `"tool_calls":[{"index":0,"id":"call_gemini3_123","type":"function","function":{"name":"get_weather","arguments":"{\\"location\\":\\"SF\\"}"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Final chunk: finish_reason is "stop" (Gemini 3 bug) - should be overridden to "tool-calls" + `data: {"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + + `"system_fingerprint":"fp_gemini3","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`, + `data: {"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + + `"system_fingerprint":"fp_gemini3","choices":[],"usage":{"prompt_tokens":10,"completion_tokens":20,"total_tokens":30}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + + const { stream } = await model.doStream({ + tools: [ + { + type: 'function', + name: 'get_weather', + inputSchema: { + type: 
'object',
            properties: { location: { type: 'string' } },
            required: ['location'],
            additionalProperties: false,
            $schema: 'http://json-schema.org/draft-07/schema#',
          },
        },
      ],
      prompt: TEST_PROMPT,
    });

    const elements = await convertReadableStreamToArray(stream);

    // Locate the terminal finish event among the emitted stream parts.
    const finishEvent = elements.find(
      (el): el is LanguageModelV3StreamPart & { type: 'finish' } =>
        el.type === 'finish',
    );

    // Encrypted reasoning + tool calls + raw 'stop' must be overridden to 'tool-calls'.
    expect(finishEvent?.finishReason).toStrictEqual({
      unified: 'tool-calls',
      raw: 'stop',
    });

    // The tool call itself must still be surfaced to the caller.
    const toolCallEvent = elements.find(
      (el): el is LanguageModelV3StreamPart & { type: 'tool-call' } =>
        el.type === 'tool-call',
    );
    expect(toolCallEvent?.toolName).toBe('get_weather');
    expect(toolCallEvent?.toolCallId).toBe('call_gemini3_123');
  });

  it('should stream images', async () => {
    server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = {
      type: 'stream-chunks',
      chunks: [
        `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` +
          `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"",` +
          `"images":[{"type":"image_url","image_url":{"url":"${TEST_IMAGE_URL}"},"index":0}]},` +
          `"logprobs":null,"finish_reason":"stop"}]}\n\n`,
        `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` +
          `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`,
        'data: [DONE]\n\n',
      ],
    };

    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
    });

    expect(await convertReadableStreamToArray(stream)).toStrictEqual([
      {
        type: 'response-metadata',
        id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP',
      },
      {
        type: 'response-metadata',
        modelId: 'gpt-3.5-turbo-0125',
      },
      {
        type: 'file',
        mediaType: 'image/png',
        data: TEST_IMAGE_BASE64,
      },
      {
        type: 'response-metadata',
        id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP',
      },
      {
        type: 'response-metadata',
        modelId: 'gpt-3.5-turbo-0125',
      },
      {
        type: 'finish',
        finishReason: { unified: 'stop', raw: 'stop' },
        providerMetadata: {
          openrouter: {
            usage: {
              completionTokens: 17,
              promptTokens: 53,
              totalTokens: 70,
              cost: undefined,
            },
          },
        },
        usage: {
          inputTokens: {
            total: 53,
            noCache: undefined,
            cacheRead: undefined,
            cacheWrite: undefined,
          },
          outputTokens: {
            total: 17,
            text: undefined,
            reasoning: undefined,
          },
        },
      },
    ]);
  });

  it('should handle error stream parts', async () => {
    server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = {
      type: 'stream-chunks',
      chunks: [
        `data: {"error":{"message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` +
          `help center at help.openrouter.com if you keep seeing this error.","type":"server_error","param":null,"code":null}}\n\n`,
        'data: [DONE]\n\n',
      ],
    };

    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
    });

    expect(await convertReadableStreamToArray(stream)).toStrictEqual([
      {
        type: 'error',
        error: {
          message:
            'The server had an error processing your request. Sorry about that! ' +
            'You can retry your request, or contact us through our help center at ' +
            'help.openrouter.com if you keep seeing this error.',
          type: 'server_error',
          code: null,
          param: null,
        },
      },
      {
        finishReason: { unified: 'error', raw: undefined },
        providerMetadata: {
          openrouter: {
            usage: {},
          },
        },
        type: 'finish',
        usage: {
          inputTokens: {
            total: undefined,
            noCache: undefined,
            cacheRead: undefined,
            cacheWrite: undefined,
          },
          outputTokens: {
            total: undefined,
            text: undefined,
            reasoning: undefined,
          },
        },
      },
    ]);
  });

  it('should handle unparsable stream parts', async () => {
    server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = {
      type: 'stream-chunks',
      chunks: ['data: {unparsable}\n\n', 'data: [DONE]\n\n'],
    };

    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
    });

    const elements = await convertReadableStreamToArray(stream);

    // An unparsable chunk is surfaced as an error part, followed by an error finish.
    expect(elements.length).toBe(2);
    expect(elements[0]?.type).toBe('error');
    expect(elements[1]).toStrictEqual({
      finishReason: { unified: 'error', raw: undefined },
      type: 'finish',
      providerMetadata: {
        openrouter: {
          usage: {},
        },
      },
      usage: {
        inputTokens: {
          total: undefined,
          noCache: undefined,
          cacheRead: undefined,
          cacheWrite: undefined,
        },
        outputTokens: {
          total: undefined,
          text: undefined,
          reasoning: undefined,
        },
      },
    });
  });

  it('should pass the messages and the model', async () => {
    prepareStreamResponse({ content: [] });

    await model.doStream({
      prompt: TEST_PROMPT,
    });

    expect(await server.calls[0]!.requestBodyJson).toStrictEqual({
      stream: true,
      stream_options: { include_usage: true },
      model: 'anthropic/claude-3.5-sonnet',
      messages: [{ role: 'user', content: 'Hello' }],
    });
  });

  it('should pass headers', async () => {
    prepareStreamResponse({ content: [] });

    const provider = createOpenRouter({
      apiKey: 'test-api-key',
      headers: {
        'Custom-Provider-Header':
'provider-header-value', + }, + }); + + await provider.chat('openai/gpt-3.5-turbo').doStream({ + prompt: TEST_PROMPT, + headers: { + 'Custom-Request-Header': 'request-header-value', + }, + }); + + const requestHeaders = server.calls[0]!.requestHeaders; + + expect(requestHeaders).toMatchObject({ + authorization: 'Bearer test-api-key', + 'content-type': 'application/json', + 'custom-provider-header': 'provider-header-value', + 'custom-request-header': 'request-header-value', + }); + expect(requestHeaders['user-agent']).toContain( + 'ai-sdk/openrouter/0.0.0-test', + ); + }); + + it('should pass extra body', async () => { + prepareStreamResponse({ content: [] }); + + const provider = createOpenRouter({ + apiKey: 'test-api-key', + extraBody: { + custom_field: 'custom_value', + providers: { + anthropic: { + custom_field: 'custom_value', + }, + }, + }, + }); + + await provider.chat('anthropic/claude-3.5-sonnet').doStream({ + prompt: TEST_PROMPT, + }); + + const requestBody = await server.calls[0]!.requestBodyJson; + + expect(requestBody).toHaveProperty('custom_field', 'custom_value'); + expect(requestBody).toHaveProperty( + 'providers.anthropic.custom_field', + 'custom_value', + ); + }); + + it('should pass responseFormat for JSON schema structured outputs', async () => { + prepareStreamResponse({ content: ['{"name": "John", "age": 30}'] }); + + const testSchema: JSONSchema7 = { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + additionalProperties: false, + }; + + await model.doStream({ + prompt: TEST_PROMPT, + responseFormat: { + type: 'json', + schema: testSchema, + name: 'PersonResponse', + description: 'A person object', + }, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + stream: true, + stream_options: { include_usage: true }, + model: 'anthropic/claude-3.5-sonnet', + messages: [{ role: 'user', content: 'Hello' }], + response_format: { + type: 'json_schema', + 
json_schema: { + schema: testSchema, + strict: true, + name: 'PersonResponse', + description: 'A person object', + }, + }, + }); + }); + + it('should pass responseFormat AND tools together', async () => { + prepareStreamResponse({ content: ['{"name": "John", "age": 30}'] }); + + const testSchema: JSONSchema7 = { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + }, + required: ['name', 'age'], + additionalProperties: false, + }; + + await model.doStream({ + prompt: TEST_PROMPT, + responseFormat: { + type: 'json', + schema: testSchema, + name: 'PersonResponse', + description: 'A person object', + }, + tools: [ + { + type: 'function', + name: 'test-tool', + description: 'Test tool', + inputSchema: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', + }, + }, + ], + toolChoice: { + type: 'tool', + toolName: 'test-tool', + }, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + stream: true, + stream_options: { include_usage: true }, + model: 'anthropic/claude-3.5-sonnet', + messages: [{ role: 'user', content: 'Hello' }], + response_format: { + type: 'json_schema', + json_schema: { + schema: testSchema, + strict: true, + name: 'PersonResponse', + description: 'A person object', + }, + }, + tools: [ + { + type: 'function', + function: { + name: 'test-tool', + description: 'Test tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', + }, + }, + }, + ], + tool_choice: { + type: 'function', + function: { name: 'test-tool' }, + }, + }); + }); + + it('should pass debug settings', async () => { + prepareStreamResponse({ content: ['Hello'] }); + + const debugModel = provider.chat('anthropic/claude-3.5-sonnet', { + debug: { + echo_upstream_body: true, + }, + }); 
+ + await debugModel.doStream({ + prompt: TEST_PROMPT, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + stream: true, + stream_options: { include_usage: true }, + model: 'anthropic/claude-3.5-sonnet', + messages: [{ role: 'user', content: 'Hello' }], + debug: { + echo_upstream_body: true, + }, + }); + }); + + it('should include file annotations in finish metadata when streamed', async () => { + // This test verifies that file annotations from FileParserPlugin are accumulated + // during streaming and included in the finish event's providerMetadata + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + // First chunk with role and content + `data: {"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"The title is Bitcoin."},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Chunk with file annotation + `data: {"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + + `"annotations":[{"type":"file","file":{"hash":"abc123def456","name":"bitcoin.pdf","content":[{"type":"text","text":"Page 1 content"},{"type":"text","text":"Page 2 content"}]}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Finish chunk + `data: {"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + + `"logprobs":null,"finish_reason":"stop"}]}\n\n`, + `data: {"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + + 
`"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":100,"completion_tokens":20,"total_tokens":120}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = (await convertReadableStreamToArray( + stream, + )) as LanguageModelV3StreamPart[]; + + // Find the finish chunk + const finishChunk = elements.find( + ( + chunk, + ): chunk is Extract => + chunk.type === 'finish', + ); + + expect(finishChunk).toBeDefined(); + + // Verify file annotations are included in providerMetadata + const openrouterMetadata = finishChunk?.providerMetadata?.openrouter as { + annotations?: Array<{ + type: 'file'; + file: { + hash: string; + name: string; + content?: Array<{ type: string; text?: string }>; + }; + }>; + }; + + expect(openrouterMetadata?.annotations).toStrictEqual([ + { + type: 'file', + file: { + hash: 'abc123def456', + name: 'bitcoin.pdf', + content: [ + { type: 'text', text: 'Page 1 content' }, + { type: 'text', text: 'Page 2 content' }, + ], + }, + }, + ]); + }); + + it('should accumulate multiple file annotations from stream', async () => { + // This test verifies that multiple file annotations are accumulated correctly + server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { + type: 'stream-chunks', + chunks: [ + // First chunk with content + `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"Comparing two documents."},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // First file annotation + `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + + 
`"annotations":[{"type":"file","file":{"hash":"hash1","name":"doc1.pdf","content":[{"type":"text","text":"Doc 1"}]}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Second file annotation + `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + + `"annotations":[{"type":"file","file":{"hash":"hash2","name":"doc2.pdf","content":[{"type":"text","text":"Doc 2"}]}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + // Finish chunk + `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + + `"logprobs":null,"finish_reason":"stop"}]}\n\n`, + `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":100,"completion_tokens":20,"total_tokens":120}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = (await convertReadableStreamToArray( + stream, + )) as LanguageModelV3StreamPart[]; + + const finishChunk = elements.find( + ( + chunk, + ): chunk is Extract => + chunk.type === 'finish', + ); + + const openrouterMetadata = finishChunk?.providerMetadata?.openrouter as { + annotations?: Array<{ + type: 'file'; + file: { + hash: string; + name: string; + content?: Array<{ type: string; text?: string }>; + }; + }>; + }; + + // Both file annotations should be accumulated + expect(openrouterMetadata?.annotations).toHaveLength(2); + expect(openrouterMetadata?.annotations?.[0]?.file.hash).toBe('hash1'); + expect(openrouterMetadata?.annotations?.[1]?.file.hash).toBe('hash2'); + }); +}); + +describe('debug settings', () => { + const server = createTestServer({ + 
'https://openrouter.ai/api/v1/chat/completions': {
      response: { type: 'json-value', body: {} },
    },
  });

  // Installs a canned non-streaming chat-completion response on the mock server.
  function prepareJsonResponse({ content = '' }: { content?: string } = {}) {
    server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = {
      type: 'json-value',
      body: {
        id: 'chatcmpl-test',
        object: 'chat.completion',
        created: 1711115037,
        model: 'anthropic/claude-3.5-sonnet',
        choices: [
          {
            index: 0,
            message: {
              role: 'assistant',
              content,
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 4,
          total_tokens: 34,
          completion_tokens: 30,
        },
      },
    };
  }

  it('should pass debug settings in doGenerate', async () => {
    prepareJsonResponse({ content: 'Hello!' });

    const debugModel = provider.chat('anthropic/claude-3.5-sonnet', {
      debug: {
        echo_upstream_body: true,
      },
    });

    await debugModel.doGenerate({
      prompt: TEST_PROMPT,
    });

    expect(await server.calls[0]!.requestBodyJson).toStrictEqual({
      model: 'anthropic/claude-3.5-sonnet',
      messages: [{ role: 'user', content: 'Hello' }],
      debug: {
        echo_upstream_body: true,
      },
    });
  });

  it('should not include debug when not set', async () => {
    prepareJsonResponse({ content: 'Hello!' });

    await model.doGenerate({
      prompt: TEST_PROMPT,
    });

    // The debug key must be omitted entirely, not sent as undefined/null.
    const requestBody = await server.calls[0]!.requestBodyJson;
    expect(requestBody).not.toHaveProperty('debug');
  });
});
diff --git a/packages/ai-sdk-provider-2/src/chat/index.ts b/packages/ai-sdk-provider-2/src/chat/index.ts
new file mode 100644
index 0000000..673ace7
--- /dev/null
+++ b/packages/ai-sdk-provider-2/src/chat/index.ts
@@ -0,0 +1,1100 @@
import type {
  LanguageModelV3,
  LanguageModelV3CallOptions,
  LanguageModelV3Content,
  LanguageModelV3FinishReason,
  LanguageModelV3FunctionTool,
  LanguageModelV3ResponseMetadata,
  LanguageModelV3StreamPart,
  LanguageModelV3Usage,
  SharedV3Headers,
  SharedV3ProviderMetadata,
  SharedV3Warning,
} from '@ai-sdk/provider';
import type { ParseResult } from '@ai-sdk/provider-utils';
import type { z } from 'zod/v4';
import type { ReasoningDetailUnion } from '@/src/schemas/reasoning-details';
import type { OpenRouterUsageAccounting } from '@/src/types/index';
import type { FileAnnotation } from '../schemas/provider-metadata';
import type {
  OpenRouterChatModelId,
  OpenRouterChatSettings,
} from '../types/openrouter-chat-settings';

import {
  APICallError,
  InvalidResponseDataError,
  NoContentGeneratedError,
} from '@ai-sdk/provider';
import {
  combineHeaders,
  createEventSourceResponseHandler,
  createJsonResponseHandler,
  generateId,
  isParsableJson,
  postJsonToApi,
} from '@ai-sdk/provider-utils';
import { ReasoningDetailType } from '@/src/schemas/reasoning-details';
import { openrouterFailedResponseHandler } from '../schemas/error-response';
import { OpenRouterProviderMetadataSchema } from '../schemas/provider-metadata';
import {
  createFinishReason,
  mapOpenRouterFinishReason,
} from '../utils/map-finish-reason';
import { convertToOpenRouterChatMessages } from './convert-to-openrouter-chat-messages';
import { getBase64FromDataUrl, getMediaType } from './file-url-utils';
import { getChatCompletionToolChoice }
from './get-tool-choice';
import {
  OpenRouterNonStreamChatCompletionResponseSchema,
  OpenRouterStreamChatCompletionChunkSchema,
} from './schemas';

type OpenRouterChatConfig = {
  provider: string;
  compatibility: 'strict' | 'compatible';
  headers: () => Record<string, string>;
  url: (options: { modelId: string; path: string }) => string;
  fetch?: typeof fetch;
  extraBody?: Record<string, unknown>;
};

/**
 * OpenRouter chat-completions language model (AI SDK `LanguageModelV3`).
 *
 * Maps standardized call options onto OpenRouter's `/chat/completions` API,
 * including OpenRouter-specific settings (reasoning, usage accounting,
 * plugins, provider routing, debug echo).
 */
export class OpenRouterChatLanguageModel implements LanguageModelV3 {
  readonly specificationVersion = 'v3' as const;
  readonly provider = 'openrouter';
  readonly defaultObjectGenerationMode = 'tool' as const;

  readonly modelId: OpenRouterChatModelId;
  readonly supportsImageUrls = true;
  // URL patterns the model accepts natively (no download/inlining needed).
  readonly supportedUrls: Record<string, RegExp[]> = {
    'image/*': [
      /^data:image\/[a-zA-Z]+;base64,/,
      /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i,
    ],
    // 'text/*': [/^data:text\//, /^https?:\/\/.+$/],
    'application/*': [/^data:application\//, /^https?:\/\/.+$/],
  };
  readonly settings: OpenRouterChatSettings;

  private readonly config: OpenRouterChatConfig;

  constructor(
    modelId: OpenRouterChatModelId,
    settings: OpenRouterChatSettings,
    config: OpenRouterChatConfig,
  ) {
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }

  /**
   * Builds the JSON request body shared by doGenerate and doStream from the
   * standardized call options plus model-level settings and extraBody.
   */
  private getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    frequencyPenalty,
    presencePenalty,
    seed,
    stopSequences,
    responseFormat,
    topK,
    tools,
    toolChoice,
  }: LanguageModelV3CallOptions) {
    const baseArgs = {
      // model id:
      model: this.modelId,
      models: this.settings.models,

      // model specific settings:
      logit_bias: this.settings.logitBias,
      // `logprobs` may be a boolean or a number-of-top-logprobs; the API wants
      // a boolean flag plus a separate top_logprobs count.
      logprobs:
        this.settings.logprobs === true ||
        typeof this.settings.logprobs === 'number'
          ? true
          : undefined,
      top_logprobs:
        typeof this.settings.logprobs === 'number'
          ? this.settings.logprobs
          : typeof this.settings.logprobs === 'boolean'
            ? this.settings.logprobs
              ? 0
              : undefined
            : undefined,
      user: this.settings.user,
      parallel_tool_calls: this.settings.parallelToolCalls,

      // standardized settings:
      max_tokens: maxOutputTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      seed,

      stop: stopSequences,
      response_format:
        responseFormat?.type === 'json'
          ? responseFormat.schema != null
            ? {
                type: 'json_schema',
                json_schema: {
                  schema: responseFormat.schema,
                  strict: true,
                  name: responseFormat.name ?? 'response',
                  ...(responseFormat.description && {
                    description: responseFormat.description,
                  }),
                },
              }
            : { type: 'json_object' }
          : undefined,
      top_k: topK,

      // messages:
      messages: convertToOpenRouterChatMessages(prompt),

      // OpenRouter specific settings:
      include_reasoning: this.settings.includeReasoning,
      reasoning: this.settings.reasoning,
      usage: this.settings.usage,

      // Web search settings:
      plugins: this.settings.plugins,
      web_search_options: this.settings.web_search_options,
      // Provider routing settings:
      provider: this.settings.provider,
      // Debug settings:
      debug: this.settings.debug,

      // extra body:
      ...this.config.extraBody,
      ...this.settings.extraBody,
    };

    if (tools && tools.length > 0) {
      // TODO: support built-in tools
      const mappedTools = tools
        .filter(
          (tool): tool is LanguageModelV3FunctionTool =>
            tool.type === 'function',
        )
        .map((tool) => ({
          type: 'function' as const,
          function: {
            name: tool.name,
            description: tool.description,
            parameters: tool.inputSchema,
          },
        }));

      return {
        ...baseArgs,
        tools: mappedTools,
        tool_choice: toolChoice
          ? getChatCompletionToolChoice(toolChoice)
          : undefined,
      };
    }

    return baseArgs;
  }

  /**
   * Non-streaming generation. Posts to `/chat/completions` and converts the
   * response into AI SDK content parts, usage, and provider metadata.
   */
  async doGenerate(options: LanguageModelV3CallOptions): Promise<{
    content: Array<LanguageModelV3Content>;
    finishReason: LanguageModelV3FinishReason;
    usage: LanguageModelV3Usage;
    warnings: Array<SharedV3Warning>;
    providerMetadata?: {
      openrouter: {
        provider: string;
        reasoning_details?: ReasoningDetailUnion[];
        usage: OpenRouterUsageAccounting;
      };
    };
    request?: { body?: unknown };
    response?: LanguageModelV3ResponseMetadata & {
      headers?: SharedV3Headers;
      body?: unknown;
    };
  }> {
    const providerOptions = options.providerOptions || {};
    const openrouterOptions = providerOptions.openrouter || {};

    const args = {
      ...this.getArgs(options),
      ...openrouterOptions,
    };

    const { value: responseValue, responseHeaders } = await postJsonToApi({
      url: this.config.url({
        path: '/chat/completions',
        modelId: this.modelId,
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: openrouterFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        OpenRouterNonStreamChatCompletionResponseSchema,
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    // OpenRouter can return an error payload with HTTP 200; detect and rethrow.
    if ('error' in responseValue) {
      const errorData = responseValue.error as {
        message: string;
        code?: string;
      };
      throw new APICallError({
        message: errorData.message,
        url: this.config.url({
          path: '/chat/completions',
          modelId: this.modelId,
        }),
        requestBodyValues: args,
        statusCode: 200,
        responseHeaders,
        data: errorData,
      });
    }

    // From here on the value is the success-shaped response.
    const response = responseValue;

    const choice = response.choices[0];

    if (!choice) {
      throw new NoContentGeneratedError({
        message: 'No choice in response',
      });
    }

    // Map the provider's token accounting into the standardized usage shape.
    const usageInfo: LanguageModelV3Usage = response.usage
      ? {
          inputTokens: {
            total: response.usage.prompt_tokens ?? 0,
            noCache: undefined,
            cacheRead:
              response.usage.prompt_tokens_details?.cached_tokens ?? undefined,
            cacheWrite: undefined,
          },
          outputTokens: {
            total: response.usage.completion_tokens ?? 0,
            text: undefined,
            reasoning:
              response.usage.completion_tokens_details?.reasoning_tokens ??
              undefined,
          },
        }
      : {
          inputTokens: {
            total: 0,
            noCache: undefined,
            cacheRead: undefined,
            cacheWrite: undefined,
          },
          outputTokens: {
            total: 0,
            text: undefined,
            reasoning: undefined,
          },
        };

    const reasoningDetails = choice.message.reasoning_details ?? [];

    // Convert structured reasoning_details into reasoning content parts.
    // Falls back to the plain `reasoning` string when no details are present.
    const reasoning: Array<LanguageModelV3Content> = [];
    if (reasoningDetails.length > 0) {
      for (const detail of reasoningDetails) {
        const detailMetadata = {
          openrouter: {
            reasoning_details: [detail],
          },
        };
        switch (detail.type) {
          case ReasoningDetailType.Text: {
            if (detail.text) {
              reasoning.push({
                type: 'reasoning' as const,
                text: detail.text,
                providerMetadata: detailMetadata,
              });
            }
            break;
          }
          case ReasoningDetailType.Summary: {
            if (detail.summary) {
              reasoning.push({
                type: 'reasoning' as const,
                text: detail.summary,
                providerMetadata: detailMetadata,
              });
            }
            break;
          }
          case ReasoningDetailType.Encrypted: {
            // Encrypted reasoning is surfaced as a redacted placeholder while
            // the raw detail is preserved in providerMetadata for round-trips.
            if (detail.data) {
              reasoning.push({
                type: 'reasoning' as const,
                text: '[REDACTED]',
                providerMetadata: detailMetadata,
              });
            }
            break;
          }
          default: {
            detail satisfies never;
          }
        }
      }
    } else if (choice.message.reasoning) {
      reasoning.push({
        type: 'reasoning' as const,
        text: choice.message.reasoning,
      });
    }

    const content: Array<LanguageModelV3Content> = [];

    // Reasoning parts are emitted ahead of the visible text.
    content.push(...reasoning);

    if (choice.message.content) {
      content.push({
        type: 'text' as const,
        text: choice.message.content,
      });
    }

    if (choice.message.tool_calls) {
      for (const toolCall of choice.message.tool_calls) {
        content.push({
          type: 'tool-call' as const,
          toolCallId: toolCall.id ?? generateId(),
          toolName: toolCall.function.name,
          input: toolCall.function.arguments,
          providerMetadata: {
            openrouter: {
              reasoning_details: reasoningDetails,
            },
          },
        });
      }
    }

    if (choice.message.images) {
      for (const image of choice.message.images) {
        content.push({
          type: 'file' as const,
          mediaType: getMediaType(image.image_url.url, 'image/jpeg'),
          data: getBase64FromDataUrl(image.image_url.url),
        });
      }
    }

    if (choice.message.annotations) {
      for (const annotation of choice.message.annotations) {
        if (annotation.type === 'url_citation') {
          content.push({
            type: 'source' as const,
            sourceType: 'url' as const,
            id: annotation.url_citation.url,
            url: annotation.url_citation.url,
            title: annotation.url_citation.title,
            providerMetadata: {
              openrouter: {
                content: annotation.url_citation.content || '',
              },
            },
          });
        }
      }
    }

    // Extract file annotations to expose in providerMetadata
    const fileAnnotations = choice.message.annotations?.filter(
      (
        a,
      ): a is {
        type: 'file';
        file: {
          hash: string;
          name: string;
          content?: Array<{ type: string; text?: string }>;
        };
      } => a.type === 'file',
    );

    // Fix for Gemini 3 thoughtSignature: when there are tool calls with encrypted
    // reasoning (thoughtSignature), the model returns 'stop' but expects continuation.
    // Override to 'tool-calls' so the SDK knows to continue the conversation.
    const hasToolCalls =
      choice.message.tool_calls && choice.message.tool_calls.length > 0;
    const hasEncryptedReasoning = reasoningDetails.some(
      (d) => d.type === ReasoningDetailType.Encrypted && d.data,
    );
    const shouldOverrideFinishReason =
      hasToolCalls && hasEncryptedReasoning && choice.finish_reason === 'stop';

    const effectiveFinishReason = shouldOverrideFinishReason
      ? createFinishReason('tool-calls', choice.finish_reason ?? undefined)
      : mapOpenRouterFinishReason(choice.finish_reason);

    return {
      content,
      finishReason: effectiveFinishReason,
      usage: usageInfo,
      warnings: [],
      providerMetadata: {
        openrouter: OpenRouterProviderMetadataSchema.parse({
          provider: response.provider ?? '',
          reasoning_details: choice.message.reasoning_details ?? [],
          annotations:
            fileAnnotations && fileAnnotations.length > 0
              ? fileAnnotations
              : undefined,
          usage: {
            promptTokens: usageInfo.inputTokens.total ?? 0,
            completionTokens: usageInfo.outputTokens.total ?? 0,
            totalTokens:
              (usageInfo.inputTokens.total ?? 0) +
              (usageInfo.outputTokens.total ?? 0),
            cost: response.usage?.cost,
            ...(response.usage?.prompt_tokens_details?.cached_tokens != null
              ? {
                  promptTokensDetails: {
                    cachedTokens:
                      response.usage.prompt_tokens_details.cached_tokens,
                  },
                }
              : {}),
            ...(response.usage?.completion_tokens_details?.reasoning_tokens !=
            null
              ? {
                  completionTokensDetails: {
                    reasoningTokens:
                      response.usage.completion_tokens_details.reasoning_tokens,
                  },
                }
              : {}),
            ...(response.usage?.cost_details?.upstream_inference_cost != null
              ? {
                  costDetails: {
                    upstreamInferenceCost:
                      response.usage.cost_details.upstream_inference_cost,
                  },
                }
              : {}),
          },
        }),
      },
      request: { body: args },
      response: {
        id: response.id,
        modelId: response.model,
        headers: responseHeaders,
      },
    };
  }

  /**
   * Streaming generation. Posts with `stream: true` and adapts the SSE chunk
   * stream into AI SDK stream parts.
   */
  async doStream(options: LanguageModelV3CallOptions): Promise<{
    stream: ReadableStream<LanguageModelV3StreamPart>;
    warnings: Array<SharedV3Warning>;
    request?: { body?: unknown };
    response?: LanguageModelV3ResponseMetadata & {
      headers?: SharedV3Headers;
      body?: unknown;
    };
  }> {
    const providerOptions = options.providerOptions || {};
    const openrouterOptions = providerOptions.openrouter || {};

    const args = {
      ...this.getArgs(options),
      ...openrouterOptions,
    };

    const { value: response, responseHeaders } = await postJsonToApi({
      url: this.config.url({
        path: '/chat/completions',
        modelId: this.modelId,
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body: {
        ...args,
        stream: true,

        // only include stream_options when in strict compatibility mode:
        stream_options:
          this.config.compatibility === 'strict'
            ? {
                include_usage: true,
                // If user has requested usage accounting, make sure we get it in the stream
                ...(this.settings.usage?.include
                  ?
{ include_usage: true } + : {}), + } + : undefined, + }, + failedResponseHandler: openrouterFailedResponseHandler, + successfulResponseHandler: createEventSourceResponseHandler( + OpenRouterStreamChatCompletionChunkSchema, + ), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + const toolCalls: Array<{ + id: string; + type: 'function'; + function: { + name: string; + arguments: string; + }; + inputStarted: boolean; + sent: boolean; + }> = []; + + let finishReason: LanguageModelV3FinishReason = createFinishReason('other'); + const usage: LanguageModelV3Usage = { + inputTokens: { + total: undefined, + noCache: undefined, + cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: undefined, + text: undefined, + reasoning: undefined, + }, + }; + + // Track provider-specific usage information + const openrouterUsage: Partial = {}; + + // Track reasoning details to preserve for multi-turn conversations + const accumulatedReasoningDetails: ReasoningDetailUnion[] = []; + + // Track file annotations to expose in providerMetadata + const accumulatedFileAnnotations: FileAnnotation[] = []; + + let textStarted = false; + let reasoningStarted = false; + let textId: string | undefined; + let reasoningId: string | undefined; + let openrouterResponseId: string | undefined; + let provider: string | undefined; + + return { + stream: response.pipeThrough( + new TransformStream< + ParseResult< + z.infer + >, + LanguageModelV3StreamPart + >({ + transform(chunk, controller) { + // handle failed chunk parsing / validation: + if (!chunk.success) { + finishReason = createFinishReason('error'); + controller.enqueue({ type: 'error', error: chunk.error }); + return; + } + + const value = chunk.value; + + // handle error chunks: + if ('error' in value) { + finishReason = createFinishReason('error'); + controller.enqueue({ type: 'error', error: value.error }); + return; + } + + if (value.provider) { + provider = value.provider; + } + + if (value.id) { 
+ openrouterResponseId = value.id; + controller.enqueue({ + type: 'response-metadata', + id: value.id, + }); + } + + if (value.model) { + controller.enqueue({ + type: 'response-metadata', + modelId: value.model, + }); + } + + if (value.usage != null) { + usage.inputTokens.total = value.usage.prompt_tokens; + usage.outputTokens.total = value.usage.completion_tokens; + + // Collect OpenRouter specific usage information + openrouterUsage.promptTokens = value.usage.prompt_tokens; + + if (value.usage.prompt_tokens_details) { + const cachedInputTokens = + value.usage.prompt_tokens_details.cached_tokens ?? 0; + + usage.inputTokens.cacheRead = cachedInputTokens; + openrouterUsage.promptTokensDetails = { + cachedTokens: cachedInputTokens, + }; + } + + openrouterUsage.completionTokens = value.usage.completion_tokens; + if (value.usage.completion_tokens_details) { + const reasoningTokens = + value.usage.completion_tokens_details.reasoning_tokens ?? 0; + + usage.outputTokens.reasoning = reasoningTokens; + openrouterUsage.completionTokensDetails = { + reasoningTokens, + }; + } + + openrouterUsage.cost = value.usage.cost; + openrouterUsage.totalTokens = value.usage.total_tokens; + const upstreamInferenceCost = + value.usage.cost_details?.upstream_inference_cost; + if (upstreamInferenceCost != null) { + openrouterUsage.costDetails = { + upstreamInferenceCost, + }; + } + } + + const choice = value.choices[0]; + + if (choice?.finish_reason != null) { + finishReason = mapOpenRouterFinishReason(choice.finish_reason); + } + + if (choice?.delta == null) { + return; + } + + const delta = choice.delta; + + const emitReasoningChunk = ( + chunkText: string, + providerMetadata?: SharedV3ProviderMetadata, + ) => { + if (!reasoningStarted) { + reasoningId = openrouterResponseId || generateId(); + controller.enqueue({ + providerMetadata, + type: 'reasoning-start', + id: reasoningId, + }); + reasoningStarted = true; + } + controller.enqueue({ + providerMetadata, + type: 'reasoning-delta', + 
delta: chunkText, + id: reasoningId || generateId(), + }); + }; + + if (delta.reasoning_details && delta.reasoning_details.length > 0) { + // Accumulate reasoning_details to preserve for multi-turn conversations + // Merge consecutive reasoning.text items into a single entry + for (const detail of delta.reasoning_details) { + if (detail.type === ReasoningDetailType.Text) { + const lastDetail = + accumulatedReasoningDetails[ + accumulatedReasoningDetails.length - 1 + ]; + if (lastDetail?.type === ReasoningDetailType.Text) { + // Merge with the previous text detail + lastDetail.text = + (lastDetail.text || '') + (detail.text || ''); + + lastDetail.signature = + lastDetail.signature || detail.signature; + + lastDetail.format = lastDetail.format || detail.format; + } else { + // Start a new text detail + accumulatedReasoningDetails.push({ ...detail }); + } + } else { + // Non-text details (encrypted, summary) are pushed as-is + accumulatedReasoningDetails.push(detail); + } + } + + // Emit reasoning_details in providerMetadata for each delta chunk + // so users can accumulate them on their end before sending back + const reasoningMetadata: SharedV3ProviderMetadata = { + openrouter: { + reasoning_details: delta.reasoning_details, + }, + }; + + for (const detail of delta.reasoning_details) { + switch (detail.type) { + case ReasoningDetailType.Text: { + if (detail.text) { + emitReasoningChunk(detail.text, reasoningMetadata); + } + break; + } + case ReasoningDetailType.Encrypted: { + if (detail.data) { + emitReasoningChunk('[REDACTED]', reasoningMetadata); + } + break; + } + case ReasoningDetailType.Summary: { + if (detail.summary) { + emitReasoningChunk(detail.summary, reasoningMetadata); + } + break; + } + default: { + detail satisfies never; + break; + } + } + } + } else if (delta.reasoning) { + emitReasoningChunk(delta.reasoning); + } + + if (delta.content) { + // If reasoning was previously active and now we're starting text content, + // we should end the reasoning 
first to maintain proper order + if (reasoningStarted && !textStarted) { + controller.enqueue({ + type: 'reasoning-end', + id: reasoningId || generateId(), + }); + reasoningStarted = false; // Mark as ended so we don't end it again in flush + } + + if (!textStarted) { + textId = openrouterResponseId || generateId(); + controller.enqueue({ + type: 'text-start', + id: textId, + }); + textStarted = true; + } + controller.enqueue({ + type: 'text-delta', + delta: delta.content, + id: textId || generateId(), + }); + } + + if (delta.annotations) { + for (const annotation of delta.annotations) { + if (annotation.type === 'url_citation') { + controller.enqueue({ + type: 'source', + sourceType: 'url' as const, + id: annotation.url_citation.url, + url: annotation.url_citation.url, + title: annotation.url_citation.title, + providerMetadata: { + openrouter: { + content: annotation.url_citation.content || '', + }, + }, + }); + } else if (annotation.type === 'file') { + // Accumulate file annotations to expose in providerMetadata + // Type guard to validate structure matches expected shape + const file = (annotation as { file?: unknown }).file; + if ( + file && + typeof file === 'object' && + 'hash' in file && + 'name' in file + ) { + accumulatedFileAnnotations.push( + annotation as FileAnnotation, + ); + } + } + } + } + + if (delta.tool_calls != null) { + for (const toolCallDelta of delta.tool_calls) { + const index = toolCallDelta.index ?? toolCalls.length - 1; + + // Tool call start. OpenRouter returns all information except the arguments in the first chunk. 
+ if (toolCalls[index] == null) { + if (toolCallDelta.type !== 'function') { + throw new InvalidResponseDataError({ + data: toolCallDelta, + message: `Expected 'function' type.`, + }); + } + + if (toolCallDelta.id == null) { + throw new InvalidResponseDataError({ + data: toolCallDelta, + message: `Expected 'id' to be a string.`, + }); + } + + if (toolCallDelta.function?.name == null) { + throw new InvalidResponseDataError({ + data: toolCallDelta, + message: `Expected 'function.name' to be a string.`, + }); + } + + toolCalls[index] = { + id: toolCallDelta.id, + type: 'function', + function: { + name: toolCallDelta.function.name, + arguments: toolCallDelta.function.arguments ?? '', + }, + inputStarted: false, + sent: false, + }; + + const toolCall = toolCalls[index]; + + if (toolCall == null) { + throw new InvalidResponseDataError({ + data: { index, toolCallsLength: toolCalls.length }, + message: `Tool call at index ${index} is missing after creation.`, + }); + } + + // check if tool call is complete (some providers send the full tool call in one chunk) + if ( + toolCall.function?.name != null && + toolCall.function?.arguments != null && + isParsableJson(toolCall.function.arguments) + ) { + toolCall.inputStarted = true; + + controller.enqueue({ + type: 'tool-input-start', + id: toolCall.id, + toolName: toolCall.function.name, + }); + + // send delta + controller.enqueue({ + type: 'tool-input-delta', + id: toolCall.id, + delta: toolCall.function.arguments, + }); + + controller.enqueue({ + type: 'tool-input-end', + id: toolCall.id, + }); + + // send tool call + controller.enqueue({ + type: 'tool-call', + toolCallId: toolCall.id, + toolName: toolCall.function.name, + input: toolCall.function.arguments, + providerMetadata: { + openrouter: { + reasoning_details: accumulatedReasoningDetails, + }, + }, + }); + + toolCall.sent = true; + } + + continue; + } + + // existing tool call, merge + const toolCall = toolCalls[index]; + + if (toolCall == null) { + throw new 
InvalidResponseDataError({ + data: { + index, + toolCallsLength: toolCalls.length, + toolCallDelta, + }, + message: `Tool call at index ${index} is missing during merge.`, + }); + } + + if (!toolCall.inputStarted) { + toolCall.inputStarted = true; + controller.enqueue({ + type: 'tool-input-start', + id: toolCall.id, + toolName: toolCall.function.name, + }); + } + + if (toolCallDelta.function?.arguments != null) { + toolCall.function.arguments += + toolCallDelta.function?.arguments ?? ''; + } + + // send delta + controller.enqueue({ + type: 'tool-input-delta', + id: toolCall.id, + delta: toolCallDelta.function.arguments ?? '', + }); + + // check if tool call is complete + if ( + toolCall.function?.name != null && + toolCall.function?.arguments != null && + isParsableJson(toolCall.function.arguments) + ) { + controller.enqueue({ + type: 'tool-call', + toolCallId: toolCall.id ?? generateId(), + toolName: toolCall.function.name, + input: toolCall.function.arguments, + providerMetadata: { + openrouter: { + reasoning_details: accumulatedReasoningDetails, + }, + }, + }); + + toolCall.sent = true; + } + } + } + + if (delta.images != null) { + for (const image of delta.images) { + controller.enqueue({ + type: 'file', + mediaType: getMediaType(image.image_url.url, 'image/jpeg'), + data: getBase64FromDataUrl(image.image_url.url), + }); + } + } + }, + + flush(controller) { + // Fix for Gemini 3 thoughtSignature: when there are tool calls with encrypted + // reasoning (thoughtSignature), the model returns 'stop' but expects continuation. + // Override to 'tool-calls' so the SDK knows to continue the conversation. 
+ const hasToolCalls = toolCalls.length > 0; + const hasEncryptedReasoning = accumulatedReasoningDetails.some( + (d) => d.type === ReasoningDetailType.Encrypted && d.data, + ); + if ( + hasToolCalls && + hasEncryptedReasoning && + finishReason.unified === 'stop' + ) { + finishReason = createFinishReason('tool-calls', finishReason.raw); + } + + // Forward any unsent tool calls if finish reason is 'tool-calls' + if (finishReason.unified === 'tool-calls') { + for (const toolCall of toolCalls) { + if (toolCall && !toolCall.sent) { + controller.enqueue({ + type: 'tool-call', + toolCallId: toolCall.id ?? generateId(), + toolName: toolCall.function.name, + // Coerce invalid arguments to an empty JSON object + input: isParsableJson(toolCall.function.arguments) + ? toolCall.function.arguments + : '{}', + providerMetadata: { + openrouter: { + reasoning_details: accumulatedReasoningDetails, + }, + }, + }); + toolCall.sent = true; + } + } + } + + // End reasoning first if it was started, to maintain proper order + if (reasoningStarted) { + controller.enqueue({ + type: 'reasoning-end', + id: reasoningId || generateId(), + }); + } + if (textStarted) { + controller.enqueue({ + type: 'text-end', + id: textId || generateId(), + }); + } + + const openrouterMetadata: { + usage: Partial; + provider?: string; + reasoning_details?: ReasoningDetailUnion[]; + annotations?: FileAnnotation[]; + } = { + usage: openrouterUsage, + }; + + // Only include provider if it's actually set + if (provider !== undefined) { + openrouterMetadata.provider = provider; + } + + // Include accumulated reasoning_details if any were received + if (accumulatedReasoningDetails.length > 0) { + openrouterMetadata.reasoning_details = + accumulatedReasoningDetails; + } + + // Include accumulated file annotations if any were received + if (accumulatedFileAnnotations.length > 0) { + openrouterMetadata.annotations = accumulatedFileAnnotations; + } + + controller.enqueue({ + type: 'finish', + finishReason, + usage, + 
providerMetadata: { + openrouter: openrouterMetadata, + }, + }); + }, + }), + ), + warnings: [], + request: { body: args }, + response: { headers: responseHeaders }, + }; + } +} diff --git a/packages/ai-sdk-provider-2/src/chat/is-url.ts b/packages/ai-sdk-provider-2/src/chat/is-url.ts new file mode 100644 index 0000000..3de7f0f --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/is-url.ts @@ -0,0 +1,15 @@ +export function isUrl({ + url, + protocols, +}: { + url: string | URL; + protocols: Set<`${string}:`>; +}): boolean { + try { + const urlObj = new URL(url); + // Cast to the literal string due to Set inferred input type + return protocols.has(urlObj.protocol as `${string}:`); + } catch (_) { + return false; + } +} diff --git a/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts b/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts new file mode 100644 index 0000000..c623df4 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts @@ -0,0 +1,104 @@ +import type { LanguageModelV3Prompt } from '@ai-sdk/provider'; + +import { describe, expect, it } from 'vitest'; +import { createOpenRouter } from '../provider'; +import { createTestServer } from '../test-utils/test-server'; + +const TEST_PROMPT: LanguageModelV3Prompt = [ + { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, +]; + +const provider = createOpenRouter({ + baseURL: 'https://test.openrouter.ai/api/v1', + apiKey: 'test-api-key', +}); + +const server = createTestServer({ + 'https://test.openrouter.ai/api/v1/chat/completions': {}, +}); + +describe('Large PDF Response Handling', () => { + describe('doGenerate', () => { + it('should handle HTTP 200 responses with error payloads (500 internal errors)', async () => { + // This is the actual response OpenRouter returns for large PDF failures + // HTTP 200 status but contains error object instead of choices + server.urls[ + 'https://test.openrouter.ai/api/v1/chat/completions' + ]!.response = { + type: 
'json-value', + body: { + error: { + message: 'Internal Server Error', + code: 500, + }, + user_id: 'org_abc123', + }, + }; + + const model = provider('anthropic/claude-3.5-sonnet'); + + await expect( + model.doGenerate({ + prompt: TEST_PROMPT, + }), + ).rejects.toThrow('Internal Server Error'); + }); + + it('should parse successful large PDF responses with file annotations', async () => { + // Successful response with file annotations from FileParserPlugin + server.urls[ + 'https://test.openrouter.ai/api/v1/chat/completions' + ]!.response = { + type: 'json-value', + body: { + id: 'gen-123', + model: 'anthropic/claude-3.5-sonnet', + provider: 'Anthropic', + choices: [ + { + index: 0, + message: { + role: 'assistant', + content: 'LARGE-M9N3T', + annotations: [ + { + type: 'file_annotation', + file_annotation: { + file_id: 'file_abc123', + quote: 'extracted text', + }, + }, + ], + }, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 100, + completion_tokens: 20, + total_tokens: 120, + }, + }, + }; + + const model = provider('anthropic/claude-3.5-sonnet', { + usage: { include: true }, + }); + + const result = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(result.content).toMatchObject([ + { + type: 'text', + text: 'LARGE-M9N3T', + }, + ]); + expect( + (result.usage.inputTokens?.total ?? 0) + + (result.usage.outputTokens?.total ?? 
0), + ).toBe(120); + }); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts b/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts new file mode 100644 index 0000000..6eeac7a --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts @@ -0,0 +1,154 @@ +import type { LanguageModelV3Prompt } from '@ai-sdk/provider'; +import type { OpenRouterChatCompletionsInput } from '../types/openrouter-chat-completions-input'; +import type { OpenRouterChatSettings } from '../types/openrouter-chat-settings'; + +import { describe, expect, it, vi } from 'vitest'; +import { createOpenRouter } from '../provider'; + +describe('Payload Comparison - Large PDF', () => { + it('should send payload matching fetch baseline for large PDFs', async () => { + interface CapturedRequestBody { + model: string; + messages: OpenRouterChatCompletionsInput; + plugins?: OpenRouterChatSettings['plugins']; + usage?: { include: boolean }; + } + + // Capture what the provider actually sends + let capturedRequestBody: CapturedRequestBody | null = null; + + const mockFetch = vi.fn(async (_url: string, init?: RequestInit) => { + // Capture the request body + if (init?.body) { + capturedRequestBody = JSON.parse( + init.body as string, + ) as CapturedRequestBody; + } + + // Return a minimal success response + return new Response( + JSON.stringify({ + id: 'test-123', + model: 'anthropic/claude-3.5-sonnet', + choices: [ + { + message: { + role: 'assistant', + content: 'Test response', + }, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 5, + total_tokens: 15, + }, + }), + { + status: 200, + headers: { 'Content-Type': 'application/json' }, + }, + ); + }) as typeof fetch; + + const provider = createOpenRouter({ + apiKey: 'test-key', + fetch: mockFetch, + }); + + // Simulate a large PDF (use a small base64 for testing, but structure matters) + const smallPdfBase64 = 'JVBERi0xLjQKJeLjz9MKM...(truncated)'; + 
const dataUrl = `data:application/pdf;base64,${smallPdfBase64}`; + + const prompt: LanguageModelV3Prompt = [ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Extract the verification code. Reply with ONLY the code.', + }, + { + type: 'file', + data: dataUrl, + mediaType: 'application/pdf', + }, + ], + }, + ]; + + const model = provider('anthropic/claude-3.5-sonnet', { + plugins: [{ id: 'file-parser', pdf: { engine: 'mistral-ocr' } }], + usage: { include: true }, + }); + + await model.doGenerate({ prompt }); + + // Now assert the payload structure matches fetch baseline + expect(capturedRequestBody).toBeDefined(); + expect(capturedRequestBody).not.toBeNull(); + + // Expected structure based on fetch example: + // { + // model: 'anthropic/claude-3.5-sonnet', + // messages: [{ + // role: 'user', + // content: [ + // { type: 'file', file: { filename: '...', file_data: 'data:...' } }, + // { type: 'text', text: '...' } + // ] + // }], + // plugins: [{ id: 'file-parser', pdf: { engine: 'mistral-ocr' } }], + // usage: { include: true } + // } + + const messages = capturedRequestBody!.messages; + expect(messages).toHaveLength(1); + expect(messages[0]?.role).toBe('user'); + expect(messages[0]?.content).toBeInstanceOf(Array); + + const content = messages[0]?.content; + if (!Array.isArray(content)) { + throw new Error('Content should be an array'); + } + + // Find the file part + const filePart = content.find((part) => part.type === 'file'); + expect(filePart).toBeDefined(); + + // CRITICAL ASSERTION: The file part should have a nested 'file' object with 'file_data' + // This is what the fetch example sends and what OpenRouter expects + expect(filePart).toMatchObject({ + type: 'file', + file: { + file_data: expect.stringContaining('data:application/pdf;base64,'), + }, + }); + + // Find the text part + const textPart = content.find((part) => part.type === 'text'); + expect(textPart).toMatchObject({ + type: 'text', + text: 'Extract the verification code. 
Reply with ONLY the code.', + }); + + // Check for plugins array + expect(capturedRequestBody!.plugins).toBeDefined(); + expect(capturedRequestBody!.plugins).toBeInstanceOf(Array); + + const { plugins } = capturedRequestBody!; + if (!plugins) { + throw new Error('Plugins should be defined'); + } + + const fileParserPlugin = plugins.find((p) => p.id === 'file-parser'); + expect(fileParserPlugin).toBeDefined(); + expect(fileParserPlugin).toMatchObject({ + id: 'file-parser', + pdf: { + engine: expect.stringMatching(/^(mistral-ocr|pdf-text|native)$/), + }, + }); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/chat/schemas.ts b/packages/ai-sdk-provider-2/src/chat/schemas.ts new file mode 100644 index 0000000..6367a04 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/chat/schemas.ts @@ -0,0 +1,285 @@ +import { z } from 'zod/v4'; +import { OpenRouterErrorResponseSchema } from '../schemas/error-response'; +import { ImageResponseArraySchema } from '../schemas/image'; +import { ReasoningDetailArraySchema } from '../schemas/reasoning-details'; + +const OpenRouterChatCompletionBaseResponseSchema = z + .object({ + id: z.string().optional(), + model: z.string().optional(), + provider: z.string().optional(), + usage: z + .object({ + prompt_tokens: z.number(), + prompt_tokens_details: z + .object({ + cached_tokens: z.number(), + }) + .passthrough() + .nullish(), + completion_tokens: z.number(), + completion_tokens_details: z + .object({ + reasoning_tokens: z.number(), + }) + .passthrough() + .nullish(), + total_tokens: z.number(), + cost: z.number().optional(), + cost_details: z + .object({ + upstream_inference_cost: z.number().nullish(), + }) + .passthrough() + .nullish(), + }) + .passthrough() + .nullish(), + }) + .passthrough(); +// limited version of the schema, focussed on what is needed for the implementation +// this approach limits breakages when the API changes and increases efficiency +export const OpenRouterNonStreamChatCompletionResponseSchema = z.union([ + // 
Success response with choices + OpenRouterChatCompletionBaseResponseSchema.extend({ + choices: z.array( + z + .object({ + message: z + .object({ + role: z.literal('assistant'), + content: z.string().nullable().optional(), + reasoning: z.string().nullable().optional(), + reasoning_details: ReasoningDetailArraySchema.nullish(), + images: ImageResponseArraySchema.nullish(), + + tool_calls: z + .array( + z + .object({ + id: z.string().optional().nullable(), + type: z.literal('function'), + function: z + .object({ + name: z.string(), + arguments: z.string(), + }) + .passthrough(), + }) + .passthrough(), + ) + .optional(), + + annotations: z + .array( + z.union([ + // URL citation from web search + z + .object({ + type: z.literal('url_citation'), + url_citation: z + .object({ + end_index: z.number(), + start_index: z.number(), + title: z.string(), + url: z.string(), + content: z.string().optional(), + }) + .passthrough(), + }) + .passthrough(), + // File annotation from FileParserPlugin (old format) + z + .object({ + type: z.literal('file_annotation'), + file_annotation: z + .object({ + file_id: z.string(), + quote: z.string().optional(), + }) + .passthrough(), + }) + .passthrough(), + // File annotation from FileParserPlugin (new format) + z + .object({ + type: z.literal('file'), + file: z + .object({ + hash: z.string(), + name: z.string(), + content: z + .array( + z + .object({ + type: z.string(), + text: z.string().optional(), + }) + .passthrough(), + ) + .optional(), + }) + .passthrough(), + }) + .passthrough(), + ]), + ) + .nullish(), + }) + .passthrough(), + index: z.number().nullish(), + logprobs: z + .object({ + content: z + .array( + z + .object({ + token: z.string(), + logprob: z.number(), + top_logprobs: z.array( + z + .object({ + token: z.string(), + logprob: z.number(), + }) + .passthrough(), + ), + }) + .passthrough(), + ) + .nullable(), + }) + .passthrough() + .nullable() + .optional(), + finish_reason: z.string().optional().nullable(), + }) + 
.passthrough(), + ), + }), + // Error response (HTTP 200 with error payload) + OpenRouterErrorResponseSchema.extend({ + user_id: z.string().optional(), + }), +]); +// limited version of the schema, focussed on what is needed for the implementation +// this approach limits breakages when the API changes and increases efficiency +export const OpenRouterStreamChatCompletionChunkSchema = z.union([ + OpenRouterChatCompletionBaseResponseSchema.extend({ + choices: z.array( + z + .object({ + delta: z + .object({ + role: z.enum(['assistant']).optional(), + content: z.string().nullish(), + reasoning: z.string().nullish().optional(), + reasoning_details: ReasoningDetailArraySchema.nullish(), + images: ImageResponseArraySchema.nullish(), + tool_calls: z + .array( + z + .object({ + index: z.number().nullish(), + id: z.string().nullish(), + type: z.literal('function').optional(), + function: z + .object({ + name: z.string().nullish(), + arguments: z.string().nullish(), + }) + .passthrough(), + }) + .passthrough(), + ) + .nullish(), + + annotations: z + .array( + z.union([ + // URL citation from web search + z + .object({ + type: z.literal('url_citation'), + url_citation: z + .object({ + end_index: z.number(), + start_index: z.number(), + title: z.string(), + url: z.string(), + content: z.string().optional(), + }) + .passthrough(), + }) + .passthrough(), + // File annotation from FileParserPlugin (old format) + z + .object({ + type: z.literal('file_annotation'), + file_annotation: z + .object({ + file_id: z.string(), + quote: z.string().optional(), + }) + .passthrough(), + }) + .passthrough(), + // File annotation from FileParserPlugin (new format) + z + .object({ + type: z.literal('file'), + file: z + .object({ + hash: z.string(), + name: z.string(), + content: z + .array( + z + .object({ + type: z.string(), + text: z.string().optional(), + }) + .passthrough(), + ) + .optional(), + }) + .passthrough(), + }) + .passthrough(), + ]), + ) + .nullish(), + }) + .passthrough() + 
.nullish(), + logprobs: z + .object({ + content: z + .array( + z + .object({ + token: z.string(), + logprob: z.number(), + top_logprobs: z.array( + z + .object({ + token: z.string(), + logprob: z.number(), + }) + .passthrough(), + ), + }) + .passthrough(), + ) + .nullable(), + }) + .passthrough() + .nullish(), + finish_reason: z.string().nullable().optional(), + index: z.number().nullish(), + }) + .passthrough(), + ), + }), + OpenRouterErrorResponseSchema, +]); diff --git a/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts b/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts new file mode 100644 index 0000000..b9af689 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts @@ -0,0 +1,150 @@ +import type { + LanguageModelV3FilePart, + LanguageModelV3Prompt, + LanguageModelV3ReasoningPart, + LanguageModelV3TextPart, + LanguageModelV3ToolCallPart, + LanguageModelV3ToolResultPart, +} from '@ai-sdk/provider'; + +import { + InvalidPromptError, + UnsupportedFunctionalityError, +} from '@ai-sdk/provider'; + +export function convertToOpenRouterCompletionPrompt({ + prompt, + inputFormat, + user = 'user', + assistant = 'assistant', +}: { + prompt: LanguageModelV3Prompt; + inputFormat: 'prompt' | 'messages'; + user?: string; + assistant?: string; +}): { + prompt: string; +} { + // When the user supplied a prompt input, we don't transform it: + if ( + inputFormat === 'prompt' && + prompt.length === 1 && + prompt[0] && + prompt[0].role === 'user' && + prompt[0].content.length === 1 && + prompt[0].content[0] && + prompt[0].content[0].type === 'text' + ) { + return { prompt: prompt[0].content[0].text }; + } + + // otherwise transform to a chat message format: + let text = ''; + + // if first message is a system message, add it to the text: + if (prompt[0] && prompt[0].role === 'system') { + text += `${prompt[0].content}\n\n`; + prompt = prompt.slice(1); + } + + for 
(const { role, content } of prompt) { + switch (role) { + case 'system': { + throw new InvalidPromptError({ + message: `Unexpected system message in prompt: ${content}`, + prompt, + }); + } + + case 'user': { + const userMessage = content + .map((part: LanguageModelV3TextPart | LanguageModelV3FilePart) => { + switch (part.type) { + case 'text': { + return part.text; + } + + case 'file': { + throw new UnsupportedFunctionalityError({ + functionality: 'file attachments', + }); + } + default: { + return ''; + } + } + }) + .join(''); + + text += `${user}:\n${userMessage}\n\n`; + break; + } + + case 'assistant': { + const assistantMessage = content + .map( + ( + part: + | LanguageModelV3TextPart + | LanguageModelV3FilePart + | LanguageModelV3ReasoningPart + | LanguageModelV3ToolCallPart + | LanguageModelV3ToolResultPart, + ) => { + switch (part.type) { + case 'text': { + return part.text; + } + case 'tool-call': { + throw new UnsupportedFunctionalityError({ + functionality: 'tool-call messages', + }); + } + case 'tool-result': { + throw new UnsupportedFunctionalityError({ + functionality: 'tool-result messages', + }); + } + case 'reasoning': { + throw new UnsupportedFunctionalityError({ + functionality: 'reasoning messages', + }); + } + + case 'file': { + throw new UnsupportedFunctionalityError({ + functionality: 'file attachments', + }); + } + + default: { + return ''; + } + } + }, + ) + .join(''); + + text += `${assistant}:\n${assistantMessage}\n\n`; + break; + } + + case 'tool': { + throw new UnsupportedFunctionalityError({ + functionality: 'tool messages', + }); + } + + default: { + break; + } + } + } + + // Assistant message prefix: + text += `${assistant}:\n`; + + return { + prompt: text, + }; +} diff --git a/packages/ai-sdk-provider-2/src/completion/index.test.ts b/packages/ai-sdk-provider-2/src/completion/index.test.ts new file mode 100644 index 0000000..46da64b --- /dev/null +++ b/packages/ai-sdk-provider-2/src/completion/index.test.ts @@ -0,0 +1,598 @@ +import 
type { + LanguageModelV3Prompt, + LanguageModelV3StreamPart, +} from '@ai-sdk/provider'; + +import { vi } from 'vitest'; +import { createOpenRouter } from '../provider'; +import { + convertReadableStreamToArray, + createTestServer, +} from '../test-utils/test-server'; + +vi.mock('@/src/version', () => ({ + VERSION: '0.0.0-test', +})); + +const TEST_PROMPT: LanguageModelV3Prompt = [ + { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, +]; + +const TEST_LOGPROBS = { + tokens: [' ever', ' after', '.\n\n', 'The', ' end', '.'], + token_logprobs: [ + -0.0664508, -0.014520033, -1.3820221, -0.7890417, -0.5323165, -0.10247037, + ], + top_logprobs: [ + { + ' ever': -0.0664508, + }, + { + ' after': -0.014520033, + }, + { + '.\n\n': -1.3820221, + }, + { + The: -0.7890417, + }, + { + ' end': -0.5323165, + }, + { + '.': -0.10247037, + }, + ] as Record[], +}; + +const provider = createOpenRouter({ + apiKey: 'test-api-key', + compatibility: 'strict', +}); + +const model = provider.completion('openai/gpt-3.5-turbo-instruct'); + +describe('doGenerate', () => { + const server = createTestServer({ + 'https://openrouter.ai/api/v1/completions': { + response: { type: 'json-value', body: {} }, + }, + }); + + function prepareJsonResponse({ + content = '', + usage = { + prompt_tokens: 4, + total_tokens: 34, + completion_tokens: 30, + }, + logprobs = null, + finish_reason = 'stop', + }: { + content?: string; + usage?: { + prompt_tokens: number; + total_tokens: number; + completion_tokens: number; + }; + logprobs?: { + tokens: string[]; + token_logprobs: number[]; + top_logprobs: Record[]; + } | null; + finish_reason?: string; + }) { + server.urls['https://openrouter.ai/api/v1/completions']!.response = { + type: 'json-value', + body: { + id: 'cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB', + object: 'text_completion', + created: 1711363706, + model: 'openai/gpt-3.5-turbo-instruct', + choices: [ + { + text: content, + index: 0, + logprobs, + finish_reason, + }, + ], + usage, + }, + }; + } + + 
it('should extract text response', async () => { + prepareJsonResponse({ content: 'Hello, World!' }); + + const { content } = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + const text = content[0]?.type === 'text' ? content[0].text : ''; + + expect(text).toStrictEqual('Hello, World!'); + }); + + it('should extract usage', async () => { + prepareJsonResponse({ + content: '', + usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, + }); + + const { usage } = await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(usage).toStrictEqual({ + inputTokens: { + total: 20, + noCache: undefined, + cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: 5, + text: undefined, + reasoning: undefined, + }, + }); + }); + + it('should extract logprobs', async () => { + prepareJsonResponse({ logprobs: TEST_LOGPROBS }); + + const provider = createOpenRouter({ apiKey: 'test-api-key' }); + + await provider + .completion('openai/gpt-3.5-turbo', { logprobs: 1 }) + .doGenerate({ + prompt: TEST_PROMPT, + }); + }); + + it('should extract finish reason', async () => { + prepareJsonResponse({ + content: '', + finish_reason: 'stop', + }); + + const { finishReason } = await provider + .completion('openai/gpt-3.5-turbo-instruct') + .doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(finishReason).toStrictEqual({ unified: 'stop', raw: 'stop' }); + }); + + it('should support unknown finish reason', async () => { + prepareJsonResponse({ + content: '', + finish_reason: 'eos', + }); + + const { finishReason } = await provider + .completion('openai/gpt-3.5-turbo-instruct') + .doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(finishReason).toStrictEqual({ unified: 'other', raw: 'eos' }); + }); + + it('should pass the model and the prompt', async () => { + prepareJsonResponse({ content: '' }); + + await model.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + model: 
'openai/gpt-3.5-turbo-instruct', + prompt: 'Hello', + }); + }); + + it('should pass the models array when provided', async () => { + prepareJsonResponse({ content: '' }); + + const customModel = provider.completion('openai/gpt-3.5-turbo-instruct', { + models: ['openai/gpt-4', 'anthropic/claude-2'], + }); + + await customModel.doGenerate({ + prompt: TEST_PROMPT, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + model: 'openai/gpt-3.5-turbo-instruct', + models: ['openai/gpt-4', 'anthropic/claude-2'], + prompt: 'Hello', + }); + }); + + it('should pass headers', async () => { + prepareJsonResponse({ content: '' }); + + const provider = createOpenRouter({ + apiKey: 'test-api-key', + headers: { + 'Custom-Provider-Header': 'provider-header-value', + }, + }); + + await provider.completion('openai/gpt-3.5-turbo-instruct').doGenerate({ + prompt: TEST_PROMPT, + headers: { + 'Custom-Request-Header': 'request-header-value', + }, + }); + + const requestHeaders = server.calls[0]!.requestHeaders; + + expect(requestHeaders).toMatchObject({ + authorization: 'Bearer test-api-key', + 'content-type': 'application/json', + 'custom-provider-header': 'provider-header-value', + 'custom-request-header': 'request-header-value', + }); + expect(requestHeaders['user-agent']).toContain( + 'ai-sdk/openrouter/0.0.0-test', + ); + }); +}); + +describe('doStream', () => { + const server = createTestServer({ + 'https://openrouter.ai/api/v1/completions': { + response: { type: 'stream-chunks', chunks: [] }, + }, + }); + + function prepareStreamResponse({ + content, + finish_reason = 'stop', + usage = { + prompt_tokens: 10, + total_tokens: 372, + completion_tokens: 362, + }, + logprobs = null, + }: { + content: string[]; + usage?: { + prompt_tokens: number; + total_tokens: number; + completion_tokens: number; + prompt_tokens_details?: { + cached_tokens: number; + }; + completion_tokens_details?: { + reasoning_tokens: number; + }; + cost?: number; + cost_details?: { + 
upstream_inference_cost: number; + }; + }; + logprobs?: { + tokens: string[]; + token_logprobs: number[]; + top_logprobs: Record[]; + } | null; + finish_reason?: string; + }) { + server.urls['https://openrouter.ai/api/v1/completions']!.response = { + type: 'stream-chunks', + chunks: [ + ...content.map((text) => { + return `data: {"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,"choices":[{"text":"${text}","index":0,"logprobs":null,"finish_reason":null}],"model":"openai/gpt-3.5-turbo-instruct"}\n\n`; + }), + `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,"choices":[{"text":"","index":0,"logprobs":${JSON.stringify( + logprobs, + )},"finish_reason":"${finish_reason}"}],"model":"openai/gpt-3.5-turbo-instruct"}\n\n`, + `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,"model":"openai/gpt-3.5-turbo-instruct","usage":${JSON.stringify( + usage, + )},"choices":[]}\n\n`, + 'data: [DONE]\n\n', + ], + }; + } + + it('should stream text deltas', async () => { + prepareStreamResponse({ + content: ['Hello', ', ', 'World!'], + finish_reason: 'stop', + usage: { + prompt_tokens: 10, + total_tokens: 372, + completion_tokens: 362, + }, + logprobs: TEST_LOGPROBS, + }); + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + // note: space moved to last chunk bc of trimming + const elements = await convertReadableStreamToArray(stream); + expect(elements).toStrictEqual([ + { type: 'text-delta', delta: 'Hello', id: expect.any(String) }, + { type: 'text-delta', delta: ', ', id: expect.any(String) }, + { type: 'text-delta', delta: 'World!', id: expect.any(String) }, + { type: 'text-delta', delta: '', id: expect.any(String) }, + { + type: 'finish', + finishReason: { unified: 'stop', raw: 'stop' }, + providerMetadata: { + openrouter: { + usage: { + promptTokens: 10, + completionTokens: 362, + totalTokens: 372, + cost: undefined, + 
}, + }, + }, + usage: { + inputTokens: { + total: 10, + noCache: undefined, + cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: 362, + text: undefined, + reasoning: undefined, + }, + }, + }, + ]); + }); + + it('should include upstream inference cost when provided', async () => { + prepareStreamResponse({ + content: ['Hello'], + usage: { + prompt_tokens: 5, + total_tokens: 15, + completion_tokens: 10, + cost_details: { + upstream_inference_cost: 0.0036, + }, + }, + }); + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = (await convertReadableStreamToArray( + stream, + )) as LanguageModelV3StreamPart[]; + const finishChunk = elements.find( + ( + element, + ): element is Extract => + element.type === 'finish', + ); + const openrouterUsage = ( + finishChunk?.providerMetadata?.openrouter as { + usage?: { + cost?: number; + costDetails?: { upstreamInferenceCost: number }; + }; + } + )?.usage; + expect(openrouterUsage?.costDetails).toStrictEqual({ + upstreamInferenceCost: 0.0036, + }); + }); + + it('should handle both normal cost and upstream inference cost in finish metadata when both are provided', async () => { + prepareStreamResponse({ + content: ['Hello'], + usage: { + prompt_tokens: 5, + total_tokens: 15, + completion_tokens: 10, + cost: 0.0025, + cost_details: { + upstream_inference_cost: 0.0036, + }, + }, + }); + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = (await convertReadableStreamToArray( + stream, + )) as LanguageModelV3StreamPart[]; + const finishChunk = elements.find( + ( + element, + ): element is Extract => + element.type === 'finish', + ); + const openrouterUsage = ( + finishChunk?.providerMetadata?.openrouter as { + usage?: { + cost?: number; + costDetails?: { upstreamInferenceCost: number }; + }; + } + )?.usage; + expect(openrouterUsage?.costDetails).toStrictEqual({ + upstreamInferenceCost: 0.0036, + }); + 
expect(openrouterUsage?.cost).toBe(0.0025); + }); + + it('should handle error stream parts', async () => { + server.urls['https://openrouter.ai/api/v1/completions']!.response = { + type: 'stream-chunks', + chunks: [ + `data: {"error":{"message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + + `help center at help.openrouter.com if you keep seeing this error.","type":"server_error","param":null,"code":null}}\n\n`, + 'data: [DONE]\n\n', + ], + }; + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'error', + error: { + message: + 'The server had an error processing your request. Sorry about that! ' + + 'You can retry your request, or contact us through our help center at ' + + 'help.openrouter.com if you keep seeing this error.', + type: 'server_error', + code: null, + param: null, + }, + }, + { + finishReason: { unified: 'error', raw: undefined }, + providerMetadata: { + openrouter: { + usage: {}, + }, + }, + type: 'finish', + usage: { + inputTokens: { + total: undefined, + noCache: undefined, + cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: undefined, + text: undefined, + reasoning: undefined, + }, + }, + }, + ]); + }); + + it('should handle unparsable stream parts', async () => { + server.urls['https://openrouter.ai/api/v1/completions']!.response = { + type: 'stream-chunks', + chunks: ['data: {unparsable}\n\n', 'data: [DONE]\n\n'], + }; + + const { stream } = await model.doStream({ + prompt: TEST_PROMPT, + }); + + const elements = await convertReadableStreamToArray(stream); + + expect(elements.length).toBe(2); + expect(elements[0]?.type).toBe('error'); + expect(elements[1]).toStrictEqual({ + finishReason: { unified: 'error', raw: undefined }, + providerMetadata: { + openrouter: { + usage: {}, + }, + }, + type: 'finish', + usage: { + inputTokens: { + 
total: undefined, + noCache: undefined, + cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: undefined, + text: undefined, + reasoning: undefined, + }, + }, + }); + }); + + it('should pass the model and the prompt', async () => { + prepareStreamResponse({ content: [] }); + + await model.doStream({ + prompt: TEST_PROMPT, + }); + + expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ + stream: true, + stream_options: { include_usage: true }, + model: 'openai/gpt-3.5-turbo-instruct', + prompt: 'Hello', + }); + }); + + it('should pass headers', async () => { + prepareStreamResponse({ content: [] }); + + const provider = createOpenRouter({ + apiKey: 'test-api-key', + headers: { + 'Custom-Provider-Header': 'provider-header-value', + }, + }); + + await provider.completion('openai/gpt-3.5-turbo-instruct').doStream({ + prompt: TEST_PROMPT, + headers: { + 'Custom-Request-Header': 'request-header-value', + }, + }); + + const requestHeaders = server.calls[0]!.requestHeaders; + + expect(requestHeaders).toMatchObject({ + authorization: 'Bearer test-api-key', + 'content-type': 'application/json', + 'custom-provider-header': 'provider-header-value', + 'custom-request-header': 'request-header-value', + }); + expect(requestHeaders['user-agent']).toContain( + 'ai-sdk/openrouter/0.0.0-test', + ); + }); + + it('should pass extra body', async () => { + prepareStreamResponse({ content: [] }); + + const provider = createOpenRouter({ + apiKey: 'test-api-key', + extraBody: { + custom_field: 'custom_value', + providers: { + anthropic: { + custom_field: 'custom_value', + }, + }, + }, + }); + + await provider.completion('openai/gpt-4o').doStream({ + prompt: TEST_PROMPT, + }); + + const requestBody = await server.calls[0]!.requestBodyJson; + + expect(requestBody).toHaveProperty('custom_field', 'custom_value'); + expect(requestBody).toHaveProperty( + 'providers.anthropic.custom_field', + 'custom_value', + ); + }); +}); diff --git 
a/packages/ai-sdk-provider-2/src/completion/index.ts b/packages/ai-sdk-provider-2/src/completion/index.ts new file mode 100644 index 0000000..482f77a --- /dev/null +++ b/packages/ai-sdk-provider-2/src/completion/index.ts @@ -0,0 +1,374 @@ +import type { + LanguageModelV3, + LanguageModelV3CallOptions, + LanguageModelV3FinishReason, + LanguageModelV3StreamPart, + LanguageModelV3Usage, +} from '@ai-sdk/provider'; +import type { ParseResult } from '@ai-sdk/provider-utils'; +import type { z } from 'zod/v4'; +import type { OpenRouterUsageAccounting } from '../types'; +import type { + OpenRouterCompletionModelId, + OpenRouterCompletionSettings, +} from '../types/openrouter-completion-settings'; + +import { + APICallError, + NoContentGeneratedError, + UnsupportedFunctionalityError, +} from '@ai-sdk/provider'; +import { + combineHeaders, + createEventSourceResponseHandler, + createJsonResponseHandler, + generateId, + postJsonToApi, +} from '@ai-sdk/provider-utils'; +import { openrouterFailedResponseHandler } from '../schemas/error-response'; +import { + createFinishReason, + mapOpenRouterFinishReason, +} from '../utils/map-finish-reason'; +import { convertToOpenRouterCompletionPrompt } from './convert-to-openrouter-completion-prompt'; +import { OpenRouterCompletionChunkSchema } from './schemas'; + +type OpenRouterCompletionConfig = { + provider: string; + compatibility: 'strict' | 'compatible'; + headers: () => Record; + url: (options: { modelId: string; path: string }) => string; + fetch?: typeof fetch; + extraBody?: Record; +}; + +export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { + readonly specificationVersion = 'v3' as const; + readonly provider = 'openrouter'; + readonly modelId: OpenRouterCompletionModelId; + readonly supportsImageUrls = true; + readonly supportedUrls: Record = { + 'image/*': [ + /^data:image\/[a-zA-Z]+;base64,/, + /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i, + ], + 'text/*': [/^data:text\//, /^https?:\/\/.+$/], + 
'application/*': [/^data:application\//, /^https?:\/\/.+$/], + }; + readonly defaultObjectGenerationMode = undefined; + readonly settings: OpenRouterCompletionSettings; + + private readonly config: OpenRouterCompletionConfig; + + constructor( + modelId: OpenRouterCompletionModelId, + settings: OpenRouterCompletionSettings, + config: OpenRouterCompletionConfig, + ) { + this.modelId = modelId; + this.settings = settings; + this.config = config; + } + + private getArgs({ + prompt, + maxOutputTokens, + temperature, + topP, + frequencyPenalty, + presencePenalty, + seed, + responseFormat, + topK, + stopSequences, + tools, + toolChoice, + }: LanguageModelV3CallOptions) { + const { prompt: completionPrompt } = convertToOpenRouterCompletionPrompt({ + prompt, + inputFormat: 'prompt', + }); + + if (tools?.length) { + throw new UnsupportedFunctionalityError({ + functionality: 'tools', + }); + } + + if (toolChoice) { + throw new UnsupportedFunctionalityError({ + functionality: 'toolChoice', + }); + } + + return { + // model id: + model: this.modelId, + models: this.settings.models, + + // model specific settings: + logit_bias: this.settings.logitBias, + logprobs: + typeof this.settings.logprobs === 'number' + ? this.settings.logprobs + : typeof this.settings.logprobs === 'boolean' + ? this.settings.logprobs + ? 
0 + : undefined + : undefined, + suffix: this.settings.suffix, + user: this.settings.user, + + // standardized settings: + max_tokens: maxOutputTokens, + temperature, + top_p: topP, + frequency_penalty: frequencyPenalty, + presence_penalty: presencePenalty, + seed, + + stop: stopSequences, + response_format: responseFormat, + top_k: topK, + + // prompt: + prompt: completionPrompt, + + // OpenRouter specific settings: + include_reasoning: this.settings.includeReasoning, + reasoning: this.settings.reasoning, + + // extra body: + ...this.config.extraBody, + ...this.settings.extraBody, + }; + } + + async doGenerate( + options: LanguageModelV3CallOptions, + ): Promise>> { + const providerOptions = options.providerOptions || {}; + const openrouterOptions = providerOptions.openrouter || {}; + + const args = { + ...this.getArgs(options), + ...openrouterOptions, + }; + + const { value: response, responseHeaders } = await postJsonToApi({ + url: this.config.url({ + path: '/completions', + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + body: args, + failedResponseHandler: openrouterFailedResponseHandler, + successfulResponseHandler: createJsonResponseHandler( + OpenRouterCompletionChunkSchema, + ), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + if ('error' in response) { + const errorData = response.error as { message: string; code?: string }; + throw new APICallError({ + message: errorData.message, + url: this.config.url({ + path: '/completions', + modelId: this.modelId, + }), + requestBodyValues: args, + statusCode: 200, + responseHeaders, + data: errorData, + }); + } + + const choice = response.choices[0]; + + if (!choice) { + throw new NoContentGeneratedError({ + message: 'No choice in OpenRouter completion response', + }); + } + + return { + content: [ + { + type: 'text', + text: choice.text ?? 
'', + }, + ], + finishReason: mapOpenRouterFinishReason(choice.finish_reason), + usage: { + inputTokens: { + total: response.usage?.prompt_tokens ?? 0, + noCache: undefined, + cacheRead: + response.usage?.prompt_tokens_details?.cached_tokens ?? undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: response.usage?.completion_tokens ?? 0, + text: undefined, + reasoning: + response.usage?.completion_tokens_details?.reasoning_tokens ?? + undefined, + }, + }, + warnings: [], + response: { + headers: responseHeaders, + }, + }; + } + + async doStream( + options: LanguageModelV3CallOptions, + ): Promise>> { + const providerOptions = options.providerOptions || {}; + const openrouterOptions = providerOptions.openrouter || {}; + + const args = { + ...this.getArgs(options), + ...openrouterOptions, + }; + + const { value: response, responseHeaders } = await postJsonToApi({ + url: this.config.url({ + path: '/completions', + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + body: { + ...args, + stream: true, + + // only include stream_options when in strict compatibility mode: + stream_options: + this.config.compatibility === 'strict' + ? 
{ include_usage: true } + : undefined, + }, + failedResponseHandler: openrouterFailedResponseHandler, + successfulResponseHandler: createEventSourceResponseHandler( + OpenRouterCompletionChunkSchema, + ), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + let finishReason: LanguageModelV3FinishReason = createFinishReason('other'); + const usage: LanguageModelV3Usage = { + inputTokens: { + total: undefined, + noCache: undefined, + cacheRead: undefined, + cacheWrite: undefined, + }, + outputTokens: { + total: undefined, + text: undefined, + reasoning: undefined, + }, + }; + + const openrouterUsage: Partial = {}; + return { + stream: response.pipeThrough( + new TransformStream< + ParseResult>, + LanguageModelV3StreamPart + >({ + transform(chunk, controller) { + // handle failed chunk parsing / validation: + if (!chunk.success) { + finishReason = createFinishReason('error'); + controller.enqueue({ type: 'error', error: chunk.error }); + return; + } + + const value = chunk.value; + + // handle error chunks: + if ('error' in value) { + finishReason = createFinishReason('error'); + controller.enqueue({ type: 'error', error: value.error }); + return; + } + + if (value.usage != null) { + usage.inputTokens.total = value.usage.prompt_tokens; + usage.outputTokens.total = value.usage.completion_tokens; + + // Collect OpenRouter specific usage information + openrouterUsage.promptTokens = value.usage.prompt_tokens; + + if (value.usage.prompt_tokens_details) { + const cachedInputTokens = + value.usage.prompt_tokens_details.cached_tokens ?? 0; + + usage.inputTokens.cacheRead = cachedInputTokens; + openrouterUsage.promptTokensDetails = { + cachedTokens: cachedInputTokens, + }; + } + + openrouterUsage.completionTokens = value.usage.completion_tokens; + if (value.usage.completion_tokens_details) { + const reasoningTokens = + value.usage.completion_tokens_details.reasoning_tokens ?? 
0; + + usage.outputTokens.reasoning = reasoningTokens; + openrouterUsage.completionTokensDetails = { + reasoningTokens, + }; + } + + openrouterUsage.cost = value.usage.cost; + openrouterUsage.totalTokens = value.usage.total_tokens; + const upstreamInferenceCost = + value.usage.cost_details?.upstream_inference_cost; + if (upstreamInferenceCost != null) { + openrouterUsage.costDetails = { + upstreamInferenceCost, + }; + } + } + + const choice = value.choices[0]; + + if (choice?.finish_reason != null) { + finishReason = mapOpenRouterFinishReason(choice.finish_reason); + } + + if (choice?.text != null) { + controller.enqueue({ + type: 'text-delta', + delta: choice.text, + id: generateId(), + }); + } + }, + + flush(controller) { + controller.enqueue({ + type: 'finish', + finishReason, + usage, + providerMetadata: { + openrouter: { + usage: openrouterUsage, + }, + }, + }); + }, + }), + ), + response: { + headers: responseHeaders, + }, + }; + } +} diff --git a/packages/ai-sdk-provider-2/src/completion/schemas.ts b/packages/ai-sdk-provider-2/src/completion/schemas.ts new file mode 100644 index 0000000..c38f00e --- /dev/null +++ b/packages/ai-sdk-provider-2/src/completion/schemas.ts @@ -0,0 +1,65 @@ +import { z } from 'zod/v4'; +import { OpenRouterErrorResponseSchema } from '../schemas/error-response'; +import { ReasoningDetailArraySchema } from '../schemas/reasoning-details'; + +// limited version of the schema, focussed on what is needed for the implementation +// this approach limits breakages when the API changes and increases efficiency +export const OpenRouterCompletionChunkSchema = z.union([ + z + .object({ + id: z.string().optional(), + model: z.string().optional(), + choices: z.array( + z + .object({ + text: z.string(), + reasoning: z.string().nullish().optional(), + reasoning_details: ReasoningDetailArraySchema.nullish(), + + finish_reason: z.string().nullish(), + index: z.number().nullish(), + logprobs: z + .object({ + tokens: z.array(z.string()), + 
token_logprobs: z.array(z.number()), + top_logprobs: z + .array(z.record(z.string(), z.number())) + .nullable(), + }) + .passthrough() + .nullable() + .optional(), + }) + .passthrough(), + ), + usage: z + .object({ + prompt_tokens: z.number(), + prompt_tokens_details: z + .object({ + cached_tokens: z.number(), + }) + .passthrough() + .nullish(), + completion_tokens: z.number(), + completion_tokens_details: z + .object({ + reasoning_tokens: z.number(), + }) + .passthrough() + .nullish(), + total_tokens: z.number(), + cost: z.number().optional(), + cost_details: z + .object({ + upstream_inference_cost: z.number().nullish(), + }) + .passthrough() + .nullish(), + }) + .passthrough() + .nullish(), + }) + .passthrough(), + OpenRouterErrorResponseSchema, +]); diff --git a/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.test.ts b/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.test.ts deleted file mode 100644 index af2e058..0000000 --- a/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.test.ts +++ /dev/null @@ -1,437 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { describe, expect, it } from "vitest"; - -import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; - -describe("user messages", () => { - it("should convert messages with image parts to multiple parts", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - }, - ], - }, - ]); - }); - - it("should convert messages with only a text part to a string content", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ]); - - expect(result).toEqual([{ role: "user", content: "Hello" }]); - }); -}); - -describe("cache control", () => { - it("should pass cache control from system message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "system", - content: "System prompt", - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "system", - content: "System prompt", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should pass cache control from user message provider metadata (single text part)", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: "Hello", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should pass cache control from user message provider metadata 
(multiple parts)", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: undefined, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control to multiple image parts from user message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - }, - { - type: "image", - image: new Uint8Array([4, 5, 6, 7]), - mimeType: "image/jpeg", - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: undefined, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - cache_control: { type: "ephemeral" }, - }, - { - type: "image_url", - image_url: { url: "data:image/jpeg;base64,BAUGBw==" }, - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control to file parts from user message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "file", - data: "file content", - mimeType: "text/plain", - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: 
"text", - text: "Hello", - cache_control: undefined, - }, - { - type: "text", - text: "file content", - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should handle mixed part-specific and message-level cache control for multiple parts", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - // No part-specific provider metadata - }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - { - type: "file", - data: "file content", - mimeType: "text/plain", - // No part-specific provider metadata - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: undefined, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - cache_control: { type: "ephemeral" }, - }, - { - type: "text", - text: "file content", - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control from individual content part provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: { type: "ephemeral" }, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - }, - ], - }, - ]); - }); - - it("should pass cache control from assistant message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - 
{ - role: "assistant", - content: [{ type: "text", text: "Assistant response" }], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "assistant", - content: "Assistant response", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should pass cache control from tool message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "tool", - content: [ - { - type: "tool-result", - toolCallId: "call-123", - toolName: "calculator", - result: { answer: 42 }, - isError: false, - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "tool", - tool_call_id: "call-123", - content: JSON.stringify({ answer: 42 }), - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should support the alias cache_control field", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "system", - content: "System prompt", - providerMetadata: { - anthropic: { - cache_control: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "system", - content: "System prompt", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should support cache control on last message in content array", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "system", - content: "System prompt", - }, - { - role: "user", - content: [ - { type: "text", text: "User prompt" }, - { - type: "text", - text: "User prompt 2", - providerMetadata: { - anthropic: { cacheControl: { type: "ephemeral" } }, - }, - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "system", - content: "System prompt", - }, - { - role: "user", - content: [ - { type: "text", text: "User prompt" }, - { - type: "text", - text: "User prompt 2", - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); -}); diff --git 
a/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.ts b/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.ts deleted file mode 100644 index 30bae0b..0000000 --- a/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-chat-messages.ts +++ /dev/null @@ -1,165 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt, LanguageModelV1ProviderMetadata } from "@ai-sdk/provider"; -import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils"; - -import type { ChatCompletionContentPart, HyperbolicChatPrompt } from "./hyperbolic-chat-prompt"; - -// Type for Hyperbolic Cache Control following Anthropic's pattern -export type HyperbolicCacheControl = { type: "ephemeral" }; - -export function convertToHyperbolicChatMessages( - prompt: LanguageModelV1Prompt, -): HyperbolicChatPrompt { - const messages: HyperbolicChatPrompt = []; - - function getCacheControl( - providerMetadata: LanguageModelV1ProviderMetadata | undefined, - ): HyperbolicCacheControl | undefined { - const anthropic = providerMetadata?.anthropic; - - // Allow both cacheControl and cache_control: - const cacheControlValue = anthropic?.cacheControl ?? anthropic?.cache_control; - - // Return the cache control object if it exists - return cacheControlValue as HyperbolicCacheControl | undefined; - } - - for (const { role, content, providerMetadata } of prompt) { - switch (role) { - case "system": { - messages.push({ - role: "system", - content, - cache_control: getCacheControl(providerMetadata), - }); - break; - } - - case "user": { - if (content.length === 1 && content[0]?.type === "text") { - messages.push({ - role: "user", - content: content[0].text, - cache_control: - getCacheControl(providerMetadata) ?? 
getCacheControl(content[0].providerMetadata), - }); - break; - } - - // Get message level cache control - const messageCacheControl = getCacheControl(providerMetadata); - - const contentParts: ChatCompletionContentPart[] = content.map((part) => { - switch (part.type) { - case "text": - return { - type: "text" as const, - text: part.text, - // For text parts, only use part-specific cache control - cache_control: getCacheControl(part.providerMetadata), - }; - case "image": - return { - type: "image_url" as const, - image_url: { - url: - part.image instanceof URL - ? part.image.toString() - : `data:${part.mimeType ?? "image/jpeg"};base64,${convertUint8ArrayToBase64( - part.image, - )}`, - }, - // For image parts, use part-specific or message-level cache control - cache_control: getCacheControl(part.providerMetadata) ?? messageCacheControl, - }; - case "file": - return { - type: "text" as const, - text: part.data instanceof URL ? part.data.toString() : part.data, - cache_control: getCacheControl(part.providerMetadata) ?? 
messageCacheControl, - }; - default: { - const _exhaustiveCheck: never = part; - throw new Error(`Unsupported content part type: ${_exhaustiveCheck}`); - } - } - }); - - // For multi-part messages, don't add cache_control at the root level - messages.push({ - role: "user", - content: contentParts, - }); - - break; - } - - case "assistant": { - let text = ""; - const toolCalls: Array<{ - id: string; - type: "function"; - function: { name: string; arguments: string }; - }> = []; - - for (const part of content) { - switch (part.type) { - case "text": { - text += part.text; - break; - } - case "tool-call": { - toolCalls.push({ - id: part.toolCallId, - type: "function", - function: { - name: part.toolName, - arguments: JSON.stringify(part.args), - }, - }); - break; - } - // TODO: Handle reasoning and redacted-reasoning - case "reasoning": - case "redacted-reasoning": - break; - default: { - throw new Error(`Unsupported part: ${part}`); - } - } - } - - messages.push({ - role: "assistant", - content: text, - tool_calls: toolCalls.length > 0 ? toolCalls : undefined, - cache_control: getCacheControl(providerMetadata), - }); - - break; - } - - case "tool": { - for (const toolResponse of content) { - messages.push({ - role: "tool", - tool_call_id: toolResponse.toolCallId, - content: JSON.stringify(toolResponse.result), - cache_control: - getCacheControl(providerMetadata) ?? getCacheControl(toolResponse.providerMetadata), - }); - } - break; - } - - default: { - throw new Error(`Unsupported role: ${role}`); - } - } - } - - return messages; -} diff --git a/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-completion-prompt.ts b/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-completion-prompt.ts deleted file mode 100644 index c241b77..0000000 --- a/packages/ai-sdk-provider-2/src/convert-to-hyperbolic-completion-prompt.ts +++ /dev/null @@ -1,134 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; -import { InvalidPromptError, UnsupportedFunctionalityError } from "@ai-sdk/provider"; - -export function convertToHyperbolicCompletionPrompt({ - prompt, - inputFormat, - user = "user", - assistant = "assistant", -}: { - prompt: LanguageModelV1Prompt; - inputFormat: "prompt" | "messages"; - user?: string; - assistant?: string; -}): { - prompt: string; -} { - // When the user supplied a prompt input, we don't transform it: - if ( - inputFormat === "prompt" && - prompt.length === 1 && - prompt[0] && - prompt[0].role === "user" && - prompt[0].content.length === 1 && - prompt[0].content[0] && - prompt[0].content[0].type === "text" - ) { - return { prompt: prompt[0].content[0].text }; - } - - // otherwise transform to a chat message format: - let text = ""; - - // if first message is a system message, add it to the text: - if (prompt[0] && prompt[0].role === "system") { - text += `${prompt[0].content}\n\n`; - prompt = prompt.slice(1); - } - - for (const { role, content } of prompt) { - switch (role) { - case "system": { - throw new InvalidPromptError({ - message: "Unexpected system message in prompt: ${content}", - prompt, - }); - } - - case "user": { - const userMessage = content - .map((part) => { - switch (part.type) { - case "text": { - return part.text; - } - case "image": { - throw new UnsupportedFunctionalityError({ - functionality: "images", - }); - } - case "file": { - throw new UnsupportedFunctionalityError({ - functionality: "file attachments", - }); - } - default: { - const _exhaustiveCheck: never = part; - throw new Error(`Unsupported content type: ${_exhaustiveCheck}`); - } - } - }) - .join(""); - - text += `${user}:\n${userMessage}\n\n`; - break; - } - - case "assistant": { - const assistantMessage = content - .map((part) => { - switch (part.type) { - case "text": { - return part.text; - } - case "tool-call": { - throw new 
UnsupportedFunctionalityError({ - functionality: "tool-call messages", - }); - } - case "reasoning": { - throw new UnsupportedFunctionalityError({ - functionality: "reasoning messages", - }); - } - - case "redacted-reasoning": { - throw new UnsupportedFunctionalityError({ - functionality: "redacted reasoning messages", - }); - } - - default: { - throw new Error(`Unsupported content type: ${part}`); - } - } - }) - .join(""); - - text += `${assistant}:\n${assistantMessage}\n\n`; - break; - } - - case "tool": { - throw new UnsupportedFunctionalityError({ - functionality: "tool messages", - }); - } - - default: { - throw new Error(`Unsupported role: ${role}`); - } - } - } - - // Assistant message prefix: - text += `${assistant}:\n`; - - return { - prompt: text, - }; -} diff --git a/packages/ai-sdk-provider-2/src/embedding/index.test.ts b/packages/ai-sdk-provider-2/src/embedding/index.test.ts new file mode 100644 index 0000000..bad731a --- /dev/null +++ b/packages/ai-sdk-provider-2/src/embedding/index.test.ts @@ -0,0 +1,253 @@ +import { describe, expect, it } from 'vitest'; +import { createOpenRouter } from '../provider'; +import { OpenRouterEmbeddingModel } from './index'; + +describe('OpenRouterEmbeddingModel', () => { + const mockFetch = async ( + _url: URL | RequestInfo, + _init?: RequestInit, + ): Promise => { + return new Response( + JSON.stringify({ + id: 'test-id', + object: 'list', + data: [ + { + object: 'embedding', + embedding: new Array(1536).fill(0.1), + index: 0, + }, + ], + model: 'openai/text-embedding-3-small', + usage: { + prompt_tokens: 5, + total_tokens: 5, + cost: 0.00001, + }, + }), + { + status: 200, + headers: { + 'content-type': 'application/json', + }, + }, + ); + }; + + describe('provider methods', () => { + it('should expose textEmbeddingModel method', () => { + const provider = createOpenRouter({ apiKey: 'test-key' }); + expect(provider.textEmbeddingModel).toBeDefined(); + expect(typeof provider.textEmbeddingModel).toBe('function'); + }); + 
+ it('should expose embedding method (deprecated)', () => { + const provider = createOpenRouter({ apiKey: 'test-key' }); + expect(provider.embedding).toBeDefined(); + expect(typeof provider.embedding).toBe('function'); + }); + + it('should create an embedding model instance', () => { + const provider = createOpenRouter({ apiKey: 'test-key' }); + const model = provider.textEmbeddingModel( + 'openai/text-embedding-3-small', + ); + expect(model).toBeInstanceOf(OpenRouterEmbeddingModel); + expect(model.modelId).toBe('openai/text-embedding-3-small'); + expect(model.provider).toBe('openrouter'); + expect(model.specificationVersion).toBe('v3'); + }); + }); + + describe('doEmbed', () => { + it('should embed a single value', async () => { + const provider = createOpenRouter({ + apiKey: 'test-key', + fetch: mockFetch, + }); + const model = provider.textEmbeddingModel( + 'openai/text-embedding-3-small', + ); + + const result = await model.doEmbed({ + values: ['sunny day at the beach'], + }); + + expect(result.embeddings).toHaveLength(1); + expect(result.embeddings[0]).toHaveLength(1536); + expect(result.usage).toEqual({ tokens: 5 }); + expect( + (result.providerMetadata?.openrouter as { usage?: { cost?: number } }) + ?.usage?.cost, + ).toBe(0.00001); + }); + + it('should embed multiple values', async () => { + const mockFetchMultiple = async ( + _url: URL | RequestInfo, + _init?: RequestInit, + ): Promise => { + return new Response( + JSON.stringify({ + object: 'list', + data: [ + { + object: 'embedding', + embedding: new Array(1536).fill(0.1), + index: 0, + }, + { + object: 'embedding', + embedding: new Array(1536).fill(0.2), + index: 1, + }, + { + object: 'embedding', + embedding: new Array(1536).fill(0.3), + index: 2, + }, + ], + model: 'openai/text-embedding-3-small', + usage: { + prompt_tokens: 15, + total_tokens: 15, + }, + }), + { + status: 200, + headers: { + 'content-type': 'application/json', + }, + }, + ); + }; + + const provider = createOpenRouter({ + apiKey: 
'test-key', + fetch: mockFetchMultiple, + }); + const model = provider.textEmbeddingModel( + 'openai/text-embedding-3-small', + ); + + const result = await model.doEmbed({ + values: [ + 'sunny day at the beach', + 'rainy day in the city', + 'snowy mountain peak', + ], + }); + + expect(result.embeddings).toHaveLength(3); + expect(result.embeddings[0]).toHaveLength(1536); + expect(result.embeddings[1]).toHaveLength(1536); + expect(result.embeddings[2]).toHaveLength(1536); + expect(result.usage).toEqual({ tokens: 15 }); + }); + + it('should pass custom settings to API', async () => { + let capturedRequest: Record | undefined; + + const mockFetchWithCapture = async ( + _url: URL | RequestInfo, + init?: RequestInit, + ): Promise => { + capturedRequest = JSON.parse(init?.body as string); + return new Response( + JSON.stringify({ + object: 'list', + data: [ + { + object: 'embedding', + embedding: new Array(1536).fill(0.1), + index: 0, + }, + ], + model: 'openai/text-embedding-3-small', + usage: { + prompt_tokens: 5, + total_tokens: 5, + }, + }), + { + status: 200, + headers: { + 'content-type': 'application/json', + }, + }, + ); + }; + + const provider = createOpenRouter({ + apiKey: 'test-key', + fetch: mockFetchWithCapture, + }); + + const model = provider.textEmbeddingModel( + 'openai/text-embedding-3-small', + { + user: 'test-user-123', + provider: { + order: ['openai'], + allow_fallbacks: false, + }, + }, + ); + + await model.doEmbed({ + values: ['test input'], + }); + + expect(capturedRequest?.user).toBe('test-user-123'); + expect(capturedRequest?.provider).toEqual({ + order: ['openai'], + allow_fallbacks: false, + }); + expect(capturedRequest?.model).toBe('openai/text-embedding-3-small'); + expect(capturedRequest?.input).toEqual(['test input']); + }); + + it('should handle response without usage information', async () => { + const mockFetchNoUsage = async ( + _url: URL | RequestInfo, + _init?: RequestInit, + ): Promise => { + return new Response( + JSON.stringify({ 
+ object: 'list', + data: [ + { + object: 'embedding', + embedding: new Array(1536).fill(0.1), + index: 0, + }, + ], + model: 'openai/text-embedding-3-small', + }), + { + status: 200, + headers: { + 'content-type': 'application/json', + }, + }, + ); + }; + + const provider = createOpenRouter({ + apiKey: 'test-key', + fetch: mockFetchNoUsage, + }); + const model = provider.textEmbeddingModel( + 'openai/text-embedding-3-small', + ); + + const result = await model.doEmbed({ + values: ['test'], + }); + + expect(result.embeddings).toHaveLength(1); + expect(result.usage).toBeUndefined(); + expect(result.providerMetadata).toBeUndefined(); + }); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/embedding/index.ts b/packages/ai-sdk-provider-2/src/embedding/index.ts new file mode 100644 index 0000000..a88e325 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/embedding/index.ts @@ -0,0 +1,108 @@ +import type { + EmbeddingModelV3, + SharedV3Headers, + SharedV3ProviderMetadata, +} from '@ai-sdk/provider'; +import type { + OpenRouterEmbeddingModelId, + OpenRouterEmbeddingSettings, +} from '../types/openrouter-embedding-settings'; + +import { + combineHeaders, + createJsonResponseHandler, + postJsonToApi, +} from '@ai-sdk/provider-utils'; +import { openrouterFailedResponseHandler } from '../schemas/error-response'; +import { OpenRouterEmbeddingResponseSchema } from './schemas'; + +type OpenRouterEmbeddingConfig = { + provider: string; + headers: () => Record; + url: (options: { modelId: string; path: string }) => string; + fetch?: typeof fetch; + extraBody?: Record; +}; + +export class OpenRouterEmbeddingModel implements EmbeddingModelV3 { + readonly specificationVersion = 'v3' as const; + readonly provider = 'openrouter'; + readonly modelId: OpenRouterEmbeddingModelId; + readonly settings: OpenRouterEmbeddingSettings; + readonly maxEmbeddingsPerCall = undefined; + readonly supportsParallelCalls = true; + + private readonly config: OpenRouterEmbeddingConfig; + + 
constructor( + modelId: OpenRouterEmbeddingModelId, + settings: OpenRouterEmbeddingSettings, + config: OpenRouterEmbeddingConfig, + ) { + this.modelId = modelId; + this.settings = settings; + this.config = config; + } + + async doEmbed(options: { + values: Array; + abortSignal?: AbortSignal; + headers?: Record; + }): Promise<{ + embeddings: Array>; + usage?: { tokens: number }; + providerMetadata?: SharedV3ProviderMetadata; + response?: { + headers?: SharedV3Headers; + body?: unknown; + }; + warnings: Array; + }> { + const { values, abortSignal, headers } = options; + + const args = { + model: this.modelId, + input: values, + user: this.settings.user, + provider: this.settings.provider, + ...this.config.extraBody, + ...this.settings.extraBody, + }; + + const { value: responseValue, responseHeaders } = await postJsonToApi({ + url: this.config.url({ + path: '/embeddings', + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), headers), + body: args, + failedResponseHandler: openrouterFailedResponseHandler, + successfulResponseHandler: createJsonResponseHandler( + OpenRouterEmbeddingResponseSchema, + ), + abortSignal, + fetch: this.config.fetch, + }); + + return { + embeddings: responseValue.data.map((item) => item.embedding), + usage: responseValue.usage + ? { tokens: responseValue.usage.prompt_tokens } + : undefined, + providerMetadata: responseValue.usage?.cost + ? 
{ + openrouter: { + usage: { + cost: responseValue.usage.cost, + }, + }, + } + : undefined, + response: { + headers: responseHeaders, + body: responseValue, + }, + warnings: [], + }; + } +} diff --git a/packages/ai-sdk-provider-2/src/embedding/schemas.ts b/packages/ai-sdk-provider-2/src/embedding/schemas.ts new file mode 100644 index 0000000..55dcfa3 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/embedding/schemas.ts @@ -0,0 +1,25 @@ +import { z } from 'zod/v4'; + +const openrouterEmbeddingUsageSchema = z.object({ + prompt_tokens: z.number(), + total_tokens: z.number(), + cost: z.number().optional(), +}); + +const openrouterEmbeddingDataSchema = z.object({ + object: z.literal('embedding'), + embedding: z.array(z.number()), + index: z.number().optional(), +}); + +export const OpenRouterEmbeddingResponseSchema = z.object({ + id: z.string().optional(), + object: z.literal('list'), + data: z.array(openrouterEmbeddingDataSchema), + model: z.string(), + usage: openrouterEmbeddingUsageSchema.optional(), +}); + +export type OpenRouterEmbeddingResponse = z.infer< + typeof OpenRouterEmbeddingResponseSchema +>; diff --git a/packages/ai-sdk-provider-2/src/facade.ts b/packages/ai-sdk-provider-2/src/facade.ts new file mode 100644 index 0000000..cb08ca9 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/facade.ts @@ -0,0 +1,117 @@ +import type { OpenRouterProviderSettings } from './provider'; +import type { + OpenRouterChatModelId, + OpenRouterChatSettings, +} from './types/openrouter-chat-settings'; +import type { + OpenRouterCompletionModelId, + OpenRouterCompletionSettings, +} from './types/openrouter-completion-settings'; +import type { + OpenRouterEmbeddingModelId, + OpenRouterEmbeddingSettings, +} from './types/openrouter-embedding-settings'; + +import { loadApiKey, withoutTrailingSlash } from '@ai-sdk/provider-utils'; +import { OpenRouterChatLanguageModel } from './chat'; +import { OpenRouterCompletionLanguageModel } from './completion'; +import { 
OpenRouterEmbeddingModel } from './embedding'; + +/** +@deprecated Use `createOpenRouter` instead. + */ +export class OpenRouter { + /** +Use a different URL prefix for API calls, e.g. to use proxy servers. +The default prefix is `https://openrouter.ai/api/v1`. + */ + readonly baseURL: string; + + /** +API key that is being sent using the `Authorization` header. +It defaults to the `OPENROUTER_API_KEY` environment variable. + */ + readonly apiKey?: string; + + /** +Custom headers to include in the requests. + */ + readonly headers?: Record; + + /** + * Record of provider slugs to API keys for injecting into provider routing. + */ + readonly api_keys?: Record; + + /** + * Creates a new OpenRouter provider instance. + */ + constructor(options: OpenRouterProviderSettings = {}) { + this.baseURL = + withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? + 'https://openrouter.ai/api/v1'; + this.apiKey = options.apiKey; + this.headers = options.headers; + this.api_keys = options.api_keys; + } + + private get baseConfig() { + return { + baseURL: this.baseURL, + headers: () => ({ + Authorization: `Bearer ${loadApiKey({ + apiKey: this.apiKey, + environmentVariableName: 'OPENROUTER_API_KEY', + description: 'OpenRouter', + })}`, + ...this.headers, + ...(this.api_keys && + Object.keys(this.api_keys).length > 0 && { + 'X-Provider-API-Keys': JSON.stringify(this.api_keys), + }), + }), + }; + } + + chat(modelId: OpenRouterChatModelId, settings: OpenRouterChatSettings = {}) { + return new OpenRouterChatLanguageModel(modelId, settings, { + provider: 'openrouter.chat', + ...this.baseConfig, + compatibility: 'strict', + url: ({ path }) => `${this.baseURL}${path}`, + }); + } + + completion( + modelId: OpenRouterCompletionModelId, + settings: OpenRouterCompletionSettings = {}, + ) { + return new OpenRouterCompletionLanguageModel(modelId, settings, { + provider: 'openrouter.completion', + ...this.baseConfig, + compatibility: 'strict', + url: ({ path }) => `${this.baseURL}${path}`, + 
}); + } + + textEmbeddingModel( + modelId: OpenRouterEmbeddingModelId, + settings: OpenRouterEmbeddingSettings = {}, + ) { + return new OpenRouterEmbeddingModel(modelId, settings, { + provider: 'openrouter.embedding', + ...this.baseConfig, + url: ({ path }) => `${this.baseURL}${path}`, + }); + } + + /** + * @deprecated Use textEmbeddingModel instead + */ + embedding( + modelId: OpenRouterEmbeddingModelId, + settings: OpenRouterEmbeddingSettings = {}, + ) { + return this.textEmbeddingModel(modelId, settings); + } +} diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.test.ts b/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.test.ts deleted file mode 100644 index 944ccce..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.test.ts +++ /dev/null @@ -1,990 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; -import { - convertReadableStreamToArray, - JsonTestServer, - StreamingTestServer, -} from "@ai-sdk/provider-utils/test"; -import { describe, expect, it } from "vitest"; - -import { createHyperbolic } from "./hyperbolic-provider"; -import { mapHyperbolicChatLogProbsOutput } from "./map-hyperbolic-chat-logprobs"; - -const TEST_PROMPT: LanguageModelV1Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -const TEST_LOGPROBS = { - content: [ - { - token: "Hello", - logprob: -0.0009994634, - top_logprobs: [ - { - token: "Hello", - logprob: -0.0009994634, - }, - ], - }, - { - token: "!", - logprob: -0.13410144, - top_logprobs: [ - { - token: "!", - logprob: -0.13410144, - }, - ], - }, - { - token: " How", - logprob: -0.0009250381, - top_logprobs: [ - { - token: " How", - logprob: -0.0009250381, - }, - ], - }, - { - token: " can", - logprob: -0.047709424, - top_logprobs: [ - { - token: " can", - logprob: 
-0.047709424, - }, - ], - }, - { - token: " I", - logprob: -0.000009014684, - top_logprobs: [ - { - token: " I", - logprob: -0.000009014684, - }, - ], - }, - { - token: " assist", - logprob: -0.009125131, - top_logprobs: [ - { - token: " assist", - logprob: -0.009125131, - }, - ], - }, - { - token: " you", - logprob: -0.0000066306106, - top_logprobs: [ - { - token: " you", - logprob: -0.0000066306106, - }, - ], - }, - { - token: " today", - logprob: -0.00011093382, - top_logprobs: [ - { - token: " today", - logprob: -0.00011093382, - }, - ], - }, - { - token: "?", - logprob: -0.00004596782, - top_logprobs: [ - { - token: "?", - logprob: -0.00004596782, - }, - ], - }, - ], -}; - -const provider = createHyperbolic({ - apiKey: "test-api-key", - compatibility: "strict", -}); - -const model = provider.chat("anthropic/claude-3.5-sonnet"); - -describe("doGenerate", () => { - const server = new JsonTestServer("https://api.hyperbolic.xyz/v1/chat/completions"); - - server.setupTestEnvironment(); - - function prepareJsonResponse({ - content = "", - usage = { - prompt_tokens: 4, - total_tokens: 34, - completion_tokens: 30, - }, - logprobs = null, - finish_reason = "stop", - }: { - content?: string; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - content: - | { - token: string; - logprob: number; - top_logprobs: { token: string; logprob: number }[]; - }[] - | null; - } | null; - finish_reason?: string; - } = {}) { - server.responseBodyJson = { - id: "chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd", - object: "chat.completion", - created: 1711115037, - model: "gpt-3.5-turbo-0125", - choices: [ - { - index: 0, - message: { - role: "assistant", - content, - }, - logprobs, - finish_reason, - }, - ], - usage, - system_fingerprint: "fp_3bc1b5746c", - }; - } - - it("should extract text response", async () => { - prepareJsonResponse({ content: "Hello, World!" 
}); - - const { text } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(text).toStrictEqual("Hello, World!"); - }); - - it("should extract usage", async () => { - prepareJsonResponse({ - content: "", - usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, - }); - - const { usage } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(usage).toStrictEqual({ - promptTokens: 20, - completionTokens: 5, - }); - }); - - it("should extract logprobs", async () => { - prepareJsonResponse({ - logprobs: TEST_LOGPROBS, - }); - - const response = await provider.chat("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - expect(response.logprobs).toStrictEqual(mapHyperbolicChatLogProbsOutput(TEST_LOGPROBS)); - }); - - it("should extract finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "stop", - }); - - const response = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(response.finishReason).toStrictEqual("stop"); - }); - - it("should support unknown finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "eos", - }); - - const response = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(response.finishReason).toStrictEqual("unknown"); - }); - - it("should expose the raw response headers", async () => { - prepareJsonResponse({ content: "" }); - - server.responseHeaders = { - "test-header": "test-value", - }; - - const { rawResponse } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(rawResponse?.headers).toStrictEqual({ - // default headers: - "content-length": "337", - 
"content-type": "application/json", - - // custom header - "test-header": "test-value", - }); - }); - - it("should pass the model and the messages", async () => { - prepareJsonResponse({ content: "" }); - - await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - }); - }); - - it("should pass the models array when provided", async () => { - prepareJsonResponse({ content: "" }); - - const customModel = provider.chat("anthropic/claude-3.5-sonnet", { - models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], - }); - - await customModel.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], - messages: [{ role: "user", content: "Hello" }], - }); - }); - - it("should pass settings", async () => { - prepareJsonResponse(); - - await provider - .chat("openai/gpt-3.5-turbo", { - logitBias: { 50256: -100 }, - logprobs: 2, - parallelToolCalls: false, - user: "test-user-id", - }) - .doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "openai/gpt-3.5-turbo", - messages: [{ role: "user", content: "Hello" }], - logprobs: true, - top_logprobs: 2, - logit_bias: { 50256: -100 }, - parallel_tool_calls: false, - user: "test-user-id", - }); - }); - - it("should pass tools and toolChoice", async () => { - prepareJsonResponse({ content: "" }); - - await model.doGenerate({ - inputFormat: "prompt", - mode: { - type: "regular", - tools: [ - { - type: "function", - name: "test-tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: 
["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - toolChoice: { - type: "tool", - toolName: "test-tool", - }, - }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - tools: [ - { - type: "function", - function: { - name: "test-tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - }, - ], - tool_choice: { - type: "function", - function: { name: "test-tool" }, - }, - }); - }); - - it("should pass headers", async () => { - prepareJsonResponse({ content: "" }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.chat("openai/gpt-3.5-turbo").doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - const requestHeaders = await server.getRequestHeaders(); - - expect(requestHeaders).toStrictEqual({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - }); -}); - -describe("doStream", () => { - const server = new StreamingTestServer("https://api.hyperbolic.xyz/v1/chat/completions"); - - server.setupTestEnvironment(); - - function prepareStreamResponse({ - content, - usage = { - prompt_tokens: 17, - total_tokens: 244, - completion_tokens: 227, - }, - logprobs = null, - finish_reason = "stop", - }: { - content: string[]; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - content: - | { - token: string; - logprob: number; - 
top_logprobs: { token: string; logprob: number }[]; - }[] - | null; - } | null; - finish_reason?: string; - }) { - server.responseChunks = [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + - `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`, - ...content.flatMap((text) => { - return ( - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + - `"system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"${text}"},"finish_reason":null}]}\n\n` - ); - }), - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + - `"system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"${finish_reason}","logprobs":${JSON.stringify( - logprobs, - )}}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":${JSON.stringify(usage)}}\n\n`, - "data: [DONE]\n\n", - ]; - } - - it("should stream text deltas", async () => { - prepareStreamResponse({ - content: ["Hello", ", ", "World!"], - finish_reason: "stop", - usage: { - prompt_tokens: 17, - total_tokens: 244, - completion_tokens: 227, - }, - logprobs: TEST_LOGPROBS, - }); - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - // note: space moved to last chunk bc of trimming - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", textDelta: "" }, - { - type: 
"response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", textDelta: "Hello" }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", textDelta: ", " }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", textDelta: "World!" }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { - type: "finish", - finishReason: "stop", - logprobs: mapHyperbolicChatLogProbsOutput(TEST_LOGPROBS), - usage: { promptTokens: 17, completionTokens: 227 }, - }, - ]); - }); - - it("should stream tool deltas", async () => { - server.responseChunks = [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + - `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":""}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\\""}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"value"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\":\\""}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Spark"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"le"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Day"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\"}"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - "data: [DONE]\n\n", - ]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { - type: "regular", - tools: [ - { - type: "function", - name: "test-tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - }, - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: '{"', - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: "value", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: 
"tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: '":"', - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: "Spark", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: "le", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: " Day", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: '"}', - }, - { - type: "tool-call", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - args: '{"value":"Sparkle Day"}', - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "finish", - finishReason: "tool-calls", - logprobs: undefined, - usage: { promptTokens: 53, 
completionTokens: 17 }, - }, - ]); - }); - - it("should stream tool call that is sent in one chunk", async () => { - server.responseChunks = [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + - `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":"{\\"value\\":\\"Sparkle Day\\"}"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - "data: [DONE]\n\n", - ]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { - type: "regular", - tools: [ - { - type: "function", - name: "test-tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - }, - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: '{"value":"Sparkle Day"}', - }, - { - type: 
"tool-call", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - args: '{"value":"Sparkle Day"}', - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "finish", - finishReason: "tool-calls", - logprobs: undefined, - usage: { promptTokens: 53, completionTokens: 17 }, - }, - ]); - }); - - it("should handle error stream parts", async () => { - server.responseChunks = [ - `data: {"object": "error", "message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + - `help center at app.hyperbolic.xyz/support if you keep seeing this error.","type":"server_error","param":null,"code":null}\n\n`, - "data: [DONE]\n\n", - ]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "error", - error: { - object: "error", - message: - "The server had an error processing your request. Sorry about that! 
" + - "You can retry your request, or contact us through our help center at " + - "app.hyperbolic.xyz/support if you keep seeing this error.", - type: "server_error", - code: null, - param: null, - }, - }, - { - finishReason: "error", - logprobs: undefined, - type: "finish", - usage: { - completionTokens: NaN, - promptTokens: NaN, - }, - }, - ]); - }); - - it("should handle unparsable stream parts", async () => { - server.responseChunks = [`data: {unparsable}\n\n`, "data: [DONE]\n\n"]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - expect(elements.length).toBe(2); - expect(elements[0]?.type).toBe("error"); - expect(elements[1]).toStrictEqual({ - finishReason: "error", - logprobs: undefined, - type: "finish", - usage: { - completionTokens: NaN, - promptTokens: NaN, - }, - }); - }); - - it("should expose the raw response headers", async () => { - prepareStreamResponse({ content: [] }); - - server.responseHeaders = { - "test-header": "test-value", - }; - - const { rawResponse } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(rawResponse?.headers).toStrictEqual({ - // default headers: - "content-type": "text/event-stream", - "cache-control": "no-cache", - connection: "keep-alive", - - // custom header - "test-header": "test-value", - }); - }); - - it("should pass the messages and the model", async () => { - prepareStreamResponse({ content: [] }); - - await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - stream: true, - stream_options: { include_usage: true }, - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - }); - }); - - it("should pass headers", async () => { - prepareStreamResponse({ content: 
[] }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.chat("openai/gpt-3.5-turbo").doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - const requestHeaders = await server.getRequestHeaders(); - - expect(requestHeaders).toStrictEqual({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - }); - - it("should pass extra body", async () => { - prepareStreamResponse({ content: [] }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - extraBody: { - custom_field: "custom_value", - providers: { - anthropic: { - custom_field: "custom_value", - }, - }, - }, - }); - - await provider.chat("anthropic/claude-3.5-sonnet").doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - const requestBody = await server.getRequestBodyJson(); - - expect(requestBody).toHaveProperty("custom_field", "custom_value"); - expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); - }); -}); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.ts b/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.ts deleted file mode 100644 index 29d16b2..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-chat-language-model.ts +++ /dev/null @@ -1,659 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { - LanguageModelV1, - LanguageModelV1FinishReason, - LanguageModelV1FunctionTool, - LanguageModelV1LogProbs, - LanguageModelV1ProviderDefinedTool, - LanguageModelV1StreamPart, -} from "@ai-sdk/provider"; -import type { ParseResult } from "@ai-sdk/provider-utils"; -import { InvalidResponseDataError, UnsupportedFunctionalityError } from "@ai-sdk/provider"; -import { - combineHeaders, - createEventSourceResponseHandler, - createJsonResponseHandler, - generateId, - isParsableJson, - postJsonToApi, -} from "@ai-sdk/provider-utils"; -import { z } from "zod"; - -import type { HyperbolicChatModelId, HyperbolicChatSettings } from "./hyperbolic-chat-settings"; -import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; -import { - HyperbolicErrorResponseSchema, - hyperbolicFailedResponseHandler, - isHyperbolicError, - tryParsingHyperbolicError, -} from "./hyperbolic-error"; -import { mapHyperbolicChatLogProbsOutput } from "./map-hyperbolic-chat-logprobs"; -import { mapHyperbolicFinishReason } from "./map-hyperbolic-finish-reason"; - -function isFunctionTool( - tool: LanguageModelV1FunctionTool | LanguageModelV1ProviderDefinedTool, -): tool is LanguageModelV1FunctionTool { - return "parameters" in tool; -} - -type HyperbolicChatConfig = { - provider: string; - compatibility: "strict" | "compatible"; - headers: () => Record; - url: (options: { modelId: string; path: string }) => string; - fetch?: typeof fetch; - extraBody?: Record; -}; - -export class HyperbolicChatLanguageModel implements LanguageModelV1 { - readonly specificationVersion = "v1"; - readonly defaultObjectGenerationMode = "tool"; - - readonly modelId: HyperbolicChatModelId; - readonly settings: HyperbolicChatSettings; - - private readonly config: HyperbolicChatConfig; - - constructor( - modelId: HyperbolicChatModelId, - settings: HyperbolicChatSettings, - config: HyperbolicChatConfig, - ) { - this.modelId = modelId; 
- this.settings = settings; - this.config = config; - } - - get provider(): string { - return this.config.provider; - } - - private getArgs({ - mode, - prompt, - maxTokens, - temperature, - topP, - frequencyPenalty, - presencePenalty, - seed, - stopSequences, - responseFormat, - topK, - providerMetadata, - }: Parameters[0]) { - const type = mode.type; - const extraCallingBody = providerMetadata?.["hyperbolic"] ?? {}; - - const baseArgs = { - // model id: - model: this.modelId, - models: this.settings.models, - - // model specific settings: - logit_bias: this.settings.logitBias, - logprobs: - this.settings.logprobs === true || typeof this.settings.logprobs === "number" - ? true - : undefined, - top_logprobs: - typeof this.settings.logprobs === "number" - ? this.settings.logprobs - : typeof this.settings.logprobs === "boolean" - ? this.settings.logprobs - ? 0 - : undefined - : undefined, - user: this.settings.user, - parallel_tool_calls: this.settings.parallelToolCalls, - - // standardized settings: - max_tokens: maxTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - seed, - - stop: stopSequences, - response_format: responseFormat, - top_k: topK, - - // messages: - messages: convertToHyperbolicChatMessages(prompt), - - // Hyperbolic specific settings: - include_reasoning: this.settings.includeReasoning, - reasoning: this.settings.reasoning, - - // extra body: - ...this.config.extraBody, - ...this.settings.extraBody, - ...extraCallingBody, - }; - - switch (type) { - case "regular": { - return { ...baseArgs, ...prepareToolsAndToolChoice(mode) }; - } - - case "object-json": { - return { - ...baseArgs, - response_format: { type: "json_object" }, - }; - } - - case "object-tool": { - return { - ...baseArgs, - tool_choice: { type: "function", function: { name: mode.tool.name } }, - tools: [ - { - type: "function", - function: { - name: mode.tool.name, - description: mode.tool.description, - parameters: 
mode.tool.parameters, - }, - }, - ], - }; - } - - // Handle all non-text types with a single default case - default: { - const _exhaustiveCheck: never = type; - throw new UnsupportedFunctionalityError({ - functionality: `${_exhaustiveCheck} mode`, - }); - } - } - } - async doGenerate( - options: Parameters[0], - ): Promise>> { - const args = this.getArgs(options); - - const { responseHeaders, value: response } = await postJsonToApi({ - url: this.config.url({ - path: "/chat/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: args, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler( - HyperbolicNonStreamChatCompletionResponseSchema, - ), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - const { messages: rawPrompt, ...rawSettings } = args; - const choice = response.choices[0]; - - if (!choice) { - throw new Error("No choice in response"); - } - - return { - response: { - id: response.id, - modelId: response.model, - }, - text: choice.message.content ?? undefined, - reasoning: choice.message.reasoning ?? undefined, - toolCalls: choice.message.tool_calls?.map((toolCall) => ({ - toolCallType: "function", - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - args: toolCall.function.arguments!, - })), - finishReason: mapHyperbolicFinishReason(choice.finish_reason), - usage: { - promptTokens: response.usage?.prompt_tokens ?? 0, - completionTokens: response.usage?.completion_tokens ?? 
0, - }, - rawCall: { rawPrompt, rawSettings }, - rawResponse: { headers: responseHeaders }, - warnings: [], - logprobs: mapHyperbolicChatLogProbsOutput(choice.logprobs), - }; - } - - async doStream( - options: Parameters[0], - ): Promise>> { - const args = this.getArgs(options); - - const { responseHeaders, value: response } = await postJsonToApi({ - url: this.config.url({ - path: "/chat/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: { - ...args, - stream: true, - - // only include stream_options when in strict compatibility mode: - stream_options: - this.config.compatibility === "strict" ? { include_usage: true } : undefined, - }, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createEventSourceResponseHandler( - HyperbolicStreamChatCompletionChunkSchema, - ), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - const { messages: rawPrompt, ...rawSettings } = args; - - const toolCalls: Array<{ - id: string; - type: "function"; - function: { - name: string; - arguments: string; - }; - }> = []; - - let finishReason: LanguageModelV1FinishReason = "other"; - let usage: { promptTokens: number; completionTokens: number } = { - promptTokens: Number.NaN, - completionTokens: Number.NaN, - }; - let logprobs: LanguageModelV1LogProbs; - - return { - stream: response.pipeThrough( - new TransformStream< - ParseResult>, - LanguageModelV1StreamPart - >({ - transform(chunk, controller) { - // handle failed chunk parsing / validation: - if (!chunk.success) { - finishReason = "error"; - - // Error messages from the API are sometimes an ugly combo of text and JSON in a single chunk, so attempt to parse it as a hyperbolic error. 
- const maybeHyperbolicError = tryParsingHyperbolicError(chunk.error); - if (maybeHyperbolicError) { - controller.enqueue({ type: "error", error: maybeHyperbolicError }); - return; - } - - controller.enqueue({ - type: "error", - error: chunk.error, - }); - return; - } - - const value = chunk.value; - - // handle error chunks: - if (isHyperbolicError(value)) { - finishReason = "error"; - controller.enqueue({ type: "error", error: value }); - return; - } - - if (value.id) { - controller.enqueue({ - type: "response-metadata", - id: value.id, - }); - } - - if (value.model) { - controller.enqueue({ - type: "response-metadata", - modelId: value.model, - }); - } - - if (value.usage != null) { - usage = { - promptTokens: value.usage.prompt_tokens, - completionTokens: value.usage.completion_tokens, - }; - } - - const choice = value.choices[0]; - - if (choice?.finish_reason != null) { - finishReason = mapHyperbolicFinishReason(choice.finish_reason); - } - - if (choice?.delta == null) { - return; - } - - const delta = choice.delta; - - if (delta.content != null) { - controller.enqueue({ - type: "text-delta", - textDelta: delta.content, - }); - } - - if (delta.reasoning != null) { - controller.enqueue({ - type: "reasoning", - textDelta: delta.reasoning, - }); - } - - const mappedLogprobs = mapHyperbolicChatLogProbsOutput(choice?.logprobs); - if (mappedLogprobs?.length) { - if (logprobs === undefined) logprobs = []; - logprobs.push(...mappedLogprobs); - } - - if (delta.tool_calls != null) { - for (const toolCallDelta of delta.tool_calls) { - const index = toolCallDelta.index; - - // Tool call start. Hyperbolic returns all information except the arguments in the first chunk. 
- if (toolCalls[index] == null) { - if (toolCallDelta.type !== "function") { - throw new InvalidResponseDataError({ - data: toolCallDelta, - message: `Expected 'function' type.`, - }); - } - - if (toolCallDelta.id == null) { - throw new InvalidResponseDataError({ - data: toolCallDelta, - message: `Expected 'id' to be a string.`, - }); - } - - if (toolCallDelta.function?.name == null) { - throw new InvalidResponseDataError({ - data: toolCallDelta, - message: `Expected 'function.name' to be a string.`, - }); - } - - toolCalls[index] = { - id: toolCallDelta.id, - type: "function", - function: { - name: toolCallDelta.function.name, - arguments: toolCallDelta.function.arguments ?? "", - }, - }; - - const toolCall = toolCalls[index]; - - if (toolCall == null) { - throw new Error("Tool call is missing"); - } - - // check if tool call is complete (some providers send the full tool call in one chunk) - if ( - toolCall.function?.name != null && - toolCall.function?.arguments != null && - isParsableJson(toolCall.function.arguments) - ) { - // send delta - controller.enqueue({ - type: "tool-call-delta", - toolCallType: "function", - toolCallId: toolCall.id, - toolName: toolCall.function.name, - argsTextDelta: toolCall.function.arguments, - }); - - // send tool call - controller.enqueue({ - type: "tool-call", - toolCallType: "function", - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - args: toolCall.function.arguments, - }); - } - - continue; - } - - // existing tool call, merge - const toolCall = toolCalls[index]; - - if (toolCall == null) { - throw new Error("Tool call is missing"); - } - - if (toolCallDelta.function?.arguments != null) { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - toolCall.function!.arguments += toolCallDelta.function?.arguments ?? 
""; - } - - // send delta - controller.enqueue({ - type: "tool-call-delta", - toolCallType: "function", - toolCallId: toolCall.id, - toolName: toolCall.function.name, - argsTextDelta: toolCallDelta.function.arguments ?? "", - }); - - // check if tool call is complete - if ( - toolCall.function?.name != null && - toolCall.function?.arguments != null && - isParsableJson(toolCall.function.arguments) - ) { - controller.enqueue({ - type: "tool-call", - toolCallType: "function", - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - args: toolCall.function.arguments, - }); - } - } - } - }, - - flush(controller) { - controller.enqueue({ - type: "finish", - finishReason, - logprobs, - usage, - }); - }, - }), - ), - rawCall: { rawPrompt, rawSettings }, - rawResponse: { headers: responseHeaders }, - warnings: [], - }; - } -} - -const HyperbolicChatCompletionBaseResponseSchema = z.object({ - id: z.string().optional(), - model: z.string().optional(), - usage: z - .object({ - prompt_tokens: z.number(), - completion_tokens: z.number(), - total_tokens: z.number(), - }) - .nullish(), -}); - -// limited version of the schema, focussed on what is needed for the implementation -// this approach limits breakages when the API changes and increases efficiency -const HyperbolicNonStreamChatCompletionResponseSchema = - HyperbolicChatCompletionBaseResponseSchema.extend({ - choices: z.array( - z.object({ - message: z.object({ - role: z.literal("assistant"), - content: z.string().nullable().optional(), - reasoning: z.string().nullable().optional(), - tool_calls: z - .array( - z.object({ - id: z.string().optional().nullable(), - type: z.literal("function"), - function: z.object({ - name: z.string(), - arguments: z.string(), - }), - }), - ) - .optional(), - }), - index: z.number(), - logprobs: z - .object({ - content: z - .array( - z.object({ - token: z.string(), - logprob: z.number(), - top_logprobs: z.array( - z.object({ - token: z.string(), - logprob: z.number(), - 
}), - ), - }), - ) - .nullable(), - }) - .nullable() - .optional(), - finish_reason: z.string().optional().nullable(), - }), - ), - }); - -// limited version of the schema, focussed on what is needed for the implementation -// this approach limits breakages when the API changes and increases efficiency -const HyperbolicStreamChatCompletionChunkSchema = z.union([ - HyperbolicChatCompletionBaseResponseSchema.extend({ - choices: z.array( - z.object({ - delta: z - .object({ - role: z.enum(["assistant"]).optional(), - content: z.string().nullish(), - reasoning: z.string().nullish().optional(), - tool_calls: z - .array( - z.object({ - index: z.number(), - id: z.string().nullish(), - type: z.literal("function").optional(), - function: z.object({ - name: z.string().nullish(), - arguments: z.string().nullish(), - }), - }), - ) - .nullish(), - }) - .nullish(), - logprobs: z - .object({ - content: z - .array( - z.object({ - token: z.string(), - logprob: z.number(), - top_logprobs: z.array( - z.object({ - token: z.string(), - logprob: z.number(), - }), - ), - }), - ) - .nullable(), - }) - .nullish(), - finish_reason: z.string().nullable().optional(), - index: z.number(), - }), - ), - }), - HyperbolicErrorResponseSchema, -]); - -function prepareToolsAndToolChoice( - mode: Parameters[0]["mode"] & { - type: "regular"; - }, -) { - // when the tools array is empty, change it to undefined to prevent errors: - const tools = mode.tools?.length ? 
mode.tools : undefined; - - if (tools == null) { - return { tools: undefined, tool_choice: undefined }; - } - - const mappedTools = tools.map((tool) => { - if (isFunctionTool(tool)) { - return { - type: "function" as const, - function: { - name: tool.name, - description: tool.description, - parameters: tool.parameters, - }, - }; - } else { - return { - type: "function" as const, - function: { - name: tool.name, - }, - }; - } - }); - - const toolChoice = mode.toolChoice; - - if (toolChoice == null) { - return { tools: mappedTools, tool_choice: undefined }; - } - - const type = toolChoice.type; - - switch (type) { - case "auto": - case "none": - case "required": - return { tools: mappedTools, tool_choice: type }; - case "tool": - return { - tools: mappedTools, - tool_choice: { - type: "function", - function: { - name: toolChoice.toolName, - }, - }, - }; - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`); - } - } -} diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-chat-prompt.ts b/packages/ai-sdk-provider-2/src/hyperbolic-chat-prompt.ts deleted file mode 100644 index f1c2a1e..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-chat-prompt.ts +++ /dev/null @@ -1,67 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -// Type for Hyperbolic Cache Control following Anthropic's pattern -export type HyperbolicCacheControl = { type: "ephemeral" }; - -export type HyperbolicChatPrompt = Array; - -export type ChatCompletionMessageParam = - | ChatCompletionSystemMessageParam - | ChatCompletionUserMessageParam - | ChatCompletionAssistantMessageParam - | ChatCompletionToolMessageParam; - -export interface ChatCompletionSystemMessageParam { - role: "system"; - content: string; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionUserMessageParam { - role: "user"; - content: string | Array; - cache_control?: HyperbolicCacheControl; -} - -export type ChatCompletionContentPart = - | ChatCompletionContentPartText - | ChatCompletionContentPartImage; - -export interface ChatCompletionContentPartImage { - type: "image_url"; - image_url: { - url: string; - }; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionContentPartText { - type: "text"; - text: string; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionAssistantMessageParam { - role: "assistant"; - content?: string | null; - tool_calls?: Array; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionMessageToolCall { - type: "function"; - id: string; - function: { - arguments: string; - name: string; - }; -} - -export interface ChatCompletionToolMessageParam { - role: "tool"; - content: string; - tool_call_id: string; - cache_control?: HyperbolicCacheControl; -} diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider-2/src/hyperbolic-chat-settings.ts deleted file mode 100644 index 76c8fc9..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-chat-settings.ts +++ /dev/null @@ -1,50 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { HyperbolicSharedSettings } from "./types"; - -// https://app.hyperbolic.xyz/models -export type HyperbolicChatModelId = string; - -export type HyperbolicChatSettings = { - /** - * Modify the likelihood of specified tokens appearing in the completion. - * - * Accepts a JSON object that maps tokens (specified by their token ID in - * the GPT tokenizer) to an associated bias value from -100 to 100. You - * can use this tokenizer tool to convert text to token IDs. Mathematically, - * the bias is added to the logits generated by the model prior to sampling. - * The exact effect will vary per model, but values between -1 and 1 should - * decrease or increase likelihood of selection; values like -100 or 100 - * should result in a ban or exclusive selection of the relevant token. - * - * As an example, you can pass {"50256": -100} to prevent the <|endoftext|> - * token from being generated. - */ - logitBias?: Record; - - /** - * Return the log probabilities of the tokens. Including logprobs will increase - * the response size and can slow down response times. However, it can - * be useful to better understand how the model is behaving. - * - * Setting to true will return the log probabilities of the tokens that - * were generated. - * - * Setting to a number will return the log probabilities of the top n - * tokens that were generated. - */ - logprobs?: boolean | number; - - /** - * Whether to enable parallel function calling during tool use. Default to true. - */ - parallelToolCalls?: boolean; - - /** - * A unique identifier representing your end-user, which can help Hyperbolic - * to monitor and detect abuse. Learn more. 
- */ - user?: string; -} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.test.ts b/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.test.ts deleted file mode 100644 index 9fdaec6..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.test.ts +++ /dev/null @@ -1,496 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; -import { - convertReadableStreamToArray, - JsonTestServer, - StreamingTestServer, -} from "@ai-sdk/provider-utils/test"; -import { describe, expect, it } from "vitest"; - -import { createHyperbolic } from "./hyperbolic-provider"; -import { mapHyperbolicCompletionLogProbs } from "./map-hyperbolic-completion-logprobs"; - -const TEST_PROMPT: LanguageModelV1Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -const TEST_LOGPROBS = { - tokens: [" ever", " after", ".\n\n", "The", " end", "."], - token_logprobs: [-0.0664508, -0.014520033, -1.3820221, -0.7890417, -0.5323165, -0.10247037], - top_logprobs: [ - { - " ever": -0.0664508, - }, - { - " after": -0.014520033, - }, - { - ".\n\n": -1.3820221, - }, - { - The: -0.7890417, - }, - { - " end": -0.5323165, - }, - { - ".": -0.10247037, - }, - ] as Record[], -}; - -const provider = createHyperbolic({ - apiKey: "test-api-key", - compatibility: "strict", -}); - -const model = provider.completion("meta-llama/Llama-3.1-405B-FP8"); - -describe("doGenerate", () => { - const server = new JsonTestServer("https://api.hyperbolic.xyz/v1/completions"); - server.setupTestEnvironment(); - - function prepareJsonResponse({ - content = "", - usage = { - prompt_tokens: 4, - total_tokens: 34, - completion_tokens: 30, - }, - logprobs = null, - finish_reason = "stop", - }: { - content?: string; - usage?: { - prompt_tokens: 
number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - tokens: string[]; - token_logprobs: number[]; - top_logprobs: Record[]; - } | null; - finish_reason?: string; - }) { - server.responseBodyJson = { - id: "cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB", - object: "text_completion", - created: 1711363706, - model: "meta-llama/Llama-3.1-405B-FP8", - choices: [ - { - text: content, - index: 0, - logprobs, - finish_reason, - }, - ], - usage, - }; - } - - it("should extract text response", async () => { - prepareJsonResponse({ content: "Hello, World!" }); - - const { text } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(text).toStrictEqual("Hello, World!"); - }); - - it("should extract usage", async () => { - prepareJsonResponse({ - content: "", - usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, - }); - - const { usage } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(usage).toStrictEqual({ - promptTokens: 20, - completionTokens: 5, - }); - }); - - it("should extract logprobs", async () => { - prepareJsonResponse({ logprobs: TEST_LOGPROBS }); - - const provider = createHyperbolic({ apiKey: "test-api-key" }); - - const response = await provider.completion("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - expect(response.logprobs).toStrictEqual(mapHyperbolicCompletionLogProbs(TEST_LOGPROBS)); - }); - - it("should extract finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "stop", - }); - - const { finishReason } = await provider.completion("meta-llama/Llama-3.1-405B-FP8").doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(finishReason).toStrictEqual("stop"); - }); - - it("should support unknown finish reason", 
async () => { - prepareJsonResponse({ - content: "", - finish_reason: "eos", - }); - - const { finishReason } = await provider.completion("meta-llama/Llama-3.1-405B-FP8").doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(finishReason).toStrictEqual("unknown"); - }); - - it("should expose the raw response headers", async () => { - prepareJsonResponse({ content: "" }); - - server.responseHeaders = { - "test-header": "test-value", - }; - - const { rawResponse } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(rawResponse?.headers).toStrictEqual({ - // default headers: - "content-length": "273", - "content-type": "application/json", - - // custom header - "test-header": "test-value", - }); - }); - - it("should pass the model and the prompt", async () => { - prepareJsonResponse({ content: "" }); - - await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "meta-llama/Llama-3.1-405B-FP8", - prompt: "Hello", - }); - }); - - it("should pass the models array when provided", async () => { - prepareJsonResponse({ content: "" }); - - const customModel = provider.completion("meta-llama/Llama-3.1-405B-FP8", { - models: ["openai/gpt-4", "anthropic/claude-2"], - }); - - await customModel.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "meta-llama/Llama-3.1-405B-FP8", - models: ["openai/gpt-4", "anthropic/claude-2"], - prompt: "Hello", - }); - }); - - it("should pass headers", async () => { - prepareJsonResponse({ content: "" }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await 
provider.completion("meta-llama/Llama-3.1-405B-FP8").doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - const requestHeaders = await server.getRequestHeaders(); - - expect(requestHeaders).toStrictEqual({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - }); -}); - -describe("doStream", () => { - const server = new StreamingTestServer("https://api.hyperbolic.xyz/v1/completions"); - - server.setupTestEnvironment(); - - function prepareStreamResponse({ - content, - finish_reason = "stop", - usage = { - prompt_tokens: 10, - total_tokens: 372, - completion_tokens: 362, - }, - logprobs = null, - }: { - content: string[]; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - tokens: string[]; - token_logprobs: number[]; - top_logprobs: Record[]; - } | null; - finish_reason?: string; - }) { - server.responseChunks = [ - ...content.map((text) => { - return ( - `data: {"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,` + - `"choices":[{"text":"${text}","index":0,"logprobs":null,"finish_reason":null}],"model":"meta-llama/Llama-3.1-405B-FP8"}\n\n` - ); - }), - `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,` + - `"choices":[{"text":"","index":0,"logprobs":${JSON.stringify( - logprobs, - )},"finish_reason":"${finish_reason}"}],"model":"meta-llama/Llama-3.1-405B-FP8"}\n\n`, - `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,` + - `"model":"meta-llama/Llama-3.1-405B-FP8","usage":${JSON.stringify( - usage, - )},"choices":[]}\n\n`, - "data: [DONE]\n\n", - ]; - } - - it("should stream text deltas", async () => { - 
prepareStreamResponse({ - content: ["Hello", ", ", "World!"], - finish_reason: "stop", - usage: { - prompt_tokens: 10, - total_tokens: 372, - completion_tokens: 362, - }, - logprobs: TEST_LOGPROBS, - }); - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - // note: space moved to last chunk bc of trimming - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { type: "text-delta", textDelta: "Hello" }, - { type: "text-delta", textDelta: ", " }, - { type: "text-delta", textDelta: "World!" }, - { type: "text-delta", textDelta: "" }, - { - type: "finish", - finishReason: "stop", - logprobs: mapHyperbolicCompletionLogProbs(TEST_LOGPROBS), - usage: { promptTokens: 10, completionTokens: 362 }, - }, - ]); - }); - - it("should handle error stream parts", async () => { - server.responseChunks = [ - `data: {"object": "error", "message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + - `help center at app.hyperbolic.xyz/support if you keep seeing this error.","type":"server_error","param":null,"code":null}\n\n`, - "data: [DONE]\n\n", - ]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "error", - error: { - object: "error", - message: - "The server had an error processing your request. Sorry about that! 
" + - "You can retry your request, or contact us through our help center at " + - "app.hyperbolic.xyz/support if you keep seeing this error.", - type: "server_error", - code: null, - param: null, - }, - }, - { - finishReason: "error", - logprobs: undefined, - type: "finish", - usage: { - completionTokens: NaN, - promptTokens: NaN, - }, - }, - ]); - }); - - it("should handle unparsable stream parts", async () => { - server.responseChunks = [`data: {unparsable}\n\n`, "data: [DONE]\n\n"]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - expect(elements.length).toBe(2); - expect(elements[0]?.type).toBe("error"); - expect(elements[1]).toStrictEqual({ - finishReason: "error", - logprobs: undefined, - type: "finish", - usage: { - completionTokens: NaN, - promptTokens: NaN, - }, - }); - }); - - it("should expose the raw response headers", async () => { - prepareStreamResponse({ content: [] }); - - server.responseHeaders = { - "test-header": "test-value", - }; - - const { rawResponse } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(rawResponse?.headers).toStrictEqual({ - // default headers: - "content-type": "text/event-stream", - "cache-control": "no-cache", - connection: "keep-alive", - - // custom header - "test-header": "test-value", - }); - }); - - it("should pass the model and the prompt", async () => { - prepareStreamResponse({ content: [] }); - - await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - stream: true, - stream_options: { include_usage: true }, - model: "meta-llama/Llama-3.1-405B-FP8", - prompt: "Hello", - }); - }); - - it("should pass headers", async () => { - prepareStreamResponse({ content: [] }); - - const provider = 
createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.completion("meta-llama/Llama-3.1-405B-FP8").doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - const requestHeaders = await server.getRequestHeaders(); - - expect(requestHeaders).toStrictEqual({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - }); - - it("should pass extra body", async () => { - prepareStreamResponse({ content: [] }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - extraBody: { - custom_field: "custom_value", - providers: { - anthropic: { - custom_field: "custom_value", - }, - }, - }, - }); - - await provider.completion("openai/gpt-4o").doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - const requestBody = await server.getRequestBodyJson(); - - expect(requestBody).toHaveProperty("custom_field", "custom_value"); - expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); - }); -}); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.ts b/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.ts deleted file mode 100644 index a37c22c..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-completion-language-model.ts +++ /dev/null @@ -1,352 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { - LanguageModelV1, - LanguageModelV1FinishReason, - LanguageModelV1LogProbs, - LanguageModelV1StreamPart, -} from "@ai-sdk/provider"; -import type { ParseResult } from "@ai-sdk/provider-utils"; -import { UnsupportedFunctionalityError } from "@ai-sdk/provider"; -import { - combineHeaders, - createEventSourceResponseHandler, - createJsonResponseHandler, - postJsonToApi, -} from "@ai-sdk/provider-utils"; -import { z } from "zod"; - -import type { - HyperbolicCompletionModelId, - HyperbolicCompletionSettings, -} from "./hyperbolic-completion-settings"; -import { convertToHyperbolicCompletionPrompt } from "./convert-to-hyperbolic-completion-prompt"; -import { - HyperbolicErrorResponseSchema, - hyperbolicFailedResponseHandler, - isHyperbolicError, -} from "./hyperbolic-error"; -import { mapHyperbolicCompletionLogProbs } from "./map-hyperbolic-completion-logprobs"; -import { mapHyperbolicFinishReason } from "./map-hyperbolic-finish-reason"; - -type HyperbolicCompletionConfig = { - provider: string; - compatibility: "strict" | "compatible"; - headers: () => Record; - url: (options: { modelId: string; path: string }) => string; - fetch?: typeof fetch; - extraBody?: Record; -}; - -export class HyperbolicCompletionLanguageModel implements LanguageModelV1 { - readonly specificationVersion = "v1"; - readonly defaultObjectGenerationMode = undefined; - - readonly modelId: HyperbolicCompletionModelId; - readonly settings: HyperbolicCompletionSettings; - - private readonly config: HyperbolicCompletionConfig; - - constructor( - modelId: HyperbolicCompletionModelId, - settings: HyperbolicCompletionSettings, - config: HyperbolicCompletionConfig, - ) { - this.modelId = modelId; - this.settings = settings; - this.config = config; - } - - get provider(): string { - return this.config.provider; - } - - private getArgs({ - mode, - inputFormat, - prompt, - maxTokens, - temperature, - topP, - frequencyPenalty, - 
presencePenalty, - seed, - responseFormat, - topK, - stopSequences, - providerMetadata, - }: Parameters[0]) { - const type = mode.type; - - const extraCallingBody = providerMetadata?.["hyperbolic"] ?? {}; - - const { prompt: completionPrompt } = convertToHyperbolicCompletionPrompt({ - prompt, - inputFormat, - }); - - const baseArgs = { - // model id: - model: this.modelId, - models: this.settings.models, - - // model specific settings: - logit_bias: this.settings.logitBias, - logprobs: - typeof this.settings.logprobs === "number" - ? this.settings.logprobs - : typeof this.settings.logprobs === "boolean" - ? this.settings.logprobs - ? 0 - : undefined - : undefined, - suffix: this.settings.suffix, - user: this.settings.user, - - // standardized settings: - max_tokens: maxTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - seed, - - stop: stopSequences, - response_format: responseFormat, - top_k: topK, - - // prompt: - prompt: completionPrompt, - - // Hyperbolic specific settings: - include_reasoning: this.settings.includeReasoning, - reasoning: this.settings.reasoning, - - // extra body: - ...this.config.extraBody, - ...this.settings.extraBody, - ...extraCallingBody, - }; - - switch (type) { - case "regular": { - if (mode.tools?.length) { - throw new UnsupportedFunctionalityError({ - functionality: "tools", - }); - } - - if (mode.toolChoice) { - throw new UnsupportedFunctionalityError({ - functionality: "toolChoice", - }); - } - - return baseArgs; - } - - case "object-json": { - throw new UnsupportedFunctionalityError({ - functionality: "object-json mode", - }); - } - - case "object-tool": { - throw new UnsupportedFunctionalityError({ - functionality: "object-tool mode", - }); - } - - // Handle all non-text types with a single default case - default: { - const _exhaustiveCheck: never = type; - throw new UnsupportedFunctionalityError({ - functionality: `${_exhaustiveCheck} mode`, - }); - } - } - } - - 
async doGenerate( - options: Parameters[0], - ): Promise>> { - const args = this.getArgs(options); - - const { responseHeaders, value: response } = await postJsonToApi({ - url: this.config.url({ - path: "/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: args, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler(HyperbolicCompletionChunkSchema), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - const { prompt: rawPrompt, ...rawSettings } = args; - if (isHyperbolicError(response)) { - throw new Error(`${response.message}`); - } - - const choice = response.choices[0]; - - if (!choice) { - throw new Error("No choice in Hyperbolic completion response"); - } - - return { - response: { - id: response.id, - modelId: response.model, - }, - text: choice.text ?? "", - reasoning: choice.reasoning || undefined, - usage: { - promptTokens: response.usage?.prompt_tokens ?? 0, - completionTokens: response.usage?.completion_tokens ?? 0, - }, - finishReason: mapHyperbolicFinishReason(choice.finish_reason), - logprobs: mapHyperbolicCompletionLogProbs(choice.logprobs), - rawCall: { rawPrompt, rawSettings }, - rawResponse: { headers: responseHeaders }, - warnings: [], - }; - } - - async doStream( - options: Parameters[0], - ): Promise>> { - const args = this.getArgs(options); - - const { responseHeaders, value: response } = await postJsonToApi({ - url: this.config.url({ - path: "/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: { - ...this.getArgs(options), - stream: true, - - // only include stream_options when in strict compatibility mode: - stream_options: - this.config.compatibility === "strict" ? 
{ include_usage: true } : undefined, - }, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createEventSourceResponseHandler(HyperbolicCompletionChunkSchema), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - const { prompt: rawPrompt, ...rawSettings } = args; - - let finishReason: LanguageModelV1FinishReason = "other"; - let usage: { promptTokens: number; completionTokens: number } = { - promptTokens: Number.NaN, - completionTokens: Number.NaN, - }; - let logprobs: LanguageModelV1LogProbs; - - return { - stream: response.pipeThrough( - new TransformStream< - ParseResult>, - LanguageModelV1StreamPart - >({ - transform(chunk, controller) { - // handle failed chunk parsing / validation: - if (!chunk.success) { - finishReason = "error"; - controller.enqueue({ type: "error", error: chunk.error }); - return; - } - - const value = chunk.value; - - // handle error chunks: - if (isHyperbolicError(value)) { - finishReason = "error"; - controller.enqueue({ type: "error", error: value }); - return; - } - - if (value.usage != null) { - usage = { - promptTokens: value.usage.prompt_tokens, - completionTokens: value.usage.completion_tokens, - }; - } - - const choice = value.choices[0]; - - if (choice?.finish_reason != null) { - finishReason = mapHyperbolicFinishReason(choice.finish_reason); - } - - if (choice?.text != null) { - controller.enqueue({ - type: "text-delta", - textDelta: choice.text, - }); - } - - const mappedLogprobs = mapHyperbolicCompletionLogProbs(choice?.logprobs); - if (mappedLogprobs?.length) { - if (logprobs === undefined) logprobs = []; - logprobs.push(...mappedLogprobs); - } - }, - - flush(controller) { - controller.enqueue({ - type: "finish", - finishReason, - logprobs, - usage, - }); - }, - }), - ), - rawCall: { rawPrompt, rawSettings }, - rawResponse: { headers: responseHeaders }, - warnings: [], - }; - } -} - -// limited version of the schema, focussed on what is needed for the implementation 
-// this approach limits breakages when the API changes and increases efficiency -const HyperbolicCompletionChunkSchema = z.union([ - z.object({ - id: z.string().optional(), - model: z.string().optional(), - choices: z.array( - z.object({ - text: z.string(), - reasoning: z.string().nullish().optional(), - finish_reason: z.string().nullish(), - index: z.number(), - logprobs: z - .object({ - tokens: z.array(z.string()), - token_logprobs: z.array(z.number()), - top_logprobs: z.array(z.record(z.string(), z.number())).nullable(), - }) - .nullable() - .optional(), - }), - ), - usage: z - .object({ - prompt_tokens: z.number(), - completion_tokens: z.number(), - }) - .optional() - .nullable(), - }), - HyperbolicErrorResponseSchema, -]); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-completion-settings.ts b/packages/ai-sdk-provider-2/src/hyperbolic-completion-settings.ts deleted file mode 100644 index efc31a0..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-completion-settings.ts +++ /dev/null @@ -1,42 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { HyperbolicSharedSettings } from "./types"; - -export type HyperbolicCompletionModelId = string; - -export type HyperbolicCompletionSettings = { - /** - * Modify the likelihood of specified tokens appearing in the completion. - * - * Accepts a JSON object that maps tokens (specified by their token ID in - * the GPT tokenizer) to an associated bias value from -100 to 100. You - * can use this tokenizer tool to convert text to token IDs. Mathematically, - * the bias is added to the logits generated by the model prior to sampling. - * The exact effect will vary per model, but values between -1 and 1 should - * decrease or increase likelihood of selection; values like -100 or 100 - * should result in a ban or exclusive selection of the relevant token. 
- * As an example, you can pass {"50256": -100} to prevent the <|endoftext|> - * token from being generated. - */ - logitBias?: Record; - - /** - * Return the log probabilities of the tokens. Including logprobs will increase - * the response size and can slow down response times. However, it can - * be useful to better understand how the model is behaving. - * - * Setting to true will return the log probabilities of the tokens that - * were generated. - * - * Setting to a number will return the log probabilities of the top n - * tokens that were generated. - */ - logprobs?: boolean | number; - - /** - * The suffix that comes after a completion of inserted text. - */ - suffix?: string; -} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-error.ts b/packages/ai-sdk-provider-2/src/hyperbolic-error.ts deleted file mode 100644 index 60eed7e..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-error.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { TypeValidationError } from "ai"; -import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"; -import { JSONParseError } from "ai"; -import { z } from "zod"; - -export const HyperbolicErrorResponseSchema = z.object({ - object: z.literal("error"), - message: z.string(), - type: z.string(), - param: z.any().nullable(), - code: z.coerce.number().nullable(), -}); - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -export const isHyperbolicError = (data: any): data is HyperbolicErrorData => { - return "object" in data && data.object === "error"; -}; - -export type HyperbolicErrorData = z.infer; - -export const hyperbolicFailedResponseHandler = createJsonErrorResponseHandler({ - errorSchema: HyperbolicErrorResponseSchema, - errorToMessage: (data) => data.message, -}); - -/** - * Error messages from the API are sometimes an ugly combo of text and JSON in a single chunk. Extract data from error message if it contains JSON - */ -export const tryParsingHyperbolicError = (error: JSONParseError | TypeValidationError) => { - if (!JSONParseError.isInstance(error)) { - return undefined; - } - - const jsonMatch = error.text.match(/\{.*\}/); // Match between brackets - if (jsonMatch) { - try { - const parsedErrorJson = JSON.parse(jsonMatch[0]); - if (parsedErrorJson.message) { - return HyperbolicErrorResponseSchema.parse(parsedErrorJson); - } - } catch { - return undefined; - } - } -}; diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-image-language-model.ts b/packages/ai-sdk-provider-2/src/hyperbolic-image-language-model.ts deleted file mode 100644 index da5501d..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-image-language-model.ts +++ /dev/null @@ -1,130 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { ImageModelV1, ImageModelV1CallWarning } from "@ai-sdk/provider"; -import { combineHeaders, createJsonResponseHandler, postJsonToApi } from "@ai-sdk/provider-utils"; -import { z } from "zod"; - -import type { - HyperbolicImageModelId, - HyperbolicImageProviderOptions, - HyperbolicImageProviderResponseMetadata, - HyperbolicImageSettings, -} from "./hyperbolic-image-settings"; -import { hyperbolicFailedResponseHandler } from "./hyperbolic-error"; - -type HyperbolicImageModelConfig = { - provider: string; - compatibility: "strict" | "compatible"; - headers: () => Record; - url: (options: { modelId: string; path: string }) => string; - fetch?: typeof fetch; - extraBody?: Record; -}; - -export class HyperbolicImageModel implements ImageModelV1 { - readonly specificationVersion = "v1"; - readonly provider = "hyperbolic.image"; - - get maxImagesPerCall(): number { - return this.settings.maxImagesPerCall ?? 1; - } - - constructor( - readonly modelId: HyperbolicImageModelId, - private readonly settings: HyperbolicImageSettings, - private readonly config: HyperbolicImageModelConfig, - ) {} - - async doGenerate( - options: Omit[0], "providerOptions"> & { - providerOptions: { - hyperbolic?: HyperbolicImageProviderOptions; - }; - }, - ): Promise< - Omit>, "response"> & { - response: Awaited>["response"] & { - hyperbolic: HyperbolicImageProviderResponseMetadata; - }; - } - > { - const warnings: Array = []; - const [width, height] = options.size ? 
options.size.split("x").map(Number) : []; - - const args = { - prompt: options.prompt, - height, - width, - cfg_scale: options.providerOptions?.hyperbolic?.cfgScale, - enable_refiner: options.providerOptions?.hyperbolic?.enableRefiner, - model_name: this.modelId, - negative_prompt: options.providerOptions?.hyperbolic?.negativePrompt, - steps: options.providerOptions?.hyperbolic?.steps, - strength: options.providerOptions?.hyperbolic?.strength, - image: options.providerOptions?.hyperbolic?.image, - }; - - if (options.aspectRatio != undefined) { - warnings.push({ - type: "unsupported-setting", - setting: "aspectRatio", - details: "This model does not support `aspectRatio`. Use `size` instead.", - }); - } - if (options.seed != undefined) { - warnings.push({ - type: "unsupported-setting", - setting: "seed", - details: "This model does not support `seed`.", - }); - } - if (options.n != undefined) { - warnings.push({ - type: "unsupported-setting", - setting: "n", - details: "This model does not support `n`.", - }); - } - - const { value: response, responseHeaders } = await postJsonToApi({ - url: this.config.url({ - path: "/image/generation", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: args, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler(hyperbolicImageResponseSchema), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - return { - images: response.images.map((image) => image.image), - warnings, - response: { - timestamp: new Date(), - modelId: this.modelId, - headers: responseHeaders, - hyperbolic: { - inferenceTime: response.inference_time, - randomSeeds: response.images.map((image) => image.random_seed), - }, - }, - }; - } -} - -// minimal version of the schema, focussed on what is needed for the implementation to avoid breaking changes -const hyperbolicImageResponseSchema = z.object({ - images: z.array( - z.object({ - 
image: z.string(), - index: z.number(), - random_seed: z.number(), - }), - ), - inference_time: z.number(), -}); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-image-settings.ts b/packages/ai-sdk-provider-2/src/hyperbolic-image-settings.ts deleted file mode 100644 index 9399faf..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-image-settings.ts +++ /dev/null @@ -1,40 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { Experimental_GenerateImageResult } from "ai"; - -import type { HyperbolicSharedSettings } from "./types"; - -export type HyperbolicImageModelId = string; - -export type HyperbolicImageSettings = { - /** - * Override the maximum number of images per call (default is dependent on the - * model, or 1 for an unknown model). - */ - maxImagesPerCall?: number; -} & HyperbolicSharedSettings; - -export type HyperbolicImageProviderOptions = { - cfgScale?: number; - negativePrompt?: string; - steps?: number; - strength?: number; - enableRefiner?: boolean; - image?: string; -}; - -export type HyperbolicImageProviderResponseMetadata = { - inferenceTime: number; - randomSeeds: number[]; -}; - -export type Experimental_HyperbolicGenerateImageResult = Omit< - Experimental_GenerateImageResult, - "responses" -> & { - responses: (Experimental_GenerateImageResult["responses"][number] & { - hyperbolic: HyperbolicImageProviderResponseMetadata; - })[]; -}; diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-provider-options.test.ts b/packages/ai-sdk-provider-2/src/hyperbolic-provider-options.test.ts deleted file mode 100644 index b1db769..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-provider-options.test.ts +++ /dev/null @@ -1,64 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; -import { createTestServer } from "@ai-sdk/provider-utils/test"; -import { streamText } from "ai"; -import { describe, expect, it, vi } from "vitest"; - -import { createHyperbolic } from "./hyperbolic-provider"; - -// Add type assertions for the mocked classes -const TEST_MESSAGES: LanguageModelV1Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -describe("providerOptions", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": { - response: { - type: "stream-chunks", - chunks: [], - }, - }, - }); - - beforeEach(() => { - vi.clearAllMocks(); - }); - - it("should set providerOptions hyperbolic to extra body", async () => { - const hyperbolic = createHyperbolic({ - apiKey: "test", - }); - const model = hyperbolic("Qwen/Qwen2.5-72B-Instruct"); - - await streamText({ - model, - messages: TEST_MESSAGES, - providerOptions: { - hyperbolic: { - reasoning: { - max_tokens: 1000, - }, - }, - }, - }).consumeStream(); - - expect(await server.calls[0]?.requestBody).toStrictEqual({ - messages: [ - { - content: "Hello", - role: "user", - }, - ], - reasoning: { - max_tokens: 1000, - }, - temperature: 0, - model: "Qwen/Qwen2.5-72B-Instruct", - stream: true, - }); - }); -}); diff --git a/packages/ai-sdk-provider-2/src/hyperbolic-provider.ts b/packages/ai-sdk-provider-2/src/hyperbolic-provider.ts deleted file mode 100644 index e878dcf..0000000 --- a/packages/ai-sdk-provider-2/src/hyperbolic-provider.ts +++ /dev/null @@ -1,180 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; - -import type { HyperbolicChatModelId, HyperbolicChatSettings } from "./hyperbolic-chat-settings"; -import type { - HyperbolicCompletionModelId, - HyperbolicCompletionSettings, -} from "./hyperbolic-completion-settings"; -import type { HyperbolicImageModelId, HyperbolicImageSettings } from "./hyperbolic-image-settings"; -import { HyperbolicChatLanguageModel } from "./hyperbolic-chat-language-model"; -import { HyperbolicCompletionLanguageModel } from "./hyperbolic-completion-language-model"; -import { HyperbolicImageModel } from "./hyperbolic-image-language-model"; - -export type { HyperbolicCompletionSettings }; - -export interface HyperbolicProvider { - ( - modelId: HyperbolicChatModelId, - settings?: HyperbolicCompletionSettings, - ): HyperbolicCompletionLanguageModel; - (modelId: HyperbolicChatModelId, settings?: HyperbolicChatSettings): HyperbolicChatLanguageModel; - - languageModel( - modelId: HyperbolicChatModelId, - settings?: HyperbolicCompletionSettings, - ): HyperbolicCompletionLanguageModel; - languageModel( - modelId: HyperbolicChatModelId, - settings?: HyperbolicChatSettings, - ): HyperbolicChatLanguageModel; - - /** - * Creates a Hyperbolic chat model for text generation. - */ - chat( - modelId: HyperbolicChatModelId, - settings?: HyperbolicChatSettings, - ): HyperbolicChatLanguageModel; - - /** - * Creates a Hyperbolic completion model for text generation. - */ - completion( - modelId: HyperbolicCompletionModelId, - settings?: HyperbolicCompletionSettings, - ): HyperbolicCompletionLanguageModel; - - /** - * Creates a Hyperbolic image model for image generation. - */ - image(modelId: HyperbolicImageModelId, settings?: HyperbolicImageSettings): HyperbolicImageModel; -} - -export interface HyperbolicProviderSettings { - /** - * Base URL for the Hyperbolic API calls. 
- */ - baseURL?: string; - - /** - * @deprecated Use `baseURL` instead. - */ - baseUrl?: string; - - /** - * API key for authenticating requests. - */ - apiKey?: string; - - /** - * Custom headers to include in the requests. - */ - headers?: Record; - - /** - * Hyperbolic compatibility mode. Should be set to `strict` when using the Hyperbolic API, - * and `compatible` when using 3rd party providers. In `compatible` mode, newer - * information such as streamOptions are not being sent. Defaults to 'compatible'. - */ - compatibility?: "strict" | "compatible"; - - /** - * Custom fetch implementation. You can use it as a middleware to intercept requests, - * or to provide a custom fetch implementation for e.g. testing. - */ - fetch?: typeof fetch; - - /** - * A JSON object to send as the request body to access Hyperbolic features & upstream provider features. - */ - extraBody?: Record; -} - -/** - * Create an Hyperbolic provider instance. - */ -export function createHyperbolic(options: HyperbolicProviderSettings = {}): HyperbolicProvider { - const baseURL = - withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? "https://api.hyperbolic.xyz/v1"; - - // we default to compatible, because strict breaks providers like Groq: - const compatibility = options.compatibility ?? 
"compatible"; - - const getHeaders = () => ({ - Authorization: `Bearer ${loadApiKey({ - apiKey: options.apiKey, - environmentVariableName: "HYPERBOLIC_API_KEY", - description: "Hyperbolic", - })}`, - ...options.headers, - }); - - const createChatModel = (modelId: HyperbolicChatModelId, settings: HyperbolicChatSettings = {}) => - new HyperbolicChatLanguageModel(modelId, settings, { - provider: "hyperbolic.chat", - url: ({ path }) => `${baseURL}${path}`, - headers: getHeaders, - compatibility, - fetch: options.fetch, - extraBody: options.extraBody, - }); - - const createCompletionModel = ( - modelId: HyperbolicCompletionModelId, - settings: HyperbolicCompletionSettings = {}, - ) => - new HyperbolicCompletionLanguageModel(modelId, settings, { - provider: "hyperbolic.completion", - url: ({ path }) => `${baseURL}${path}`, - headers: getHeaders, - compatibility, - fetch: options.fetch, - extraBody: options.extraBody, - }); - - const createImageModel = ( - modelId: HyperbolicImageModelId, - settings: HyperbolicImageSettings = {}, - ) => - new HyperbolicImageModel(modelId, settings, { - provider: "hyperbolic.image", - url: ({ path }) => `${baseURL}${path}`, - headers: getHeaders, - compatibility, - fetch: options.fetch, - extraBody: options.extraBody, - }); - - const createLanguageModel = ( - modelId: HyperbolicChatModelId | HyperbolicCompletionModelId, - settings?: HyperbolicChatSettings | HyperbolicCompletionSettings, - ) => { - if (new.target) { - throw new Error("The Hyperbolic model function cannot be called with the new keyword."); - } - - if (modelId === "openai/gpt-3.5-turbo-instruct") { - return createCompletionModel(modelId, settings as HyperbolicCompletionSettings); - } - - return createChatModel(modelId, settings as HyperbolicChatSettings); - }; - - const provider = function ( - modelId: HyperbolicChatModelId | HyperbolicCompletionModelId, - settings?: HyperbolicChatSettings | HyperbolicCompletionSettings, - ) { - return createLanguageModel(modelId, settings); 
- }; - - provider.languageModel = createLanguageModel; - provider.chat = createChatModel; - provider.completion = createCompletionModel; - provider.image = createImageModel; - - return provider as HyperbolicProvider; -} diff --git a/packages/ai-sdk-provider-2/src/index.ts b/packages/ai-sdk-provider-2/src/index.ts index 0180b06..670fb1c 100644 --- a/packages/ai-sdk-provider-2/src/index.ts +++ b/packages/ai-sdk-provider-2/src/index.ts @@ -1,3 +1,3 @@ -export * from "./hyperbolic-provider"; -export * from "./types"; -export * from "./hyperbolic-error"; +export * from './facade'; +export * from './provider'; +export * from './types'; diff --git a/packages/ai-sdk-provider-2/src/internal copy/index.ts b/packages/ai-sdk-provider-2/src/internal copy/index.ts new file mode 100644 index 0000000..fb46972 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/internal copy/index.ts @@ -0,0 +1,5 @@ +export * from '../chat'; +export * from '../completion'; +export * from '../types'; +export * from '../types/openrouter-chat-settings'; +export * from '../types/openrouter-completion-settings'; diff --git a/packages/ai-sdk-provider-2/src/internal/index.ts b/packages/ai-sdk-provider-2/src/internal/index.ts deleted file mode 100644 index c9936d2..0000000 --- a/packages/ai-sdk-provider-2/src/internal/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -export * from "../hyperbolic-chat-language-model"; -export * from "../hyperbolic-chat-settings"; -export * from "../hyperbolic-completion-language-model"; -export * from "../hyperbolic-completion-settings"; -export * from "../hyperbolic-image-language-model"; -export * from "../hyperbolic-image-settings"; -export * from "../types"; diff --git a/packages/ai-sdk-provider-2/src/map-hyperbolic-chat-logprobs.ts b/packages/ai-sdk-provider-2/src/map-hyperbolic-chat-logprobs.ts deleted file mode 100644 index f325b88..0000000 --- a/packages/ai-sdk-provider-2/src/map-hyperbolic-chat-logprobs.ts +++ /dev/null @@ -1,37 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. 
on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1LogProbs } from "@ai-sdk/provider"; - -type HyperbolicChatLogProbs = { - content: - | { - token: string; - logprob: number; - top_logprobs: - | { - token: string; - logprob: number; - }[] - | null; - }[] - | null; -}; - -export function mapHyperbolicChatLogProbsOutput( - logprobs: HyperbolicChatLogProbs | null | undefined, -): LanguageModelV1LogProbs | undefined { - return ( - logprobs?.content?.map(({ token, logprob, top_logprobs }) => ({ - token, - logprob, - topLogprobs: top_logprobs - ? top_logprobs.map(({ token, logprob }) => ({ - token, - logprob, - })) - : [], - })) ?? undefined - ); -} diff --git a/packages/ai-sdk-provider-2/src/map-hyperbolic-completion-logprobs.ts b/packages/ai-sdk-provider-2/src/map-hyperbolic-completion-logprobs.ts deleted file mode 100644 index 121d731..0000000 --- a/packages/ai-sdk-provider-2/src/map-hyperbolic-completion-logprobs.ts +++ /dev/null @@ -1,24 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -type HyperbolicCompletionLogProps = { - tokens: string[]; - token_logprobs: number[]; - top_logprobs: Record[] | null; -}; - -export function mapHyperbolicCompletionLogProbs( - logprobs: HyperbolicCompletionLogProps | null | undefined, -) { - return logprobs?.tokens.map((token, index) => ({ - token, - logprob: logprobs.token_logprobs[index] ?? 0, - topLogprobs: logprobs.top_logprobs - ? Object.entries(logprobs.top_logprobs[index] ?? 
{}).map(([token, logprob]) => ({ - token, - logprob, - })) - : [], - })); -} diff --git a/packages/ai-sdk-provider-2/src/map-hyperbolic-finish-reason.ts b/packages/ai-sdk-provider-2/src/map-hyperbolic-finish-reason.ts deleted file mode 100644 index 5763ff8..0000000 --- a/packages/ai-sdk-provider-2/src/map-hyperbolic-finish-reason.ts +++ /dev/null @@ -1,23 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1FinishReason } from "@ai-sdk/provider"; - -export function mapHyperbolicFinishReason( - finishReason: string | null | undefined, -): LanguageModelV1FinishReason { - switch (finishReason) { - case "stop": - return "stop"; - case "length": - return "length"; - case "content_filter": - return "content-filter"; - case "function_call": - case "tool_calls": - return "tool-calls"; - default: - return "unknown"; - } -} diff --git a/packages/ai-sdk-provider-2/src/provider.ts b/packages/ai-sdk-provider-2/src/provider.ts new file mode 100644 index 0000000..6c9a3e3 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/provider.ts @@ -0,0 +1,230 @@ +import type { ProviderV3 } from '@ai-sdk/provider'; +import type { + OpenRouterChatModelId, + OpenRouterChatSettings, +} from './types/openrouter-chat-settings'; +import type { + OpenRouterCompletionModelId, + OpenRouterCompletionSettings, +} from './types/openrouter-completion-settings'; +import type { + OpenRouterEmbeddingModelId, + OpenRouterEmbeddingSettings, +} from './types/openrouter-embedding-settings'; + +import { loadApiKey, withoutTrailingSlash } from '@ai-sdk/provider-utils'; +import { OpenRouterChatLanguageModel } from './chat'; +import { OpenRouterCompletionLanguageModel } from './completion'; +import { OpenRouterEmbeddingModel } from './embedding'; +import { withUserAgentSuffix } from './utils/with-user-agent-suffix'; +import { VERSION } from './version'; + +export type { 
OpenRouterChatSettings, OpenRouterCompletionSettings }; + +export interface OpenRouterProvider extends ProviderV3 { + ( + modelId: OpenRouterChatModelId, + settings?: OpenRouterCompletionSettings, + ): OpenRouterCompletionLanguageModel; + ( + modelId: OpenRouterChatModelId, + settings?: OpenRouterChatSettings, + ): OpenRouterChatLanguageModel; + + languageModel( + modelId: OpenRouterChatModelId, + settings?: OpenRouterCompletionSettings, + ): OpenRouterCompletionLanguageModel; + languageModel( + modelId: OpenRouterChatModelId, + settings?: OpenRouterChatSettings, + ): OpenRouterChatLanguageModel; + + /** +Creates an OpenRouter chat model for text generation. + */ + chat( + modelId: OpenRouterChatModelId, + settings?: OpenRouterChatSettings, + ): OpenRouterChatLanguageModel; + + /** +Creates an OpenRouter completion model for text generation. + */ + completion( + modelId: OpenRouterCompletionModelId, + settings?: OpenRouterCompletionSettings, + ): OpenRouterCompletionLanguageModel; + + /** +Creates an OpenRouter text embedding model. (AI SDK v5) + */ + textEmbeddingModel( + modelId: OpenRouterEmbeddingModelId, + settings?: OpenRouterEmbeddingSettings, + ): OpenRouterEmbeddingModel; + + /** +Creates an OpenRouter text embedding model. (AI SDK v4 - deprecated, use textEmbeddingModel instead) +@deprecated Use textEmbeddingModel instead + */ + embedding( + modelId: OpenRouterEmbeddingModelId, + settings?: OpenRouterEmbeddingSettings, + ): OpenRouterEmbeddingModel; +} + +export interface OpenRouterProviderSettings { + /** +Base URL for the OpenRouter API calls. + */ + baseURL?: string; + + /** +@deprecated Use `baseURL` instead. + */ + baseUrl?: string; + + /** +API key for authenticating requests. + */ + apiKey?: string; + + /** +Custom headers to include in the requests. + */ + headers?: Record; + + /** +OpenRouter compatibility mode. Should be set to `strict` when using the OpenRouter API, +and `compatible` when using 3rd party providers. 
In `compatible` mode, newer +information such as streamOptions are not being sent. Defaults to 'compatible'. + */ + compatibility?: 'strict' | 'compatible'; + + /** +Custom fetch implementation. You can use it as a middleware to intercept requests, +or to provide a custom fetch implementation for e.g. testing. + */ + fetch?: typeof fetch; + + /** +A JSON object to send as the request body to access OpenRouter features & upstream provider features. + */ + extraBody?: Record; + + /** + * Record of provider slugs to API keys for injecting into provider routing. + * Maps provider slugs (e.g. "anthropic", "openai") to their respective API keys. + */ + api_keys?: Record; +} + +/** +Create an OpenRouter provider instance. + */ +export function createOpenRouter( + options: OpenRouterProviderSettings = {}, +): OpenRouterProvider { + const baseURL = + withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? + 'https://openrouter.ai/api/v1'; + + // we default to compatible, because strict breaks providers like Groq: + const compatibility = options.compatibility ?? 
'compatible'; + + const getHeaders = () => + withUserAgentSuffix( + { + Authorization: `Bearer ${loadApiKey({ + apiKey: options.apiKey, + environmentVariableName: 'OPENROUTER_API_KEY', + description: 'OpenRouter', + })}`, + ...options.headers, + ...(options.api_keys && + Object.keys(options.api_keys).length > 0 && { + 'X-Provider-API-Keys': JSON.stringify(options.api_keys), + }), + }, + `ai-sdk/openrouter/${VERSION}`, + ); + + const createChatModel = ( + modelId: OpenRouterChatModelId, + settings: OpenRouterChatSettings = {}, + ) => + new OpenRouterChatLanguageModel(modelId, settings, { + provider: 'openrouter.chat', + url: ({ path }) => `${baseURL}${path}`, + headers: getHeaders, + compatibility, + fetch: options.fetch, + extraBody: options.extraBody, + }); + + const createCompletionModel = ( + modelId: OpenRouterCompletionModelId, + settings: OpenRouterCompletionSettings = {}, + ) => + new OpenRouterCompletionLanguageModel(modelId, settings, { + provider: 'openrouter.completion', + url: ({ path }) => `${baseURL}${path}`, + headers: getHeaders, + compatibility, + fetch: options.fetch, + extraBody: options.extraBody, + }); + + const createEmbeddingModel = ( + modelId: OpenRouterEmbeddingModelId, + settings: OpenRouterEmbeddingSettings = {}, + ) => + new OpenRouterEmbeddingModel(modelId, settings, { + provider: 'openrouter.embedding', + url: ({ path }) => `${baseURL}${path}`, + headers: getHeaders, + fetch: options.fetch, + extraBody: options.extraBody, + }); + + const createLanguageModel = ( + modelId: OpenRouterChatModelId | OpenRouterCompletionModelId, + settings?: OpenRouterChatSettings | OpenRouterCompletionSettings, + ) => { + if (new.target) { + throw new Error( + 'The OpenRouter model function cannot be called with the new keyword.', + ); + } + + if (modelId === 'openai/gpt-3.5-turbo-instruct') { + return createCompletionModel( + modelId, + settings as OpenRouterCompletionSettings, + ); + } + + return createChatModel(modelId, settings as 
OpenRouterChatSettings); + }; + + const provider = ( + modelId: OpenRouterChatModelId | OpenRouterCompletionModelId, + settings?: OpenRouterChatSettings | OpenRouterCompletionSettings, + ) => createLanguageModel(modelId, settings); + + provider.languageModel = createLanguageModel; + provider.chat = createChatModel; + provider.completion = createCompletionModel; + provider.textEmbeddingModel = createEmbeddingModel; + provider.embedding = createEmbeddingModel; // deprecated alias for v4 compatibility + + return provider as OpenRouterProvider; +} + +/** +Default OpenRouter provider instance. It uses 'strict' compatibility mode. + */ +export const openrouter = createOpenRouter({ + compatibility: 'strict', // strict for OpenRouter API +}); diff --git a/packages/ai-sdk-provider-2/src/schemas/error-response.test.ts b/packages/ai-sdk-provider-2/src/schemas/error-response.test.ts new file mode 100644 index 0000000..df21892 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/schemas/error-response.test.ts @@ -0,0 +1,52 @@ +import { OpenRouterErrorResponseSchema } from './error-response'; + +describe('OpenRouterErrorResponseSchema', () => { + it('should be valid without a type, code, and param', () => { + const errorWithoutTypeCodeAndParam = { + error: { + message: 'Example error message', + metadata: { provider_name: 'Example Provider' }, + }, + user_id: 'example_1', + }; + + const result = OpenRouterErrorResponseSchema.parse( + errorWithoutTypeCodeAndParam, + ); + + expect(result).toEqual({ + error: { + message: 'Example error message', + code: null, + type: null, + param: null, + metadata: { provider_name: 'Example Provider' }, + }, + user_id: 'example_1', + }); + }); + + it('should be invalid with a type', () => { + const errorWithType = { + error: { + message: 'Example error message with type', + type: 'invalid_request_error', + code: 400, + param: 'canBeAnything', + metadata: { provider_name: 'Example Provider' }, + }, + }; + + const result = 
OpenRouterErrorResponseSchema.parse(errorWithType); + + expect(result).toEqual({ + error: { + code: 400, + message: 'Example error message with type', + type: 'invalid_request_error', + param: 'canBeAnything', + metadata: { provider_name: 'Example Provider' }, + }, + }); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/schemas/error-response.ts b/packages/ai-sdk-provider-2/src/schemas/error-response.ts new file mode 100644 index 0000000..de6ce3b --- /dev/null +++ b/packages/ai-sdk-provider-2/src/schemas/error-response.ts @@ -0,0 +1,32 @@ +import type { ChatErrorError } from '@openrouter/sdk/models'; + +import { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils'; +import { z } from 'zod/v4'; + +// Use SDK's ChatErrorError type but wrap in response schema +// SDK type: { code: string | number | null; message: string; param?: string | null; type?: string | null } +export const OpenRouterErrorResponseSchema = z + .object({ + error: z + .object({ + code: z + .union([z.string(), z.number()]) + .nullable() + .optional() + .default(null), + message: z.string(), + type: z.string().nullable().optional().default(null), + param: z.any().nullable().optional().default(null), + }) + .passthrough() satisfies z.ZodType< + Omit & { code: string | number | null } + >, + }) + .passthrough(); + +export type OpenRouterErrorData = z.infer; + +export const openrouterFailedResponseHandler = createJsonErrorResponseHandler({ + errorSchema: OpenRouterErrorResponseSchema, + errorToMessage: (data: OpenRouterErrorData) => data.error.message, +}); diff --git a/packages/ai-sdk-provider-2/src/schemas/format.ts b/packages/ai-sdk-provider-2/src/schemas/format.ts new file mode 100644 index 0000000..3be2d9c --- /dev/null +++ b/packages/ai-sdk-provider-2/src/schemas/format.ts @@ -0,0 +1,11 @@ +export enum ReasoningFormat { + Unknown = 'unknown', + OpenAIResponsesV1 = 'openai-responses-v1', + XAIResponsesV1 = 'xai-responses-v1', + AnthropicClaudeV1 = 'anthropic-claude-v1', + 
GoogleGeminiV1 = 'google-gemini-v1', +} + +// Anthropic Claude was the first reasoning that we're +// passing back and forth +export const DEFAULT_REASONING_FORMAT = ReasoningFormat.AnthropicClaudeV1; diff --git a/packages/ai-sdk-provider-2/src/schemas/image.ts b/packages/ai-sdk-provider-2/src/schemas/image.ts new file mode 100644 index 0000000..d6df790 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/schemas/image.ts @@ -0,0 +1,23 @@ +import { z } from 'zod/v4'; + +const ImageResponseSchema = z + .object({ + type: z.literal('image_url'), + image_url: z + .object({ + url: z.string(), + }) + .passthrough(), + }) + .passthrough(); + +export type ImageResponse = z.infer; + +const ImageResponseWithUnknownSchema = z.union([ + ImageResponseSchema, + z.unknown().transform(() => null), +]); + +export const ImageResponseArraySchema = z + .array(ImageResponseWithUnknownSchema) + .transform((d) => d.filter((d): d is ImageResponse => !!d)); diff --git a/packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts b/packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts new file mode 100644 index 0000000..4f9c0e1 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts @@ -0,0 +1,84 @@ +import { z } from 'zod/v4'; +import { ReasoningDetailUnionSchema } from './reasoning-details'; + +/** + * Schema for file annotations from FileParserPlugin + */ +export const FileAnnotationSchema = z + .object({ + type: z.literal('file'), + file: z + .object({ + hash: z.string(), + name: z.string(), + content: z + .array( + z + .object({ + type: z.string(), + text: z.string().optional(), + }) + .catchall(z.any()), + ) + .optional(), + }) + .catchall(z.any()), + }) + .catchall(z.any()); + +export type FileAnnotation = z.infer; + +/** + * Schema for OpenRouter provider metadata attached to responses + */ +export const OpenRouterProviderMetadataSchema = z + .object({ + provider: z.string(), + reasoning_details: z.array(ReasoningDetailUnionSchema).optional(), + 
annotations: z.array(FileAnnotationSchema).optional(), + usage: z + .object({ + promptTokens: z.number(), + promptTokensDetails: z + .object({ + cachedTokens: z.number(), + }) + .catchall(z.any()) + .optional(), + completionTokens: z.number(), + completionTokensDetails: z + .object({ + reasoningTokens: z.number(), + }) + .catchall(z.any()) + .optional(), + totalTokens: z.number(), + cost: z.number().optional(), + costDetails: z + .object({ + upstreamInferenceCost: z.number(), + }) + .catchall(z.any()) + .optional(), + }) + .catchall(z.any()), + }) + .catchall(z.any()); + +export type OpenRouterProviderMetadata = z.infer< + typeof OpenRouterProviderMetadataSchema +>; + +/** + * Schema for parsing provider options that may contain reasoning_details and annotations + */ +export const OpenRouterProviderOptionsSchema = z + .object({ + openrouter: z + .object({ + reasoning_details: z.array(ReasoningDetailUnionSchema).optional(), + annotations: z.array(FileAnnotationSchema).optional(), + }) + .optional(), + }) + .optional(); diff --git a/packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts b/packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts new file mode 100644 index 0000000..916374b --- /dev/null +++ b/packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts @@ -0,0 +1,92 @@ +import { z } from 'zod/v4'; +import { isDefinedOrNotNull } from '../utils/type-guards'; +import { ReasoningFormat } from './format'; + +export enum ReasoningDetailType { + Summary = 'reasoning.summary', + Encrypted = 'reasoning.encrypted', + Text = 'reasoning.text', +} + +export const CommonReasoningDetailSchema = z + .object({ + id: z.string().nullish(), + format: z.enum(ReasoningFormat).nullish(), + index: z.number().optional(), + }) + .loose(); + +export const ReasoningDetailSummarySchema = z + .object({ + type: z.literal(ReasoningDetailType.Summary), + summary: z.string(), + }) + .extend(CommonReasoningDetailSchema.shape); +export type ReasoningDetailSummary = z.infer< + 
typeof ReasoningDetailSummarySchema +>; + +export const ReasoningDetailEncryptedSchema = z + .object({ + type: z.literal(ReasoningDetailType.Encrypted), + data: z.string(), + }) + .extend(CommonReasoningDetailSchema.shape); + +export type ReasoningDetailEncrypted = z.infer< + typeof ReasoningDetailEncryptedSchema +>; + +export const ReasoningDetailTextSchema = z + .object({ + type: z.literal(ReasoningDetailType.Text), + text: z.string().nullish(), + signature: z.string().nullish(), + }) + .extend(CommonReasoningDetailSchema.shape); + +export type ReasoningDetailText = z.infer; + +export const ReasoningDetailUnionSchema = z.union([ + ReasoningDetailSummarySchema, + ReasoningDetailEncryptedSchema, + ReasoningDetailTextSchema, +]); + +export type ReasoningDetailUnion = z.infer; + +const ReasoningDetailsWithUnknownSchema = z.union([ + ReasoningDetailUnionSchema, + z.unknown().transform(() => null), +]); + +export const ReasoningDetailArraySchema = z + .array(ReasoningDetailsWithUnknownSchema) + .transform((d) => d.filter((d): d is ReasoningDetailUnion => !!d)); + +export const OutputUnionToReasoningDetailsSchema = z.union([ + z + .object({ + delta: z.object({ + reasoning_details: z.array(ReasoningDetailsWithUnknownSchema), + }), + }) + .transform((data) => + data.delta.reasoning_details.filter(isDefinedOrNotNull), + ), + z + .object({ + message: z.object({ + reasoning_details: z.array(ReasoningDetailsWithUnknownSchema), + }), + }) + .transform((data) => + data.message.reasoning_details.filter(isDefinedOrNotNull), + ), + z + .object({ + text: z.string(), + reasoning_details: z.array(ReasoningDetailsWithUnknownSchema), + }) + .transform((data) => data.reasoning_details.filter(isDefinedOrNotNull)), +]); diff --git a/packages/ai-sdk-provider-2/src/test-utils/test-server.ts b/packages/ai-sdk-provider-2/src/test-utils/test-server.ts new file mode 100644 index 0000000..84332e5 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/test-utils/test-server.ts @@ -0,0 +1,148 @@ +/** 
+ * Simple test server utility to replace the removed @ai-sdk/provider-utils/test createTestServer + * This provides HTTP request interception for testing purposes. + */ + +import type { JsonBodyType } from 'msw'; +import type { SetupServerApi } from 'msw/node'; + +import { HttpResponse, http } from 'msw'; +import { setupServer } from 'msw/node'; +import { afterAll, afterEach, beforeAll } from 'vitest'; + +// Re-export utilities that were previously in @ai-sdk/provider-utils/test +export { convertReadableStreamToArray } from '@ai-sdk/provider-utils/test'; + +type ResponseConfig = { + type: 'json-value' | 'stream-chunks' | 'error'; + body?: JsonBodyType; + chunks?: string[]; + status?: number; + headers?: Record; +}; + +type CallRecord = { + requestBody: string; + requestBodyJson: Promise; + requestHeaders: Record; +}; + +type UrlConfig = { + response?: ResponseConfig; +}; + +type UrlConfigWithCalls = UrlConfig & { + calls: CallRecord[]; +}; + +type TestServerConfig = Record; + +export function createTestServer(config: TestServerConfig): { + urls: Record; + server: SetupServerApi; + calls: CallRecord[]; +} { + const urls: Record = {}; + const calls: CallRecord[] = []; + + // Initialize URL configs with call tracking + for (const [url, urlConfig] of Object.entries(config)) { + urls[url] = { ...urlConfig, calls: [] }; + } + + const handlers = Object.keys(config).map((url) => + http.post(url, async ({ request }) => { + const urlConfig = urls[url]!; + + // Record the call + const bodyText = await request.clone().text(); + + const headers: Record = {}; + request.headers.forEach((value, key) => { + headers[key] = value; + }); + + const callRecord: CallRecord = { + requestBody: bodyText, + requestBodyJson: Promise.resolve().then(() => { + try { + return JSON.parse(bodyText); + } catch { + return bodyText; + } + }), + requestHeaders: headers, + }; + + urlConfig.calls.push(callRecord); + calls.push(callRecord); + + const response = urlConfig.response; + + if (!response) { + 
return HttpResponse.json( + { error: 'No response configured' }, + { status: 500 }, + ); + } + + const status = response.status ?? 200; + const responseHeaders = response.headers ?? {}; + + switch (response.type) { + case 'json-value': + return HttpResponse.json(response.body ?? null, { + status, + headers: responseHeaders, + }); + + case 'stream-chunks': { + const encoder = new TextEncoder(); + const chunks = response.chunks ?? []; + const stream = new ReadableStream({ + async start(controller) { + for (const chunk of chunks) { + controller.enqueue(encoder.encode(chunk)); + } + controller.close(); + }, + }); + return new HttpResponse(stream, { + status, + headers: { + 'Content-Type': 'text/event-stream', + ...responseHeaders, + }, + }); + } + + case 'error': + return HttpResponse.json(response.body ?? { error: 'Test error' }, { + status: response.status ?? 500, + headers: responseHeaders, + }); + + default: + return HttpResponse.json(response.body ?? null, { + status, + headers: responseHeaders, + }); + } + }), + ); + + const server = setupServer(...handlers); + + beforeAll(() => server.listen({ onUnhandledRequest: 'bypass' })); + afterEach(() => { + server.resetHandlers(); + // Clear calls between tests + calls.length = 0; + for (const url of Object.keys(urls)) { + urls[url]!.calls = []; + } + }); + afterAll(() => server.close()); + + return { urls, server, calls }; +} diff --git a/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts b/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts new file mode 100644 index 0000000..20fff19 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts @@ -0,0 +1,59 @@ +import type { ModelMessage } from 'ai'; + +import { streamText } from 'ai'; +import { describe, expect, it, vi } from 'vitest'; +import { createOpenRouter } from '../provider'; +import { createTestServer } from '../test-utils/test-server'; + +// Add type assertions for the mocked classes +const TEST_MESSAGES: 
ModelMessage[] = [ + { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, +]; + +describe('providerOptions', () => { + const server = createTestServer({ + 'https://openrouter.ai/api/v1/chat/completions': { + response: { + type: 'stream-chunks', + chunks: [], + }, + }, + }); + + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('should set providerOptions openrouter to extra body', async () => { + const openrouter = createOpenRouter({ + apiKey: 'test', + }); + const model = openrouter('anthropic/claude-3.7-sonnet'); + + await streamText({ + model: model, + messages: TEST_MESSAGES, + providerOptions: { + openrouter: { + reasoning: { + max_tokens: 1000, + }, + }, + }, + }).consumeStream(); + + expect(await server.calls[0]?.requestBodyJson).toStrictEqual({ + messages: [ + { + content: 'Hello', + role: 'user', + }, + ], + reasoning: { + max_tokens: 1000, + }, + model: 'anthropic/claude-3.7-sonnet', + stream: true, + }); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts b/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts new file mode 100644 index 0000000..75b73d0 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts @@ -0,0 +1,177 @@ +import type { OpenRouterChatSettings } from '../types/openrouter-chat-settings'; + +import { describe, expect, it } from 'vitest'; +import { OpenRouterChatLanguageModel } from '../chat'; +import { + convertReadableStreamToArray, + createTestServer, +} from '../test-utils/test-server'; + +describe('OpenRouter Streaming Usage Accounting', () => { + const server = createTestServer({ + 'https://api.openrouter.ai/chat/completions': { + response: { type: 'stream-chunks', chunks: [] }, + }, + }); + + function prepareStreamResponse(includeUsage = true) { + const chunks = [ + `data: {"id":"test-id","model":"test-model","choices":[{"delta":{"content":"Hello"},"index":0}]}\n\n`, + `data: {"choices":[{"finish_reason":"stop","index":0}]}\n\n`, 
+ ]; + + if (includeUsage) { + chunks.push( + `data: ${JSON.stringify({ + usage: { + prompt_tokens: 10, + prompt_tokens_details: { cached_tokens: 5 }, + completion_tokens: 20, + completion_tokens_details: { reasoning_tokens: 8 }, + total_tokens: 30, + cost: 0.0015, + cost_details: { upstream_inference_cost: 0.0019 }, + }, + choices: [], + })}\n\n`, + ); + } + + chunks.push('data: [DONE]\n\n'); + + server.urls['https://api.openrouter.ai/chat/completions']!.response = { + type: 'stream-chunks', + chunks, + }; + } + + it('should include stream_options.include_usage in request when enabled', async () => { + prepareStreamResponse(); + + // Create model with usage accounting enabled + const settings: OpenRouterChatSettings = { + usage: { include: true }, + }; + + const model = new OpenRouterChatLanguageModel('test-model', settings, { + provider: 'openrouter.chat', + url: () => 'https://api.openrouter.ai/chat/completions', + headers: () => ({}), + compatibility: 'strict', + fetch: global.fetch, + }); + + // Call the model with streaming + await model.doStream({ + prompt: [ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + ], + maxOutputTokens: 100, + }); + + // Verify stream options + const requestBody = (await server.calls[0]!.requestBodyJson) as Record< + string, + unknown + >; + expect(requestBody).toBeDefined(); + expect(requestBody.stream).toBe(true); + expect(requestBody.stream_options).toEqual({ + include_usage: true, + }); + }); + + it('should include provider-specific metadata in finish event when usage accounting is enabled', async () => { + prepareStreamResponse(true); + + // Create model with usage accounting enabled + const settings: OpenRouterChatSettings = { + usage: { include: true }, + }; + + const model = new OpenRouterChatLanguageModel('test-model', settings, { + provider: 'openrouter.chat', + url: () => 'https://api.openrouter.ai/chat/completions', + headers: () => ({}), + compatibility: 'strict', + fetch: global.fetch, + }); + + 
// Call the model with streaming + const result = await model.doStream({ + prompt: [ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + ], + maxOutputTokens: 100, + }); + + // Read all chunks from the stream + const chunks = await convertReadableStreamToArray(result.stream); + + // Find the finish chunk + const finishChunk = chunks.find((chunk) => chunk.type === 'finish'); + expect(finishChunk).toBeDefined(); + + // Verify metadata is included + expect(finishChunk?.providerMetadata).toBeDefined(); + const openrouterData = finishChunk?.providerMetadata?.openrouter; + expect(openrouterData).toBeDefined(); + + const usage = openrouterData?.usage; + expect(usage).toMatchObject({ + promptTokens: 10, + completionTokens: 20, + totalTokens: 30, + cost: 0.0015, + costDetails: { upstreamInferenceCost: 0.0019 }, + promptTokensDetails: { cachedTokens: 5 }, + completionTokensDetails: { reasoningTokens: 8 }, + }); + }); + + it('should not include provider-specific metadata when usage accounting is disabled', async () => { + prepareStreamResponse(false); + + // Create model with usage accounting disabled + const settings: OpenRouterChatSettings = { + // No usage property + }; + + const model = new OpenRouterChatLanguageModel('test-model', settings, { + provider: 'openrouter.chat', + url: () => 'https://api.openrouter.ai/chat/completions', + headers: () => ({}), + compatibility: 'strict', + fetch: global.fetch, + }); + + // Call the model with streaming + const result = await model.doStream({ + prompt: [ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + ], + maxOutputTokens: 100, + }); + + // Read all chunks from the stream + const chunks = await convertReadableStreamToArray(result.stream); + + // Find the finish chunk + const finishChunk = chunks.find((chunk) => chunk.type === 'finish'); + expect(finishChunk).toBeDefined(); + + // Verify that provider metadata is not included + 
expect(finishChunk?.providerMetadata?.openrouter).toStrictEqual({ + usage: {}, + }); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts b/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts new file mode 100644 index 0000000..9949a76 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts @@ -0,0 +1,326 @@ +import type { OpenRouterChatSettings } from '../types/openrouter-chat-settings'; + +import { describe, expect, it } from 'vitest'; +import { OpenRouterChatLanguageModel } from '../chat'; +import { createTestServer } from '../test-utils/test-server'; + +describe('OpenRouter Usage Accounting', () => { + const server = createTestServer({ + 'https://api.openrouter.ai/chat/completions': { + response: { type: 'json-value', body: {} }, + }, + }); + + function prepareJsonResponse(includeUsage = true) { + const response = { + id: 'test-id', + model: 'test-model', + choices: [ + { + message: { + role: 'assistant', + content: 'Hello, I am an AI assistant.', + }, + index: 0, + finish_reason: 'stop', + }, + ], + usage: includeUsage + ? 
{ + prompt_tokens: 10, + prompt_tokens_details: { + cached_tokens: 5, + }, + completion_tokens: 20, + completion_tokens_details: { + reasoning_tokens: 8, + }, + total_tokens: 30, + cost: 0.0015, + cost_details: { + upstream_inference_cost: 0.0019, + }, + } + : undefined, + }; + + server.urls['https://api.openrouter.ai/chat/completions']!.response = { + type: 'json-value', + body: response, + }; + } + + it('should include usage parameter in the request when enabled', async () => { + prepareJsonResponse(); + + // Create model with usage accounting enabled + const settings: OpenRouterChatSettings = { + usage: { include: true }, + }; + + const model = new OpenRouterChatLanguageModel('test-model', settings, { + provider: 'openrouter.chat', + url: () => 'https://api.openrouter.ai/chat/completions', + headers: () => ({}), + compatibility: 'strict', + fetch: global.fetch, + }); + + // Call the model + await model.doGenerate({ + prompt: [ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + ], + maxOutputTokens: 100, + }); + + // Check request contains usage parameter + const requestBody = (await server.calls[0]!.requestBodyJson) as Record< + string, + unknown + >; + expect(requestBody).toBeDefined(); + expect(requestBody).toHaveProperty('usage'); + expect(requestBody.usage).toEqual({ include: true }); + }); + + it('should include provider-specific metadata in response when usage accounting is enabled', async () => { + prepareJsonResponse(); + + // Create model with usage accounting enabled + const settings: OpenRouterChatSettings = { + usage: { include: true }, + }; + + const model = new OpenRouterChatLanguageModel('test-model', settings, { + provider: 'openrouter.chat', + url: () => 'https://api.openrouter.ai/chat/completions', + headers: () => ({}), + compatibility: 'strict', + fetch: global.fetch, + }); + + // Call the model + const result = await model.doGenerate({ + prompt: [ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + 
], + maxOutputTokens: 100, + }); + + // Check result contains provider metadata + expect(result.providerMetadata).toBeDefined(); + const providerData = result.providerMetadata; + + // Check for OpenRouter usage data + expect(providerData?.openrouter).toBeDefined(); + const openrouterData = providerData?.openrouter as Record; + expect(openrouterData.usage).toBeDefined(); + + const usage = openrouterData.usage; + expect(usage).toMatchObject({ + promptTokens: 10, + completionTokens: 20, + totalTokens: 30, + cost: 0.0015, + costDetails: { + upstreamInferenceCost: 0.0019, + }, + promptTokensDetails: { + cachedTokens: 5, + }, + completionTokensDetails: { + reasoningTokens: 8, + }, + }); + }); + + it('should not include provider-specific metadata when usage accounting is disabled', async () => { + prepareJsonResponse(); + + // Create model with usage accounting disabled + const settings: OpenRouterChatSettings = { + // No usage property + }; + + const model = new OpenRouterChatLanguageModel('test-model', settings, { + provider: 'openrouter.chat', + url: () => 'https://api.openrouter.ai/chat/completions', + headers: () => ({}), + compatibility: 'strict', + fetch: global.fetch, + }); + + // Call the model + const result = await model.doGenerate({ + prompt: [ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + ], + maxOutputTokens: 100, + }); + + // Verify that OpenRouter metadata is not included + expect(result.providerMetadata?.openrouter?.usage).toStrictEqual({ + promptTokens: 10, + completionTokens: 20, + totalTokens: 30, + cost: 0.0015, + costDetails: { + upstreamInferenceCost: 0.0019, + }, + promptTokensDetails: { + cachedTokens: 5, + }, + completionTokensDetails: { + reasoningTokens: 8, + }, + }); + }); + + it('should exclude token details from providerMetadata when not present in response', async () => { + // Prepare a response without token details + const response = { + id: 'test-id', + model: 'test-model', + choices: [ + { + message: { + role: 
'assistant', + content: 'Hello, I am an AI assistant.', + }, + index: 0, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 20, + total_tokens: 30, + cost: 0.0015, + // No prompt_tokens_details, completion_tokens_details, or cost_details + }, + }; + + server.urls['https://api.openrouter.ai/chat/completions']!.response = { + type: 'json-value', + body: response, + }; + + const settings: OpenRouterChatSettings = { + usage: { include: true }, + }; + + const model = new OpenRouterChatLanguageModel('test-model', settings, { + provider: 'openrouter.chat', + url: () => 'https://api.openrouter.ai/chat/completions', + headers: () => ({}), + compatibility: 'strict', + fetch: global.fetch, + }); + + const result = await model.doGenerate({ + prompt: [ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + ], + maxOutputTokens: 100, + }); + + const usage = ( + result.providerMetadata?.openrouter as Record + )?.usage; + + // Should include basic token counts + expect(usage).toMatchObject({ + promptTokens: 10, + completionTokens: 20, + totalTokens: 30, + cost: 0.0015, + }); + + // Should NOT include token details when not present in response + expect(usage).not.toHaveProperty('promptTokensDetails'); + expect(usage).not.toHaveProperty('completionTokensDetails'); + expect(usage).not.toHaveProperty('costDetails'); + }); + + it('should include only present token details in providerMetadata', async () => { + // Prepare a response with only cached_tokens (no reasoning or cost details) + const response = { + id: 'test-id', + model: 'test-model', + choices: [ + { + message: { + role: 'assistant', + content: 'Hello, I am an AI assistant.', + }, + index: 0, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 10, + prompt_tokens_details: { + cached_tokens: 5, + }, + completion_tokens: 20, + total_tokens: 30, + cost: 0.0015, + // No completion_tokens_details or cost_details + }, + }; + + 
server.urls['https://api.openrouter.ai/chat/completions']!.response = { + type: 'json-value', + body: response, + }; + + const settings: OpenRouterChatSettings = { + usage: { include: true }, + }; + + const model = new OpenRouterChatLanguageModel('test-model', settings, { + provider: 'openrouter.chat', + url: () => 'https://api.openrouter.ai/chat/completions', + headers: () => ({}), + compatibility: 'strict', + fetch: global.fetch, + }); + + const result = await model.doGenerate({ + prompt: [ + { + role: 'user', + content: [{ type: 'text', text: 'Hello' }], + }, + ], + maxOutputTokens: 100, + }); + + const usage = ( + result.providerMetadata?.openrouter as Record + )?.usage; + + // Should include promptTokensDetails since cached_tokens is present + expect(usage).toHaveProperty('promptTokensDetails'); + expect((usage as Record).promptTokensDetails).toEqual({ + cachedTokens: 5, + }); + + // Should NOT include completionTokensDetails or costDetails + expect(usage).not.toHaveProperty('completionTokensDetails'); + expect(usage).not.toHaveProperty('costDetails'); + }); +}); diff --git a/packages/ai-sdk-provider-2/src/types.ts b/packages/ai-sdk-provider-2/src/types.ts deleted file mode 100644 index d0d0c8a..0000000 --- a/packages/ai-sdk-provider-2/src/types.ts +++ /dev/null @@ -1,47 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1 } from "@ai-sdk/provider"; - -// Re-export the LanguageModelV1 type to ensure proper type compatibility -export type { LanguageModelV1 }; - -// Export our model types with explicit type constraints -export type HyperbolicLanguageModel = LanguageModelV1; - -export type HyperbolicProviderOptions = { - models?: string[]; - - /** - * https://openrouter.ai/docs/use-cases/reasoning-tokens - * One of `max_tokens` or `effort` is required. - * If `exclude` is true, reasoning will be removed from the response. 
Default is false. - */ - reasoning?: { - exclude?: boolean; - } & ( - | { - max_tokens: number; - } - | { - effort: "high" | "medium" | "low"; - } - ); - - /** - * A unique identifier representing your end-user, which can - * help Hyperbolic to monitor and detect abuse. - */ - user?: string; -}; - -export type HyperbolicSharedSettings = HyperbolicProviderOptions & { - /** - * @deprecated use `reasoning` instead - */ - includeReasoning?: boolean; - - // eslint-disable-next-line @typescript-eslint/no-explicit-any - extraBody?: Record; -}; diff --git a/packages/ai-sdk-provider-2/src/types/index.ts b/packages/ai-sdk-provider-2/src/types/index.ts new file mode 100644 index 0000000..338a897 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/types/index.ts @@ -0,0 +1,72 @@ +import type { LanguageModelV3, LanguageModelV3Prompt } from '@ai-sdk/provider'; + +export type { LanguageModelV3, LanguageModelV3Prompt }; + +export * from './openrouter-embedding-settings'; + +export type OpenRouterProviderOptions = { + models?: string[]; + + /** + * https://openrouter.ai/docs/use-cases/reasoning-tokens + * One of `max_tokens` or `effort` is required. + * If `exclude` is true, reasoning will be removed from the response. Default is false. + */ + reasoning?: { + enabled?: boolean; + exclude?: boolean; + } & ( + | { + max_tokens: number; + } + | { + effort: 'high' | 'medium' | 'low'; + } + ); + + /** + * A unique identifier representing your end-user, which can + * help OpenRouter to monitor and detect abuse. + */ + user?: string; +}; + +export type OpenRouterSharedSettings = OpenRouterProviderOptions & { + /** + * @deprecated use `reasoning` instead + */ + includeReasoning?: boolean; + + extraBody?: Record; + + /** + * Enable usage accounting to get detailed token usage information. + * https://openrouter.ai/docs/use-cases/usage-accounting + */ + usage?: { + /** + * When true, includes token usage information in the response. 
+ */ + include: boolean; + }; +}; + +/** + * Usage accounting response + * @see https://openrouter.ai/docs/use-cases/usage-accounting + */ +export type OpenRouterUsageAccounting = { + promptTokens: number; + promptTokensDetails?: { + cachedTokens: number; + }; + completionTokens: number; + completionTokensDetails?: { + reasoningTokens: number; + }; + totalTokens: number; + cost?: number; + costDetails?: { + upstreamInferenceCost: number; + }; +}; diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts b/packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts new file mode 100644 index 0000000..0c99504 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts @@ -0,0 +1,106 @@ +import type { FileAnnotation } from '@/src/schemas/provider-metadata'; +import type { ReasoningDetailUnion } from '@/src/schemas/reasoning-details'; + +// Type for OpenRouter Cache Control following Anthropic's pattern +export type OpenRouterCacheControl = { type: 'ephemeral' }; + +export type OpenRouterChatCompletionsInput = Array; + +export type ChatCompletionMessageParam = + | ChatCompletionSystemMessageParam + | ChatCompletionUserMessageParam + | ChatCompletionAssistantMessageParam + | ChatCompletionToolMessageParam; + +export interface ChatCompletionSystemMessageParam { + role: 'system'; + content: string; + cache_control?: OpenRouterCacheControl; +} + +export interface ChatCompletionUserMessageParam { + role: 'user'; + content: string | Array; + cache_control?: OpenRouterCacheControl; +} + +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage + | ChatCompletionContentPartFile + | ChatCompletionContentPartInputAudio; + +export interface ChatCompletionContentPartFile { + type: 'file'; + file: { + filename?: string; + file_data?: string; + file_id?: string; + }; + cache_control?: OpenRouterCacheControl; +} + +export interface 
ChatCompletionContentPartImage { + type: 'image_url'; + image_url: { + url: string; + }; + cache_control?: OpenRouterCacheControl; +} + +export interface ChatCompletionContentPartText { + type: 'text'; + text: string; + reasoning?: string | null; + cache_control?: OpenRouterCacheControl; +} + +/** https://openrouter.ai/docs/guides/overview/multimodal/audio */ +export const OPENROUTER_AUDIO_FORMATS = [ + 'wav', + 'mp3', + 'aiff', + 'aac', + 'ogg', + 'flac', + 'm4a', + 'pcm16', + 'pcm24', +] as const; + +export type OpenRouterAudioFormat = (typeof OPENROUTER_AUDIO_FORMATS)[number]; + +export interface ChatCompletionContentPartInputAudio { + type: 'input_audio'; + input_audio: { + data: string; + format: OpenRouterAudioFormat; + }; + cache_control?: OpenRouterCacheControl; +} + +export interface ChatCompletionAssistantMessageParam { + role: 'assistant'; + content?: string | null; + reasoning?: string | null; + reasoning_details?: ReasoningDetailUnion[]; + annotations?: FileAnnotation[]; + tool_calls?: Array; + cache_control?: OpenRouterCacheControl; +} + +export interface ChatCompletionMessageToolCall { + type: 'function'; + id: string; + function: { + arguments: string; + name: string; + }; +} + +export interface ChatCompletionToolMessageParam { + role: 'tool'; + content: string; + tool_call_id: string; + cache_control?: OpenRouterCacheControl; +} diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts b/packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts new file mode 100644 index 0000000..e06b653 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts @@ -0,0 +1,158 @@ +import type * as models from '@openrouter/sdk/models'; +import type { OpenRouterSharedSettings } from '..'; + +// https://openrouter.ai/api/v1/models +export type OpenRouterChatModelId = string; + +export type OpenRouterChatSettings = { + /** +Modify the likelihood of specified tokens appearing in the completion. 
+ +Accepts a JSON object that maps tokens (specified by their token ID in +the GPT tokenizer) to an associated bias value from -100 to 100. You +can use this tokenizer tool to convert text to token IDs. Mathematically, +the bias is added to the logits generated by the model prior to sampling. +The exact effect will vary per model, but values between -1 and 1 should +decrease or increase likelihood of selection; values like -100 or 100 +should result in a ban or exclusive selection of the relevant token. + +As an example, you can pass {"50256": -100} to prevent the <|endoftext|> +token from being generated. +*/ + logitBias?: Record; + + /** +Return the log probabilities of the tokens. Including logprobs will increase +the response size and can slow down response times. However, it can +be useful to understand better how the model is behaving. + +Setting to true will return the log probabilities of the tokens that +were generated. + +Setting to a number will return the log probabilities of the top n +tokens that were generated. +*/ + logprobs?: boolean | number; + + /** +Whether to enable parallel function calling during tool use. Default to true. + */ + parallelToolCalls?: boolean; + + /** +A unique identifier representing your end-user, which can help OpenRouter to +monitor and detect abuse. Learn more. 
+*/ + user?: string; + + /** + * Plugin configurations for enabling various capabilities + */ + plugins?: Array< + | { + id: models.IdWeb; + max_results?: number; + search_prompt?: string; + engine?: models.Engine; + } + | { + id: models.IdFileParser; + max_files?: number; + pdf?: { + engine?: models.PdfEngine; + }; + } + | { + id: models.IdModeration; + } + >; + + /** + * Built-in web search options for models that support native web search + */ + web_search_options?: { + /** + * Maximum number of search results to include + */ + max_results?: number; + /** + * Custom search prompt to guide the search query + */ + search_prompt?: string; + /** + * Search engine to use for web search + * - "native": Use provider's built-in web search + * - "exa": Use Exa's search API + * - undefined: Native if supported, otherwise Exa + * @see https://openrouter.ai/docs/features/web-search + */ + engine?: models.Engine; + }; + + /** + * Debug options for troubleshooting API requests. + * Only works with streaming requests. + * @see https://openrouter.ai/docs/api-reference/debugging + */ + debug?: { + /** + * When true, echoes back the request body that was sent to the upstream provider. + * The debug data will be returned as the first chunk in the stream with a `debug.echo_upstream_body` field. + * Sensitive data like user IDs and base64 content will be redacted. + */ + echo_upstream_body?: boolean; + }; + + /** + * Provider routing preferences to control request routing behavior + */ + provider?: { + /** + * List of provider slugs to try in order (e.g. 
["anthropic", "openai"]) + */ + order?: string[]; + /** + * Whether to allow backup providers when primary is unavailable (default: true) + */ + allow_fallbacks?: boolean; + /** + * Only use providers that support all parameters in your request (default: false) + */ + require_parameters?: boolean; + /** + * Control whether to use providers that may store data + */ + data_collection?: models.DataCollection; + /** + * List of provider slugs to allow for this request + */ + only?: string[]; + /** + * List of provider slugs to skip for this request + */ + ignore?: string[]; + /** + * List of quantization levels to filter by (e.g. ["int4", "int8"]) + */ + quantizations?: Array; + /** + * Sort providers by price, throughput, or latency + */ + sort?: models.ProviderSort; + /** + * Maximum pricing you want to pay for this request + */ + max_price?: { + prompt?: number | string; + completion?: number | string; + image?: number | string; + audio?: number | string; + request?: number | string; + }; + /** + * Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. + * When true, only endpoints that do not retain prompts will be used. + */ + zdr?: boolean; + }; +} & OpenRouterSharedSettings; diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts b/packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts new file mode 100644 index 0000000..b5f6b99 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts @@ -0,0 +1,39 @@ +import type { OpenRouterSharedSettings } from '.'; + +export type OpenRouterCompletionModelId = string; + +export type OpenRouterCompletionSettings = { + /** +Modify the likelihood of specified tokens appearing in the completion. + +Accepts a JSON object that maps tokens (specified by their token ID in +the GPT tokenizer) to an associated bias value from -100 to 100. You +can use this tokenizer tool to convert text to token IDs. 
Mathematically, +the bias is added to the logits generated by the model prior to sampling. +The exact effect will vary per model, but values between -1 and 1 should +decrease or increase likelihood of selection; values like -100 or 100 +should result in a ban or exclusive selection of the relevant token. + +As an example, you can pass {"50256": -100} to prevent the <|endoftext|> +token from being generated. + */ + logitBias?: Record; + + /** +Return the log probabilities of the tokens. Including logprobs will increase +the response size and can slow down response times. However, it can +be useful to better understand how the model is behaving. + +Setting to true will return the log probabilities of the tokens that +were generated. + +Setting to a number will return the log probabilities of the top n +tokens that were generated. + */ + logprobs?: boolean | number; + + /** +The suffix that comes after a completion of inserted text. + */ + suffix?: string; +} & OpenRouterSharedSettings; diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts b/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts new file mode 100644 index 0000000..a20a60b --- /dev/null +++ b/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts @@ -0,0 +1,56 @@ +import type { OpenRouterSharedSettings } from '..'; + +// https://openrouter.ai/api/v1/models +export type OpenRouterEmbeddingModelId = string; + +export type OpenRouterEmbeddingSettings = { + /** + * A unique identifier representing your end-user, which can help OpenRouter to + * monitor and detect abuse. + */ + user?: string; + + /** + * Provider routing preferences to control request routing behavior + */ + provider?: { + /** + * List of provider slugs to try in order (e.g. 
["openai", "voyageai"]) + */ + order?: string[]; + /** + * Whether to allow backup providers when primary is unavailable (default: true) + */ + allow_fallbacks?: boolean; + /** + * Only use providers that support all parameters in your request (default: false) + */ + require_parameters?: boolean; + /** + * Control whether to use providers that may store data + */ + data_collection?: 'allow' | 'deny'; + /** + * List of provider slugs to allow for this request + */ + only?: string[]; + /** + * List of provider slugs to skip for this request + */ + ignore?: string[]; + /** + * Sort providers by price, throughput, or latency + */ + sort?: 'price' | 'throughput' | 'latency'; + /** + * Maximum pricing you want to pay for this request + */ + max_price?: { + prompt?: number | string; + completion?: number | string; + image?: number | string; + audio?: number | string; + request?: number | string; + }; + }; +} & OpenRouterSharedSettings; diff --git a/packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts b/packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts new file mode 100644 index 0000000..ad29cc5 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts @@ -0,0 +1,43 @@ +import type { LanguageModelV3FinishReason } from '@ai-sdk/provider'; + +type UnifiedFinishReason = + | 'stop' + | 'length' + | 'content-filter' + | 'tool-calls' + | 'error' + | 'other'; + +function mapToUnified( + finishReason: string | null | undefined, +): UnifiedFinishReason { + switch (finishReason) { + case 'stop': + return 'stop'; + case 'length': + return 'length'; + case 'content_filter': + return 'content-filter'; + case 'function_call': + case 'tool_calls': + return 'tool-calls'; + default: + return 'other'; + } +} + +export function mapOpenRouterFinishReason( + finishReason: string | null | undefined, +): LanguageModelV3FinishReason { + return { + unified: mapToUnified(finishReason), + raw: finishReason ?? 
undefined,
+  };
+}
+
+export function createFinishReason(
+  unified: UnifiedFinishReason,
+  raw?: string,
+): LanguageModelV3FinishReason {
+  return { unified, raw };
+}
diff --git a/packages/ai-sdk-provider-2/src/utils/remove-undefined.ts b/packages/ai-sdk-provider-2/src/utils/remove-undefined.ts
new file mode 100644
index 0000000..2de511a
--- /dev/null
+++ b/packages/ai-sdk-provider-2/src/utils/remove-undefined.ts
@@ -0,0 +1,12 @@
+/**
+ * Removes entries from a record where the value is null or undefined.
+ * @param record - The input object whose entries may be null or undefined.
+ * @returns A new object containing only entries with non-null and non-undefined values.
+ */
+export function removeUndefinedEntries(
+  record: Record<string, string | undefined>,
+): Record<string, string> {
+  return Object.fromEntries(
+    Object.entries(record).filter(([, value]) => value != null),
+  ) as Record<string, string>;
+}
diff --git a/packages/ai-sdk-provider-2/src/utils/type-guards.ts b/packages/ai-sdk-provider-2/src/utils/type-guards.ts
new file mode 100644
index 0000000..4a0f6e8
--- /dev/null
+++ b/packages/ai-sdk-provider-2/src/utils/type-guards.ts
@@ -0,0 +1,6 @@
+/**
+ * Type guard to check if a value is defined and not null
+ */
+export function isDefinedOrNotNull<T>(value: T | null | undefined): value is T {
+  return value !== null && value !== undefined;
+}
diff --git a/packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts b/packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts
new file mode 100644
index 0000000..06a6eca
--- /dev/null
+++ b/packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts
@@ -0,0 +1,30 @@
+/**
+ * Appends suffix parts to the `user-agent` header.
+ * If a `user-agent` header already exists, the suffix parts are appended to it.
+ * If no `user-agent` header exists, a new one is created with the suffix parts.
+ * Automatically removes undefined entries from the headers.
+ * + * @param headers - The original headers. + * @param userAgentSuffixParts - The parts to append to the `user-agent` header. + * @returns The new headers with the `user-agent` header set or updated. + */ +export function withUserAgentSuffix( + headers: HeadersInit | Record | undefined, + ...userAgentSuffixParts: string[] +): Record { + const cleanedHeaders = removeUndefinedEntries( + (headers as Record) ?? {}, + ); + + const currentUserAgentHeader = cleanedHeaders['user-agent'] || ''; + const newUserAgent = [currentUserAgentHeader, ...userAgentSuffixParts] + .filter(Boolean) + .join(' '); + + return { + ...cleanedHeaders, + 'user-agent': newUserAgent, + }; +} diff --git a/packages/ai-sdk-provider-2/src/version.ts b/packages/ai-sdk-provider-2/src/version.ts new file mode 100644 index 0000000..531fbc9 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/version.ts @@ -0,0 +1,4 @@ +// Version string of this package injected at build time. +declare const __PACKAGE_VERSION__: string | undefined; +export const VERSION: string = + __PACKAGE_VERSION__ === undefined ? 
'0.0.0-test' : __PACKAGE_VERSION__; diff --git a/packages/ai-sdk-provider-2/tsup.config.ts b/packages/ai-sdk-provider-2/tsup.config.ts index cded0e2..2d8b30b 100644 --- a/packages/ai-sdk-provider-2/tsup.config.ts +++ b/packages/ai-sdk-provider-2/tsup.config.ts @@ -1,11 +1,17 @@ +import { readFileSync } from "node:fs"; import { defineConfig } from "tsup"; +const package_ = JSON.parse(readFileSync(new URL("./package.json", import.meta.url), "utf8")); + export default defineConfig([ { entry: ["src/index.ts"], format: ["cjs", "esm"], dts: true, sourcemap: true, + define: { + __PACKAGE_VERSION__: JSON.stringify(package_.version), + }, }, { entry: ["src/internal/index.ts"], @@ -13,5 +19,8 @@ export default defineConfig([ format: ["cjs", "esm"], dts: true, sourcemap: true, + define: { + __PACKAGE_VERSION__: JSON.stringify(package_.version), + }, }, ]); diff --git a/packages/ai-sdk-provider/package.json b/packages/ai-sdk-provider/package.json index b98781a..b543660 100644 --- a/packages/ai-sdk-provider/package.json +++ b/packages/ai-sdk-provider/package.json @@ -57,14 +57,14 @@ "eslint": "catalog:", "handlebars": "^4.7.8", "prettier": "catalog:", - "tsup": "8.4.0", + "tsup": "8.5.0", "type-fest": "^4.37.0", "typescript": "catalog:" }, "dependencies": { - "@ai-sdk/provider": "^1.1.3", - "@ai-sdk/provider-utils": "^2.2.8", - "ai": "^4.3.16", - "zod": "^3.24.2" + "@ai-sdk/provider": "^3.0.5", + "@ai-sdk/provider-utils": "^4.0.9", + "ai": "^6.0.48", + "zod": "^4.0.0" } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b8568e3..a09bcc8 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -16,8 +16,8 @@ catalogs: specifier: ^3.4.2 version: 3.4.2 typescript: - specifier: ^5.7.3 - version: 5.7.3 + specifier: ^5.9.2 + version: 5.9.3 importers: @@ -55,28 +55,28 @@ importers: version: 2.4.4 typescript: specifier: 'catalog:' - version: 5.7.3 + version: 5.9.3 vite-tsconfig-paths: specifier: ^5.1.4 - version: 
5.1.4(typescript@5.7.3)(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1)) + version: 5.1.4(typescript@5.9.3)(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1)) vitest: - specifier: ^2.0.5 - version: 2.1.8(@edge-runtime/vm@5.0.0)(@types/node@22.13.10)(lightningcss@1.29.1)(msw@2.7.1(@types/node@22.13.10)(typescript@5.7.3)) + specifier: 3.2.4 + version: 3.2.4(@edge-runtime/vm@5.0.0)(@types/node@22.13.10)(lightningcss@1.29.1)(msw@2.12.4(@types/node@22.13.10)(typescript@5.9.3)) packages/ai-sdk-provider: dependencies: '@ai-sdk/provider': - specifier: ^1.1.3 - version: 1.1.3 + specifier: ^3.0.5 + version: 3.0.5 '@ai-sdk/provider-utils': - specifier: ^2.2.8 - version: 2.2.8(zod@3.24.2) + specifier: ^4.0.9 + version: 4.0.9(zod@4.3.6) ai: - specifier: ^4.3.16 - version: 4.3.16(react@18.3.1)(zod@3.24.2) + specifier: ^6.0.48 + version: 6.0.48(zod@4.3.6) zod: - specifier: ^3.24.2 - version: 3.24.2 + specifier: ^4.0.0 + version: 4.3.6 devDependencies: '@edge-runtime/vm': specifier: ^5.0.0 @@ -103,14 +103,72 @@ importers: specifier: 'catalog:' version: 3.4.2 tsup: - specifier: 8.4.0 - version: 8.4.0(jiti@2.4.2)(postcss@8.5.1)(tsx@4.19.3)(typescript@5.7.3)(yaml@2.7.0) + specifier: 8.5.0 + version: 8.5.0(jiti@2.4.2)(postcss@8.5.1)(tsx@4.19.3)(typescript@5.9.3)(yaml@2.7.0) type-fest: specifier: ^4.37.0 version: 4.37.0 typescript: specifier: 'catalog:' - version: 5.7.3 + version: 5.9.3 + + packages/ai-sdk-provider-2: + dependencies: + '@ai-sdk/provider': + specifier: ^3.0.5 + version: 3.0.5 + '@ai-sdk/provider-utils': + specifier: ^4.0.9 + version: 4.0.9(zod@4.3.6) + devDependencies: + '@edge-runtime/vm': + specifier: ^5.0.0 + version: 5.0.0 + '@hyperbolic/api': + specifier: workspace:* + version: link:../api + '@hyperbolic/eslint-config': + specifier: workspace:* + version: link:../../tooling/eslint + '@hyperbolic/prettier-config': + specifier: workspace:* + version: link:../../tooling/prettier + '@hyperbolic/tsconfig': + specifier: workspace:* + version: 
link:../../tooling/typescript + ai: + specifier: ^6.0.48 + version: 6.0.48(zod@4.3.6) + eslint: + specifier: 'catalog:' + version: 9.19.0(jiti@2.4.2) + handlebars: + specifier: ^4.7.8 + version: 4.7.8 + msw: + specifier: 2.12.4 + version: 2.12.4(@types/node@22.13.10)(typescript@5.9.3) + prettier: + specifier: 'catalog:' + version: 3.4.2 + tsup: + specifier: 8.5.0 + version: 8.5.0(jiti@2.4.2)(postcss@8.5.1)(tsx@4.19.3)(typescript@5.9.3)(yaml@2.7.0) + type-fest: + specifier: ^4.37.0 + version: 4.37.0 + typescript: + specifier: 'catalog:' + version: 5.9.3 + vite-tsconfig-paths: + specifier: ^5.1.4 + version: 5.1.4(typescript@5.9.3)(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1)) + vitest: + specifier: 3.2.4 + version: 3.2.4(@edge-runtime/vm@5.0.0)(@types/node@22.13.10)(lightningcss@1.29.1)(msw@2.12.4(@types/node@22.13.10)(typescript@5.9.3)) + zod: + specifier: ^4.0.0 + version: 4.3.6 packages/api: devDependencies: @@ -119,7 +177,7 @@ importers: version: 0.8.3 '@hey-api/openapi-ts': specifier: 0.64.11 - version: 0.64.11(typescript@5.7.3) + version: 0.64.11(typescript@5.9.3) '@hyperbolic/eslint-config': specifier: workspace:* version: link:../../tooling/eslint @@ -137,7 +195,7 @@ importers: version: 3.4.2 typescript: specifier: 'catalog:' - version: 5.7.3 + version: 5.9.3 tooling/eslint: dependencies: @@ -146,7 +204,7 @@ importers: version: 1.2.7(eslint@9.19.0(jiti@2.4.2)) eslint-plugin-import: specifier: ^2.31.0 - version: 2.31.0(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3))(eslint@9.19.0(jiti@2.4.2)) + version: 2.31.0(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3))(eslint@9.19.0(jiti@2.4.2)) eslint-plugin-jsx-a11y: specifier: ^6.10.2 version: 6.10.2(eslint@9.19.0(jiti@2.4.2)) @@ -155,7 +213,7 @@ importers: version: 2.4.4(eslint@9.19.0(jiti@2.4.2))(turbo@2.4.4) typescript-eslint: specifier: ^8.21.0 - version: 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + version: 
8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) devDependencies: '@hyperbolic/prettier-config': specifier: workspace:* @@ -174,7 +232,7 @@ importers: version: 3.4.2 typescript: specifier: 'catalog:' - version: 5.7.3 + version: 5.9.3 tooling/github: devDependencies: @@ -205,7 +263,7 @@ importers: version: link:../typescript typescript: specifier: 'catalog:' - version: 5.7.3 + version: 5.9.3 tooling/typescript: {} @@ -220,31 +278,21 @@ packages: '@actions/http-client@2.2.3': resolution: {integrity: sha512-mx8hyJi/hjFvbPokCg4uRd4ZX78t+YyRPtnKWwIl+RzNaVuFpQHfmlGVfsKEJN8LwTCvL+DfVgAM04XaHkm6bA==} - '@ai-sdk/provider-utils@2.2.8': - resolution: {integrity: sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA==} + '@ai-sdk/gateway@3.0.22': + resolution: {integrity: sha512-NgnlY73JNuooACHqUIz5uMOEWvqR1MMVbb2soGLMozLY1fgwEIF5iJFDAGa5/YArlzw2ATVU7zQu7HkR/FUjgA==} engines: {node: '>=18'} peerDependencies: - zod: ^3.23.8 - - '@ai-sdk/provider@1.1.3': - resolution: {integrity: sha512-qZMxYJ0qqX/RfnuIaab+zp8UAeJn/ygXXAffR5I4N0n1IrvA6qBsjc8hXLmBiMV2zoXlifkacF7sEFnYnjBcqg==} - engines: {node: '>=18'} + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/react@1.2.12': - resolution: {integrity: sha512-jK1IZZ22evPZoQW3vlkZ7wvjYGYF+tRBKXtrcolduIkQ/m/sOAVcVeVDUDvh1T91xCnWCdUGCPZg2avZ90mv3g==} + '@ai-sdk/provider-utils@4.0.9': + resolution: {integrity: sha512-bB4r6nfhBOpmoS9mePxjRoCy+LnzP3AfhyMGCkGL4Mn9clVNlqEeKj26zEKEtB6yoSVcT1IQ0Zh9fytwMCDnow==} engines: {node: '>=18'} peerDependencies: - react: ^18 || ^19 || ^19.0.0-rc - zod: ^3.23.8 - peerDependenciesMeta: - zod: - optional: true + zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/ui-utils@1.2.11': - resolution: {integrity: sha512-3zcwCc8ezzFlwp3ZD15wAPjf2Au4s3vAbKsXQVyhxODHcmu0iyPO2Eua6D/vicq/AUm/BAo60r97O6HU+EI0+w==} + '@ai-sdk/provider@3.0.5': + resolution: {integrity: sha512-2Xmoq6DBJqmSl80U6V9z5jJSJP7ehaJJQMy2iFUqTay06wdCqTnPVBBQbtEL8RCChenL+q5DC5H5WzU3vV3v8w==} engines: {node: '>=18'} - peerDependencies: - zod: 
^3.23.8 '@babel/code-frame@7.26.2': resolution: {integrity: sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==} @@ -287,15 +335,6 @@ packages: resolution: {integrity: sha512-t8kDRGrKXyp6+tjUh7hw2RLyclsW4TRoRvRHtSyAX9Bb5ldlFh+90YAYY6awRXrlB4G5G2izNeGySpATlFzmOg==} engines: {node: '>=6.9.0'} - '@bundled-es-modules/cookie@2.0.1': - resolution: {integrity: sha512-8o+5fRPLNbjbdGRRmJj3h6Hh1AQJf2dk3qQ/5ZFb+PXkRNiSoMGGUKlsgLfrxneb72axVJyIYji64E2+nNfYyw==} - - '@bundled-es-modules/statuses@1.0.1': - resolution: {integrity: sha512-yn7BklA5acgcBr+7w064fGV+SGIFySjCKpqjcWgBAIfrAkY+4GQTJJHQMeT3V/sgz23VTEVV8TtOmkvJAhFVfg==} - - '@bundled-es-modules/tough-cookie@0.1.6': - resolution: {integrity: sha512-dvMHbL464C0zI+Yqxbz6kZ5TOEp7GLW+pry/RWndAR8MJQAXZ2rPmIs8tziTZjeIyhSNZgZbCePtfSbdWqStJw==} - '@changesets/apply-release-plan@7.0.8': resolution: {integrity: sha512-qjMUj4DYQ1Z6qHawsn7S71SujrExJ+nceyKKyI9iB+M5p9lCL55afuEd6uLBPRpLGWQwkwvWegDHtwHJb1UjpA==} @@ -887,8 +926,8 @@ packages: '@manypkg/get-packages@1.1.3': resolution: {integrity: sha512-fo+QhuU3qE/2TQMQmbVMqaQ6EWbMhi4ABWP+O4AM1NqPBuy0OrApV5LO6BrrgnhtAHS2NH6RrVk9OL181tTi8A==} - '@mswjs/interceptors@0.37.6': - resolution: {integrity: sha512-wK+5pLK5XFmgtH3aQ2YVvA3HohS3xqV/OxuVOdNx9Wpnz7VE/fnC+e1A7ln6LFYeck7gOJ/dsZV6OLplOtAJ2w==} + '@mswjs/interceptors@0.40.0': + resolution: {integrity: sha512-EFd6cVbHsgLa6wa4RljGj6Wk75qoHxUSyc5asLyyPSyuhIcdS2Q3Phw6ImS1q+CkALthJRShiYfKANcQMuMqsQ==} engines: {node: '>=18'} '@nodelib/fs.scandir@2.1.5': @@ -954,191 +993,96 @@ packages: resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} engines: {node: '>=14'} - '@rollup/rollup-android-arm-eabi@4.33.0': - resolution: {integrity: sha512-eBVEV1xXlpRFlaW8IhM/kXHY7vnfWIfQG8ZQV1k5q+3gDQI5D39H9fZk5XysGv/b9kj/pSGAUKJ6+rWoWSNdEQ==} - cpu: [arm] - os: [android] - '@rollup/rollup-android-arm-eabi@4.35.0': resolution: {integrity: 
sha512-uYQ2WfPaqz5QtVgMxfN6NpLD+no0MYHDBywl7itPYd3K5TjjSghNKmX8ic9S8NU8w81NVhJv/XojcHptRly7qQ==} cpu: [arm] os: [android] - '@rollup/rollup-android-arm64@4.33.0': - resolution: {integrity: sha512-CALerXF20lsoIVAEb/FNjnMIvF7v79PUq9NDL2y2sv2cPFC8AFJzE23BbaOvS0CPqsawaAcc+vwlju5v+mw2Pg==} - cpu: [arm64] - os: [android] - '@rollup/rollup-android-arm64@4.35.0': resolution: {integrity: sha512-FtKddj9XZudurLhdJnBl9fl6BwCJ3ky8riCXjEw3/UIbjmIY58ppWwPEvU3fNu+W7FUsAsB1CdH+7EQE6CXAPA==} cpu: [arm64] os: [android] - '@rollup/rollup-darwin-arm64@4.33.0': - resolution: {integrity: sha512-0gR69dCAYCUk5e6jgbekfNBB7ZPLAR3R2VSXL3vyL/c8YlyEg2iq3ol+O3XHS5enKCN/sUsBcBAjSE6sLIXtug==} - cpu: [arm64] - os: [darwin] - '@rollup/rollup-darwin-arm64@4.35.0': resolution: {integrity: sha512-Uk+GjOJR6CY844/q6r5DR/6lkPFOw0hjfOIzVx22THJXMxktXG6CbejseJFznU8vHcEBLpiXKY3/6xc+cBm65Q==} cpu: [arm64] os: [darwin] - '@rollup/rollup-darwin-x64@4.33.0': - resolution: {integrity: sha512-4rOvNnIRt2Kc4a6G6OXlmA8H88PUcrSn9Ac3L6rksCNK83rvHm1xTQz/XAcoy3EWuaKqkRUch0HC5DjF1rNRKA==} - cpu: [x64] - os: [darwin] - '@rollup/rollup-darwin-x64@4.35.0': resolution: {integrity: sha512-3IrHjfAS6Vkp+5bISNQnPogRAW5GAV1n+bNCrDwXmfMHbPl5EhTmWtfmwlJxFRUCBZ+tZ/OxDyU08aF6NI/N5Q==} cpu: [x64] os: [darwin] - '@rollup/rollup-freebsd-arm64@4.33.0': - resolution: {integrity: sha512-VZhX9ymo7t0PSHN3tJXaZ5tne8lmNxFnUBgpjWRT+x6HYWMvfMC0GBrhTEJhLanI6l6ctF0UHrqEWQ8AzgXvOw==} - cpu: [arm64] - os: [freebsd] - '@rollup/rollup-freebsd-arm64@4.35.0': resolution: {integrity: sha512-sxjoD/6F9cDLSELuLNnY0fOrM9WA0KrM0vWm57XhrIMf5FGiN8D0l7fn+bpUeBSU7dCgPV2oX4zHAsAXyHFGcQ==} cpu: [arm64] os: [freebsd] - '@rollup/rollup-freebsd-x64@4.33.0': - resolution: {integrity: sha512-v8zFftaPMqOMiobWgEa/2gZ9O1RrL/qnycxU19OiNIq6ayIziAnZ/WeKaqKiXrg7Riy1sFdXkYo3qVwd1DDVEA==} - cpu: [x64] - os: [freebsd] - '@rollup/rollup-freebsd-x64@4.35.0': resolution: {integrity: sha512-2mpHCeRuD1u/2kruUiHSsnjWtHjqVbzhBkNVQ1aVD63CcexKVcQGwJ2g5VphOd84GvxfSvnnlEyBtQCE5hxVVw==} cpu: 
[x64] os: [freebsd] - '@rollup/rollup-linux-arm-gnueabihf@4.33.0': - resolution: {integrity: sha512-KZLksFX9g6dZuuiRqC/qVLDn58U3fgDij8rAKqcznbIJi/J1laoE3A2xYZZCo0zITr14K5/4Gecr/HrrhCQ7/w==} - cpu: [arm] - os: [linux] - '@rollup/rollup-linux-arm-gnueabihf@4.35.0': resolution: {integrity: sha512-mrA0v3QMy6ZSvEuLs0dMxcO2LnaCONs1Z73GUDBHWbY8tFFocM6yl7YyMu7rz4zS81NDSqhrUuolyZXGi8TEqg==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm-musleabihf@4.33.0': - resolution: {integrity: sha512-p+1NjIzdjNDSXFJxE5lWa/sKdtfpWrqzd8ROnVcOv38weHX+kOA9wTZbogpXEPaCK35wag6vbkm0xas/8FblLQ==} - cpu: [arm] - os: [linux] - '@rollup/rollup-linux-arm-musleabihf@4.35.0': resolution: {integrity: sha512-DnYhhzcvTAKNexIql8pFajr0PiDGrIsBYPRvCKlA5ixSS3uwo/CWNZxB09jhIapEIg945KOzcYEAGGSmTSpk7A==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm64-gnu@4.33.0': - resolution: {integrity: sha512-WSyz2HpBxS/EQpZ21/fruXcQSVRYU7kcfOVbZsLuIbberf1Lw8f1y0IoWu4GVv8PK+V6CdjSIsZiLP4/tDKAJA==} - cpu: [arm64] - os: [linux] - '@rollup/rollup-linux-arm64-gnu@4.35.0': resolution: {integrity: sha512-uagpnH2M2g2b5iLsCTZ35CL1FgyuzzJQ8L9VtlJ+FckBXroTwNOaD0z0/UF+k5K3aNQjbm8LIVpxykUOQt1m/A==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-arm64-musl@4.33.0': - resolution: {integrity: sha512-BgJABG3iaIeiJKDcPhj4x7XRSke4SkcrFxYKnciy/V/auEPbKu4uBhD0SUhIx+0EI/uXtelPqA6VW0GxfDHRYA==} - cpu: [arm64] - os: [linux] - '@rollup/rollup-linux-arm64-musl@4.35.0': resolution: {integrity: sha512-XQxVOCd6VJeHQA/7YcqyV0/88N6ysSVzRjJ9I9UA/xXpEsjvAgDTgH3wQYz5bmr7SPtVK2TsP2fQ2N9L4ukoUg==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-loongarch64-gnu@4.33.0': - resolution: {integrity: sha512-j991T8HolnEaoJZ4sgzCNhAv6F62+SPzMwUUgZp5R1i8+FknghmMmdMny42erAh4jYbCUpYqFS6PFxjD7B947A==} - cpu: [loong64] - os: [linux] - '@rollup/rollup-linux-loongarch64-gnu@4.35.0': resolution: {integrity: sha512-5pMT5PzfgwcXEwOaSrqVsz/LvjDZt+vQ8RT/70yhPU06PTuq8WaHhfT1LW+cdD7mW6i/J5/XIkX/1tCAkh1W6g==} cpu: [loong64] os: [linux] - 
'@rollup/rollup-linux-powerpc64le-gnu@4.33.0': - resolution: {integrity: sha512-dtqsVHoQ1xyQpCMTuWJOYbE6j3byEUCjUAD5J9XEHl2zRO/1+F/N8jfu97GZxOx+ugrZHEFn6hTFvI+PR1Fxbg==} - cpu: [ppc64] - os: [linux] - '@rollup/rollup-linux-powerpc64le-gnu@4.35.0': resolution: {integrity: sha512-c+zkcvbhbXF98f4CtEIP1EBA/lCic5xB0lToneZYvMeKu5Kamq3O8gqrxiYYLzlZH6E3Aq+TSW86E4ay8iD8EA==} cpu: [ppc64] os: [linux] - '@rollup/rollup-linux-riscv64-gnu@4.33.0': - resolution: {integrity: sha512-WQltfx27zYsp2miNbI65bgKFuOvBzHzJvUxWeO8+dGmudmvdGq5mPfGhVBjJBdX6BWOZrPtkLftOSlIllKNARQ==} - cpu: [riscv64] - os: [linux] - '@rollup/rollup-linux-riscv64-gnu@4.35.0': resolution: {integrity: sha512-s91fuAHdOwH/Tad2tzTtPX7UZyytHIRR6V4+2IGlV0Cej5rkG0R61SX4l4y9sh0JBibMiploZx3oHKPnQBKe4g==} cpu: [riscv64] os: [linux] - '@rollup/rollup-linux-s390x-gnu@4.33.0': - resolution: {integrity: sha512-j/8mBAf5RQCJgm0lPkFOHu2qJsXxKJKBadwEwjHxB3K9ZIul+BxMfWYXUX/RUKGji3r7AaxOgsbD0XkGV7YaQg==} - cpu: [s390x] - os: [linux] - '@rollup/rollup-linux-s390x-gnu@4.35.0': resolution: {integrity: sha512-hQRkPQPLYJZYGP+Hj4fR9dDBMIM7zrzJDWFEMPdTnTy95Ljnv0/4w/ixFw3pTBMEuuEuoqtBINYND4M7ujcuQw==} cpu: [s390x] os: [linux] - '@rollup/rollup-linux-x64-gnu@4.33.0': - resolution: {integrity: sha512-11lI0xOnzSZUG2RuH8Eqlj3obnu7BGcTnm/sb3LwCGCHD09PLjaMVCaYx4Pf+E0K4AYMPpHKDP5TTKG24W+JXQ==} - cpu: [x64] - os: [linux] - '@rollup/rollup-linux-x64-gnu@4.35.0': resolution: {integrity: sha512-Pim1T8rXOri+0HmV4CdKSGrqcBWX0d1HoPnQ0uw0bdp1aP5SdQVNBy8LjYncvnLgu3fnnCt17xjWGd4cqh8/hA==} cpu: [x64] os: [linux] - '@rollup/rollup-linux-x64-musl@4.33.0': - resolution: {integrity: sha512-1W1Rl8zUm2qIwGDUaH51aH64CfL9sag5BB3U92Dq7HrpRQb5pvv+tUeN1dMYr9KHX1t52fy5HuPEDVBrlywSvQ==} - cpu: [x64] - os: [linux] - '@rollup/rollup-linux-x64-musl@4.35.0': resolution: {integrity: sha512-QysqXzYiDvQWfUiTm8XmJNO2zm9yC9P/2Gkrwg2dH9cxotQzunBHYr6jk4SujCTqnfGxduOmQcI7c2ryuW8XVg==} cpu: [x64] os: [linux] - '@rollup/rollup-win32-arm64-msvc@4.33.0': - resolution: {integrity: 
sha512-Wmr+r26PU9fu2BMGA2Rb7FJf6yNUcnWrPvDHf0h15iOsY3iMxSCk98/Ez/QQOjLP6F0hTCCP7hWUuruoPbjAgQ==} - cpu: [arm64] - os: [win32] - '@rollup/rollup-win32-arm64-msvc@4.35.0': resolution: {integrity: sha512-OUOlGqPkVJCdJETKOCEf1mw848ZyJ5w50/rZ/3IBQVdLfR5jk/6Sr5m3iO2tdPgwo0x7VcncYuOvMhBWZq8ayg==} cpu: [arm64] os: [win32] - '@rollup/rollup-win32-ia32-msvc@4.33.0': - resolution: {integrity: sha512-9IhxrTkZzFlCx9+odRlDYPBCK7E/ThQQ1JulZYAuwTkoKalXT5Roy46+B2aOfstzIfygwsjmxfJE2Mvcgb1evQ==} - cpu: [ia32] - os: [win32] - '@rollup/rollup-win32-ia32-msvc@4.35.0': resolution: {integrity: sha512-2/lsgejMrtwQe44glq7AFFHLfJBPafpsTa6JvP2NGef/ifOa4KBoglVf7AKN7EV9o32evBPRqfg96fEHzWo5kw==} cpu: [ia32] os: [win32] - '@rollup/rollup-win32-x64-msvc@4.33.0': - resolution: {integrity: sha512-wqYKNnO1Sbfl2/Tn/4gLJbzaye+MhCQWF/ZTe/7zTqq3G8GBAtL746Oj3Eq3VwJD0Mt5Z9uZYCdvMxTV/GJ2+A==} - cpu: [x64] - os: [win32] - '@rollup/rollup-win32-x64-msvc@4.35.0': resolution: {integrity: sha512-PIQeY5XDkrOysbQblSW7v3l1MDZzkTEzAfTPkj5VAu3FW8fS4ynyLg2sINp0fp3SjZ8xkRYpLqoKcYqAkhU1dw==} cpu: [x64] @@ -1147,6 +1091,9 @@ packages: '@rtsao/scc@1.1.0': resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==} + '@standard-schema/spec@1.1.0': + resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} + '@tsconfig/node10@1.0.11': resolution: {integrity: sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==} @@ -1159,11 +1106,11 @@ packages: '@tsconfig/node16@1.0.4': resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} - '@types/cookie@0.6.0': - resolution: {integrity: sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==} + '@types/chai@5.2.3': + resolution: {integrity: 
sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} - '@types/diff-match-patch@1.0.36': - resolution: {integrity: sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg==} + '@types/deep-eql@4.0.2': + resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} '@types/eslint@9.6.1': resolution: {integrity: sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==} @@ -1195,11 +1142,8 @@ packages: '@types/normalize-package-data@2.4.4': resolution: {integrity: sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==} - '@types/statuses@2.0.5': - resolution: {integrity: sha512-jmIUGWrAiwu3dZpxntxieC+1n/5c3mjrImkmOSQ2NC5uP6cYO4aAZDdSmRcI5C1oiTmqlZGHC+/NmJrKogbP5A==} - - '@types/tough-cookie@4.0.5': - resolution: {integrity: sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==} + '@types/statuses@2.0.6': + resolution: {integrity: sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==} '@typescript-eslint/eslint-plugin@8.26.1': resolution: {integrity: sha512-2X3mwqsj9Bd3Ciz508ZUtoQQYpOhU/kWoUqIf49H8Z0+Vbh6UF/y0OEYp0Q0axOGzaBGs7QxRwq0knSQ8khQNA==} @@ -1248,34 +1192,38 @@ packages: resolution: {integrity: sha512-AjOC3zfnxd6S4Eiy3jwktJPclqhFHNyd8L6Gycf9WUPoKZpgM5PjkxY1X7uSy61xVpiJDhhk7XT2NVsN3ALTWg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@vitest/expect@2.1.8': - resolution: {integrity: sha512-8ytZ/fFHq2g4PJVAtDX57mayemKgDR6X3Oa2Foro+EygiOJHUXhCqBAAKQYYajZpFoIfvBCF1j6R6IYRSIUFuw==} + '@vercel/oidc@3.1.0': + resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==} + engines: {node: '>= 20'} + + '@vitest/expect@3.2.4': + resolution: {integrity: 
sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==} - '@vitest/mocker@2.1.8': - resolution: {integrity: sha512-7guJ/47I6uqfttp33mgo6ga5Gr1VnL58rcqYKyShoRK9ebu8T5Rs6HN3s1NABiBeVTdWNrwUMcHH54uXZBN4zA==} + '@vitest/mocker@3.2.4': + resolution: {integrity: sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==} peerDependencies: msw: ^2.4.9 - vite: ^5.0.0 + vite: ^5.0.0 || ^6.0.0 || ^7.0.0-0 peerDependenciesMeta: msw: optional: true vite: optional: true - '@vitest/pretty-format@2.1.8': - resolution: {integrity: sha512-9HiSZ9zpqNLKlbIDRWOnAWqgcA7xu+8YxXSekhr0Ykab7PAYFkhkwoqVArPOtJhPmYeE2YHgKZlj3CP36z2AJQ==} + '@vitest/pretty-format@3.2.4': + resolution: {integrity: sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==} - '@vitest/runner@2.1.8': - resolution: {integrity: sha512-17ub8vQstRnRlIU5k50bG+QOMLHRhYPAna5tw8tYbj+jzjcspnwnwtPtiOlkuKC4+ixDPTuLZiqiWWQ2PSXHVg==} + '@vitest/runner@3.2.4': + resolution: {integrity: sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==} - '@vitest/snapshot@2.1.8': - resolution: {integrity: sha512-20T7xRFbmnkfcmgVEz+z3AU/3b0cEzZOt/zmnvZEctg64/QZbSDJEVm9fLnnlSi74KibmRsO9/Qabi+t0vCRPg==} + '@vitest/snapshot@3.2.4': + resolution: {integrity: sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==} - '@vitest/spy@2.1.8': - resolution: {integrity: sha512-5swjf2q95gXeYPevtW0BLk6H8+bPlMb4Vw/9Em4hFxDcaOxS+e0LOX4yqNxoHzMR2akEB2xfpnWUzkZokmgWDg==} + '@vitest/spy@3.2.4': + resolution: {integrity: sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==} - '@vitest/utils@2.1.8': - resolution: {integrity: sha512-dwSoui6djdwbfFmIgbIjX2ZhIoG7Ex/+xpxyiEgIGzjliY8xGkcpITKTlp6B4MgtGkF2ilvm97cPM96XZaAgcA==} + '@vitest/utils@3.2.4': + resolution: {integrity: 
sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} JSONStream@1.3.5: resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==} @@ -1295,15 +1243,11 @@ packages: engines: {node: '>=0.4.0'} hasBin: true - ai@4.3.16: - resolution: {integrity: sha512-KUDwlThJ5tr2Vw0A1ZkbDKNME3wzWhuVfAOwIvFUzl1TPVDFAXDFTXio3p+jaKneB+dKNCvFFlolYmmgHttG1g==} + ai@6.0.48: + resolution: {integrity: sha512-nON0rHNTEQgzT1HahGl7AeszJh7es5ibZ6LWqm3IiN+bUAQtXkdArXD9vDlVAPBiCd7ERclZ6lUpOE6ltgJb3w==} engines: {node: '>=18'} peerDependencies: - react: ^18 || ^19 || ^19.0.0-rc - zod: ^3.23.8 - peerDependenciesMeta: - react: - optional: true + zod: ^3.25.76 || ^4.1.8 ajv@6.12.6: resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} @@ -1483,18 +1427,14 @@ packages: resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} engines: {node: '>=6'} - chai@5.1.2: - resolution: {integrity: sha512-aGtmf24DW6MLHHG5gCx4zaI3uBq3KRtxeVs0DjFH6Z0rDNbsvTxFASFvdj79pxjxZ8/5u3PIiN3IwEIQkiiuPw==} - engines: {node: '>=12'} + chai@5.3.3: + resolution: {integrity: sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==} + engines: {node: '>=18'} chalk@4.1.2: resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} engines: {node: '>=10'} - chalk@5.4.1: - resolution: {integrity: sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==} - engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} - chardet@0.7.0: resolution: {integrity: sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==} @@ -1569,9 +1509,9 @@ packages: engines: {node: '>=14'} hasBin: true - cookie@0.7.2: - resolution: {integrity: 
sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==} - engines: {node: '>= 0.6'} + cookie@1.1.1: + resolution: {integrity: sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==} + engines: {node: '>=18'} cosmiconfig-typescript-loader@4.4.0: resolution: {integrity: sha512-BabizFdC3wBHhbI4kJh0VkQP9GkBfoHPydD0COMce1nJ1kJAB3F2TmJ/I7diULBKtmEWSwEbuN/KDtgnmUUVmw==} @@ -1658,6 +1598,15 @@ packages: supports-color: optional: true + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + decamelize-keys@1.1.1: resolution: {integrity: sha512-WiPxgEirIV0/eIOMcnFBA3/IJZAZqKnwAwWyvvdi4lsr1WCN22nhdf/3db3DoZcUjTV2SqfzIwNyp6y2xs3nmg==} engines: {node: '>=0.10.0'} @@ -1687,10 +1636,6 @@ packages: deprecation@2.3.1: resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==} - dequal@2.0.3: - resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} - engines: {node: '>=6'} - destr@2.0.3: resolution: {integrity: sha512-2N3BOUU4gYMpTP24s5rF5iP7BDr7uNTCs4ozw3kf/eKfvWSIu93GEBi5m427YoyJoeOzQ5smuu4nNAPGb8idSQ==} @@ -1703,9 +1648,6 @@ packages: engines: {node: '>=0.10'} hasBin: true - diff-match-patch@1.0.5: - resolution: {integrity: sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw==} - diff@4.0.2: resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} engines: {node: '>=0.3.1'} @@ -1782,8 +1724,8 @@ packages: resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} engines: {node: '>= 0.4'} - 
es-module-lexer@1.6.0: - resolution: {integrity: sha512-qqnD1yMU6tk/jnaMosogGySTZP8YtUgAffA9nMN+E/rjxcfRQ6IEk7IiozUjgxKoFHBGjTLnrHB/YC45r/59EQ==} + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} es-object-atoms@1.0.0: resolution: {integrity: sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==} @@ -1926,12 +1868,16 @@ packages: resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} engines: {node: '>=0.10.0'} + eventsource-parser@3.0.6: + resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} + engines: {node: '>=18.0.0'} + execa@5.1.1: resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} engines: {node: '>=10'} - expect-type@1.1.0: - resolution: {integrity: sha512-bFi65yM+xZgk+u/KRIpekdSYkTB5W1pEf0Lt8Q8Msh7b+eQ7LXVtIB1Bkm4fvclDEL1b2CZkMhv2mOeF8tMdkA==} + expect-type@1.3.0: + resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} engines: {node: '>=12.0.0'} extendable-error@0.1.7: @@ -1960,8 +1906,9 @@ packages: fastq@1.17.1: resolution: {integrity: sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==} - fdir@6.4.3: - resolution: {integrity: sha512-PMXmW2y1hDDfTSRc9gaXIuCCRpuoz3Kaz8cUelp3smouvfT632ozg2vrT6lJsHKKOF59YLbOGfAWGUcKEfRMQw==} + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} peerDependencies: picomatch: ^3 || ^4 peerDependenciesMeta: @@ -1984,6 +1931,9 @@ packages: resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} engines: 
{node: '>=10'} + fix-dts-default-cjs-exports@1.0.1: + resolution: {integrity: sha512-pVIECanWFC61Hzl2+oOCtoJ3F17kglZC/6N94eRWycFgBH35hHx0Li604ZIzhseh97mf2p0cv7vVrOZGoqhlEg==} + flat-cache@4.0.1: resolution: {integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==} engines: {node: '>=16'} @@ -2125,8 +2075,8 @@ packages: graphemer@1.4.0: resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} - graphql@16.10.0: - resolution: {integrity: sha512-AjqGKbDGUFRKIRCP9tCKiIGHyriz2oHEbPIbEtcSLSs4YjReZOIPQQWek4+6hjw62H9QShXHyaGivGiYVLeYFQ==} + graphql@16.12.0: + resolution: {integrity: sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==} engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} handlebars@4.7.8: @@ -2431,6 +2381,9 @@ packages: js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + js-tokens@9.0.1: + resolution: {integrity: sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==} + js-yaml@3.14.1: resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} hasBin: true @@ -2466,11 +2419,6 @@ packages: resolution: {integrity: sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==} hasBin: true - jsondiffpatch@0.6.0: - resolution: {integrity: sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ==} - engines: {node: ^18.0.0 || >=20.0.0} - hasBin: true - jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} @@ -2622,12 +2570,8 @@ packages: lodash@4.17.21: resolution: {integrity: 
sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} - loose-envify@1.4.0: - resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} - hasBin: true - - loupe@3.1.3: - resolution: {integrity: sha512-kkIp7XSkP78ZxJEsSxW3712C6teJVoeHHwgo9zJ380de7IYyJ2ISlxojcH2pC5OFLewESmnRi/+XCDIEEVyoug==} + loupe@3.2.1: + resolution: {integrity: sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==} lru-cache@10.4.3: resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} @@ -2722,8 +2666,8 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} - msw@2.7.1: - resolution: {integrity: sha512-TVT65uoWt9LE4lMTLBdClHBQVwvZv5ofac1YyE119nCrNyXf4ktdeVnWH9Fyt94Ifmiedhw6Npp4DSuVRSuRpw==} + msw@2.12.4: + resolution: {integrity: sha512-rHNiVfTyKhzc0EjoXUBVGteNKBevdjOlVC6GlIRXpy+/3LHEIGRovnB5WPjcvmNODVQ1TNFnoa7wsGbd0V3epg==} engines: {node: '>=18'} hasBin: true peerDependencies: @@ -2925,8 +2869,8 @@ packages: resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} engines: {node: '>=8.6'} - picomatch@4.0.2: - resolution: {integrity: sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==} + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} engines: {node: '>=12'} pify@4.0.1: @@ -2980,16 +2924,10 @@ packages: engines: {node: '>=14'} hasBin: true - psl@1.15.0: - resolution: {integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==} - punycode@2.3.1: resolution: {integrity: 
sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} - querystringify@2.2.0: - resolution: {integrity: sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==} - queue-microtask@1.2.3: resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} @@ -3000,10 +2938,6 @@ packages: rc9@2.1.2: resolution: {integrity: sha512-btXCnMmRIBINM2LDZoEmOogIZU7Qe7zn4BpomSKZ/ykbLObuBdvG+mFq11DL6fjH1DRwHhrlgtYWG96bJiC7Cg==} - react@18.3.1: - resolution: {integrity: sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==} - engines: {node: '>=0.10.0'} - read-pkg-up@7.0.1: resolution: {integrity: sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==} engines: {node: '>=8'} @@ -3051,9 +2985,6 @@ packages: resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} engines: {node: '>=0.10.0'} - requires-port@1.0.0: - resolution: {integrity: sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==} - resolve-from@4.0.0: resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} engines: {node: '>=4'} @@ -3078,15 +3009,13 @@ packages: resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} hasBin: true + rettime@0.7.0: + resolution: {integrity: sha512-LPRKoHnLKd/r3dVxcwO7vhCW+orkOGj9ViueosEBK6ie89CijnfRlhaDhHq/3Hxu4CkWQtxwlBG0mzTQY6uQjw==} + reusify@1.0.4: resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} - rollup@4.33.0: - resolution: {integrity: 
sha512-zdkoGsCeuVLKX8jaa3D969EaRP5mvjWx+oZTYDAzvXuFmLlZlhuKKQcpqHcDI5Z6ox9ztInGOJTiJKMwo8kGpw==} - engines: {node: '>=18.0.0', npm: '>=8.0.0'} - hasBin: true - rollup@4.35.0: resolution: {integrity: sha512-kg6oI4g+vc41vePJyO6dHt/yl0Rz3Thv0kJeVQ3D1kS3E5XSuKbPc29G4IpT/Kv1KQwgHVcN+HtyS+HYLNSvQg==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} @@ -3121,9 +3050,6 @@ packages: safer-buffer@2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} - secure-json-parse@2.7.0: - resolution: {integrity: sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==} - semver@5.7.2: resolution: {integrity: sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==} hasBin: true @@ -3203,6 +3129,7 @@ packages: source-map@0.8.0-beta.0: resolution: {integrity: sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA==} engines: {node: '>= 8'} + deprecated: The work that was done in this beta branch won't be included in future versions spawndamnit@3.0.1: resolution: {integrity: sha512-MmnduQUuHCoFckZoWnXsTg7JaiLBJrKFj9UI2MbRPGaJeVpsLcVBu6P/IGZovziM/YBsellCmsprgNA+w0CzVg==} @@ -3228,12 +3155,12 @@ packages: stackback@0.0.2: resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} - statuses@2.0.1: - resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==} + statuses@2.0.2: + resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==} engines: {node: '>= 0.8'} - std-env@3.8.0: - resolution: {integrity: sha512-Bc3YwwCB+OzldMxOXJIIvC6cPRWr/LxOp48CdQTOkPyk/t4JWWJbrilwBd7RJzKV8QW7tJkcgAmeuLLJugl5/w==} + std-env@3.10.0: + resolution: {integrity: 
sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} strict-event-emitter@0.5.1: resolution: {integrity: sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==} @@ -3296,6 +3223,9 @@ packages: resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} engines: {node: '>=8'} + strip-literal@3.1.0: + resolution: {integrity: sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==} + sucrase@3.35.0: resolution: {integrity: sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==} engines: {node: '>=16 || 14 >=14.17'} @@ -3309,10 +3239,9 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} - swr@2.3.3: - resolution: {integrity: sha512-dshNvs3ExOqtZ6kJBaAsabhPdHyeY4P2cKwRCniDVifBMoG/SVI7tfLWqPXriVspf2Rg4tPzXJTnwaihIeFw2A==} - peerDependencies: - react: ^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + tagged-tag@1.0.0: + resolution: {integrity: sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng==} + engines: {node: '>=20'} tar@6.2.1: resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==} @@ -3333,10 +3262,6 @@ packages: thenify@3.3.1: resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==} - throttleit@2.1.0: - resolution: {integrity: sha512-nt6AMGKW1p/70DF/hGBdJB57B8Tspmbp5gfJ8ilhLnt7kkr2ye7hzD6NVG8GGErk2HWF34igrL2CXmNIkzKqKw==} - engines: {node: '>=18'} - through2@4.0.2: resolution: {integrity: sha512-iOqSav00cVxEEICeD7TjLB1sueEL+81Wpzp2bY17uZjZN0pWZPuo4suZ/61VujxmqSGFfgOcNuTZ85QJwNZQpw==} @@ -3349,22 +3274,29 @@ packages: tinyexec@0.3.2: resolution: {integrity: 
sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} - tinyglobby@0.2.12: - resolution: {integrity: sha512-qkf4trmKSIiMTs/E63cxH+ojC2unam7rJ0WrauAzpT3ECNTxGRMlaXxVbfxMUC/w0LaYk6jQ4y/nGR9uBO3tww==} + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} - tinypool@1.0.2: - resolution: {integrity: sha512-al6n+QEANGFOMf/dmUMsuS5/r9B06uwlyNjZZql/zv8J7ybHCgoihBNORZCY2mzUuAnomQa2JdhyHKzZxPCrFA==} + tinypool@1.1.1: + resolution: {integrity: sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==} engines: {node: ^18.0.0 || >=20.0.0} - tinyrainbow@1.2.0: - resolution: {integrity: sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==} + tinyrainbow@2.0.0: + resolution: {integrity: sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==} engines: {node: '>=14.0.0'} - tinyspy@3.0.2: - resolution: {integrity: sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==} + tinyspy@4.0.4: + resolution: {integrity: sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==} engines: {node: '>=14.0.0'} + tldts-core@7.0.19: + resolution: {integrity: sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==} + + tldts@7.0.19: + resolution: {integrity: sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==} + hasBin: true + tmp@0.0.33: resolution: {integrity: sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==} engines: {node: '>=0.6.0'} @@ -3373,9 +3305,9 @@ packages: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} - 
tough-cookie@4.1.4: - resolution: {integrity: sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==} - engines: {node: '>=6'} + tough-cookie@6.0.0: + resolution: {integrity: sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==} + engines: {node: '>=16'} tr46@0.0.3: resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} @@ -3427,8 +3359,8 @@ packages: tsconfig-paths@3.15.0: resolution: {integrity: sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==} - tsup@8.4.0: - resolution: {integrity: sha512-b+eZbPCjz10fRryaAA7C8xlIHnf8VnsaRqydheLIqwG/Mcpfk8Z5zp3HayX7GaTygkigHl5cBUs+IhcySiIexQ==} + tsup@8.5.0: + resolution: {integrity: sha512-VmBp77lWNQq6PfuMqCHD3xWl22vEoWsKajkF8t+yMBawlUS8JzEI+vOVMeuNZIuMML8qXRizFKi9oD5glKQVcQ==} engines: {node: '>=18'} hasBin: true peerDependencies: @@ -3513,6 +3445,10 @@ packages: resolution: {integrity: sha512-S/5/0kFftkq27FPNye0XM1e2NsnoD/3FS+pBmbjmmtLT6I+i344KoOf7pvXreaFsDamWeaJX55nczA1m5PsBDg==} engines: {node: '>=16'} + type-fest@5.4.1: + resolution: {integrity: sha512-xygQcmneDyzsEuKZrFbRMne5HDqMs++aFzefrJTgEIKjQ3rekM+RPfFCVq2Gp1VIDqddoYeppCj4Pcb+RZW0GQ==} + engines: {node: '>=20'} + typed-array-buffer@1.0.2: resolution: {integrity: sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==} engines: {node: '>= 0.4'} @@ -3552,13 +3488,8 @@ packages: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <5.9.0' - typescript@5.6.3: - resolution: {integrity: sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==} - engines: {node: '>=14.17'} - hasBin: true - - typescript@5.7.3: - resolution: {integrity: sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==} + typescript@5.9.3: + resolution: {integrity: 
sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} engines: {node: '>=14.17'} hasBin: true @@ -3591,25 +3522,16 @@ packages: resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==} engines: {node: '>= 4.0.0'} - universalify@0.2.0: - resolution: {integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==} - engines: {node: '>= 4.0.0'} - universalify@2.0.1: resolution: {integrity: sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==} engines: {node: '>= 10.0.0'} + until-async@3.0.2: + resolution: {integrity: sha512-IiSk4HlzAMqTUseHHe3VhIGyuFmN90zMTpD3Z3y8jeQbzLIq500MVM7Jq2vUAnTKAFPJrqwkzr6PoTcPhGcOiw==} + uri-js@4.4.1: resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} - url-parse@1.5.10: - resolution: {integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==} - - use-sync-external-store@1.4.0: - resolution: {integrity: sha512-9WXSPC5fMv61vaupRkCKCxsPxBocVnwakBEkMIHHpkTTg6icbJtg6jzgtLDm4bl3cSHAca52rYWih0k4K3PfHw==} - peerDependencies: - react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 - util-deprecate@1.0.2: resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} @@ -3623,9 +3545,9 @@ packages: validate-npm-package-license@3.0.4: resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} - vite-node@2.1.8: - resolution: {integrity: sha512-uPAwSr57kYjAUux+8E2j0q0Fxpn8M9VoyfGiRI8Kfktz9NcYMCenwY5RnZxnF1WTu3TGiYipirIzacLL3VVGFg==} - engines: {node: ^18.0.0 || >=20.0.0} + vite-node@3.2.4: + resolution: {integrity: sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==} + engines: {node: 
^18.0.0 || ^20.0.0 || >=22.0.0} hasBin: true vite-tsconfig-paths@5.1.4: @@ -3667,20 +3589,23 @@ packages: terser: optional: true - vitest@2.1.8: - resolution: {integrity: sha512-1vBKTZskHw/aosXqQUlVWWlGUxSJR8YtiyZDJAFeW2kPAeX6S3Sool0mjspO+kXLuxVWlEDDowBAeqeAQefqLQ==} - engines: {node: ^18.0.0 || >=20.0.0} + vitest@3.2.4: + resolution: {integrity: sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} hasBin: true peerDependencies: '@edge-runtime/vm': '*' - '@types/node': ^18.0.0 || >=20.0.0 - '@vitest/browser': 2.1.8 - '@vitest/ui': 2.1.8 + '@types/debug': ^4.1.12 + '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 + '@vitest/browser': 3.2.4 + '@vitest/ui': 3.2.4 happy-dom: '*' jsdom: '*' peerDependenciesMeta: '@edge-runtime/vm': optional: true + '@types/debug': + optional: true '@types/node': optional: true '@vitest/browser': @@ -3795,13 +3720,8 @@ packages: resolution: {integrity: sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==} engines: {node: '>=18'} - zod-to-json-schema@3.24.4: - resolution: {integrity: sha512-0uNlcvgabyrni9Ag8Vghj21drk7+7tp7VTwwR7KxxXXc/3pbXz2PHlDgj3cICahgF1kHm4dExBFj7BXrZJXzig==} - peerDependencies: - zod: ^3.24.1 - - zod@3.24.2: - resolution: {integrity: sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==} + zod@4.3.6: + resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==} snapshots: @@ -3824,33 +3744,23 @@ snapshots: tunnel: 0.0.6 undici: 5.28.5 - '@ai-sdk/provider-utils@2.2.8(zod@3.24.2)': - dependencies: - '@ai-sdk/provider': 1.1.3 - nanoid: 3.3.8 - secure-json-parse: 2.7.0 - zod: 3.24.2 - - '@ai-sdk/provider@1.1.3': + '@ai-sdk/gateway@3.0.22(zod@4.3.6)': dependencies: - json-schema: 0.4.0 + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.9(zod@4.3.6) + '@vercel/oidc': 3.1.0 + zod: 
4.3.6 - '@ai-sdk/react@1.2.12(react@18.3.1)(zod@3.24.2)': + '@ai-sdk/provider-utils@4.0.9(zod@4.3.6)': dependencies: - '@ai-sdk/provider-utils': 2.2.8(zod@3.24.2) - '@ai-sdk/ui-utils': 1.2.11(zod@3.24.2) - react: 18.3.1 - swr: 2.3.3(react@18.3.1) - throttleit: 2.1.0 - optionalDependencies: - zod: 3.24.2 + '@ai-sdk/provider': 3.0.5 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 4.3.6 - '@ai-sdk/ui-utils@1.2.11(zod@3.24.2)': + '@ai-sdk/provider@3.0.5': dependencies: - '@ai-sdk/provider': 1.1.3 - '@ai-sdk/provider-utils': 2.2.8(zod@3.24.2) - zod: 3.24.2 - zod-to-json-schema: 3.24.4(zod@3.24.2) + json-schema: 0.4.0 '@babel/code-frame@7.26.2': dependencies: @@ -3905,22 +3815,6 @@ snapshots: '@babel/helper-string-parser': 7.25.9 '@babel/helper-validator-identifier': 7.25.9 - '@bundled-es-modules/cookie@2.0.1': - dependencies: - cookie: 0.7.2 - optional: true - - '@bundled-es-modules/statuses@1.0.1': - dependencies: - statuses: 2.0.1 - optional: true - - '@bundled-es-modules/tough-cookie@0.1.6': - dependencies: - '@types/tough-cookie': 4.0.5 - tough-cookie: 4.1.4 - optional: true - '@changesets/apply-release-plan@7.0.8': dependencies: '@changesets/config': 3.0.5 @@ -4141,14 +4035,14 @@ snapshots: '@commitlint/types': 17.8.1 '@types/node': 20.5.1 chalk: 4.1.2 - cosmiconfig: 8.3.6(typescript@5.6.3) - cosmiconfig-typescript-loader: 4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.7.3))(ts-node@10.9.2(@types/node@22.13.10)(typescript@5.7.3))(typescript@5.6.3) + cosmiconfig: 8.3.6(typescript@5.9.3) + cosmiconfig-typescript-loader: 4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@20.5.1)(typescript@5.9.3))(typescript@5.9.3) lodash.isplainobject: 4.0.6 lodash.merge: 4.6.2 lodash.uniq: 4.5.0 resolve-from: 5.0.0 - ts-node: 10.9.2(@types/node@20.5.1)(typescript@5.6.3) - typescript: 5.6.3 + ts-node: 10.9.2(@types/node@20.5.1)(typescript@5.9.3) + typescript: 5.9.3 transitivePeerDependencies: - '@swc/core' - 
'@swc/wasm' @@ -4406,13 +4300,13 @@ snapshots: '@types/json-schema': 7.0.15 js-yaml: 4.1.0 - '@hey-api/openapi-ts@0.64.11(typescript@5.7.3)': + '@hey-api/openapi-ts@0.64.11(typescript@5.9.3)': dependencies: '@hey-api/json-schema-ref-parser': 1.0.2 c12: 2.0.1 commander: 13.0.0 handlebars: 4.7.8 - typescript: 5.7.3 + typescript: 5.9.3 transitivePeerDependencies: - magicast @@ -4446,7 +4340,6 @@ snapshots: '@inquirer/type': 3.0.4(@types/node@22.13.10) optionalDependencies: '@types/node': 22.13.10 - optional: true '@inquirer/core@10.1.7(@types/node@22.13.10)': dependencies: @@ -4460,15 +4353,12 @@ snapshots: yoctocolors-cjs: 2.1.2 optionalDependencies: '@types/node': 22.13.10 - optional: true - '@inquirer/figures@1.0.10': - optional: true + '@inquirer/figures@1.0.10': {} '@inquirer/type@3.0.4(@types/node@22.13.10)': optionalDependencies: '@types/node': 22.13.10 - optional: true '@isaacs/cliui@8.0.2': dependencies: @@ -4521,7 +4411,7 @@ snapshots: globby: 11.1.0 read-yaml-file: 1.1.0 - '@mswjs/interceptors@0.37.6': + '@mswjs/interceptors@0.40.0': dependencies: '@open-draft/deferred-promise': 2.2.0 '@open-draft/logger': 0.3.0 @@ -4529,7 +4419,6 @@ snapshots: is-node-process: 1.2.0 outvariant: 1.4.3 strict-event-emitter: 0.5.1 - optional: true '@nodelib/fs.scandir@2.1.5': dependencies: @@ -4607,139 +4496,81 @@ snapshots: dependencies: '@octokit/openapi-types': 12.11.0 - '@open-draft/deferred-promise@2.2.0': - optional: true + '@open-draft/deferred-promise@2.2.0': {} '@open-draft/logger@0.3.0': dependencies: is-node-process: 1.2.0 outvariant: 1.4.3 - optional: true - '@open-draft/until@2.1.0': - optional: true + '@open-draft/until@2.1.0': {} '@opentelemetry/api@1.9.0': {} '@pkgjs/parseargs@0.11.0': optional: true - '@rollup/rollup-android-arm-eabi@4.33.0': - optional: true - '@rollup/rollup-android-arm-eabi@4.35.0': optional: true - '@rollup/rollup-android-arm64@4.33.0': - optional: true - '@rollup/rollup-android-arm64@4.35.0': optional: true - 
'@rollup/rollup-darwin-arm64@4.33.0': - optional: true - '@rollup/rollup-darwin-arm64@4.35.0': optional: true - '@rollup/rollup-darwin-x64@4.33.0': - optional: true - '@rollup/rollup-darwin-x64@4.35.0': optional: true - '@rollup/rollup-freebsd-arm64@4.33.0': - optional: true - '@rollup/rollup-freebsd-arm64@4.35.0': optional: true - '@rollup/rollup-freebsd-x64@4.33.0': - optional: true - '@rollup/rollup-freebsd-x64@4.35.0': optional: true - '@rollup/rollup-linux-arm-gnueabihf@4.33.0': - optional: true - '@rollup/rollup-linux-arm-gnueabihf@4.35.0': optional: true - '@rollup/rollup-linux-arm-musleabihf@4.33.0': - optional: true - '@rollup/rollup-linux-arm-musleabihf@4.35.0': optional: true - '@rollup/rollup-linux-arm64-gnu@4.33.0': - optional: true - '@rollup/rollup-linux-arm64-gnu@4.35.0': optional: true - '@rollup/rollup-linux-arm64-musl@4.33.0': - optional: true - '@rollup/rollup-linux-arm64-musl@4.35.0': optional: true - '@rollup/rollup-linux-loongarch64-gnu@4.33.0': - optional: true - '@rollup/rollup-linux-loongarch64-gnu@4.35.0': optional: true - '@rollup/rollup-linux-powerpc64le-gnu@4.33.0': - optional: true - '@rollup/rollup-linux-powerpc64le-gnu@4.35.0': optional: true - '@rollup/rollup-linux-riscv64-gnu@4.33.0': - optional: true - '@rollup/rollup-linux-riscv64-gnu@4.35.0': optional: true - '@rollup/rollup-linux-s390x-gnu@4.33.0': - optional: true - '@rollup/rollup-linux-s390x-gnu@4.35.0': optional: true - '@rollup/rollup-linux-x64-gnu@4.33.0': - optional: true - '@rollup/rollup-linux-x64-gnu@4.35.0': optional: true - '@rollup/rollup-linux-x64-musl@4.33.0': - optional: true - '@rollup/rollup-linux-x64-musl@4.35.0': optional: true - '@rollup/rollup-win32-arm64-msvc@4.33.0': - optional: true - '@rollup/rollup-win32-arm64-msvc@4.35.0': optional: true - '@rollup/rollup-win32-ia32-msvc@4.33.0': - optional: true - '@rollup/rollup-win32-ia32-msvc@4.35.0': optional: true - '@rollup/rollup-win32-x64-msvc@4.33.0': - optional: true - 
'@rollup/rollup-win32-x64-msvc@4.35.0': optional: true '@rtsao/scc@1.1.0': {} + '@standard-schema/spec@1.1.0': {} + '@tsconfig/node10@1.0.11': {} '@tsconfig/node12@1.0.11': {} @@ -4748,10 +4579,12 @@ snapshots: '@tsconfig/node16@1.0.4': {} - '@types/cookie@0.6.0': - optional: true + '@types/chai@5.2.3': + dependencies: + '@types/deep-eql': 4.0.2 + assertion-error: 2.0.1 - '@types/diff-match-patch@1.0.36': {} + '@types/deep-eql@4.0.2': {} '@types/eslint@9.6.1': dependencies: @@ -4780,38 +4613,34 @@ snapshots: '@types/normalize-package-data@2.4.4': {} - '@types/statuses@2.0.5': - optional: true + '@types/statuses@2.0.6': {} - '@types/tough-cookie@4.0.5': - optional: true - - '@typescript-eslint/eslint-plugin@8.26.1(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3))(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)': + '@typescript-eslint/eslint-plugin@8.26.1(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3))(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3)': dependencies: '@eslint-community/regexpp': 4.12.1 - '@typescript-eslint/parser': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/parser': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.26.1 - '@typescript-eslint/type-utils': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) - '@typescript-eslint/utils': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/type-utils': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) + '@typescript-eslint/utils': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) '@typescript-eslint/visitor-keys': 8.26.1 eslint: 9.19.0(jiti@2.4.2) graphemer: 1.4.0 ignore: 5.3.1 natural-compare: 1.4.0 - ts-api-utils: 2.0.1(typescript@5.7.3) - typescript: 5.7.3 + ts-api-utils: 2.0.1(typescript@5.9.3) + typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)': + 
'@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3)': dependencies: '@typescript-eslint/scope-manager': 8.26.1 '@typescript-eslint/types': 8.26.1 - '@typescript-eslint/typescript-estree': 8.26.1(typescript@5.7.3) + '@typescript-eslint/typescript-estree': 8.26.1(typescript@5.9.3) '@typescript-eslint/visitor-keys': 8.26.1 debug: 4.4.0 eslint: 9.19.0(jiti@2.4.2) - typescript: 5.7.3 + typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -4820,20 +4649,20 @@ snapshots: '@typescript-eslint/types': 8.26.1 '@typescript-eslint/visitor-keys': 8.26.1 - '@typescript-eslint/type-utils@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)': + '@typescript-eslint/type-utils@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3)': dependencies: - '@typescript-eslint/typescript-estree': 8.26.1(typescript@5.7.3) - '@typescript-eslint/utils': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/typescript-estree': 8.26.1(typescript@5.9.3) + '@typescript-eslint/utils': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) debug: 4.4.0 eslint: 9.19.0(jiti@2.4.2) - ts-api-utils: 2.0.1(typescript@5.7.3) - typescript: 5.7.3 + ts-api-utils: 2.0.1(typescript@5.9.3) + typescript: 5.9.3 transitivePeerDependencies: - supports-color '@typescript-eslint/types@8.26.1': {} - '@typescript-eslint/typescript-estree@8.26.1(typescript@5.7.3)': + '@typescript-eslint/typescript-estree@8.26.1(typescript@5.9.3)': dependencies: '@typescript-eslint/types': 8.26.1 '@typescript-eslint/visitor-keys': 8.26.1 @@ -4842,19 +4671,19 @@ snapshots: is-glob: 4.0.3 minimatch: 9.0.5 semver: 7.7.0 - ts-api-utils: 2.0.1(typescript@5.7.3) - typescript: 5.7.3 + ts-api-utils: 2.0.1(typescript@5.9.3) + typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)': + '@typescript-eslint/utils@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3)': dependencies: '@eslint-community/eslint-utils': 
4.4.0(eslint@9.19.0(jiti@2.4.2)) '@typescript-eslint/scope-manager': 8.26.1 '@typescript-eslint/types': 8.26.1 - '@typescript-eslint/typescript-estree': 8.26.1(typescript@5.7.3) + '@typescript-eslint/typescript-estree': 8.26.1(typescript@5.9.3) eslint: 9.19.0(jiti@2.4.2) - typescript: 5.7.3 + typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -4863,46 +4692,50 @@ snapshots: '@typescript-eslint/types': 8.26.1 eslint-visitor-keys: 4.2.0 - '@vitest/expect@2.1.8': + '@vercel/oidc@3.1.0': {} + + '@vitest/expect@3.2.4': dependencies: - '@vitest/spy': 2.1.8 - '@vitest/utils': 2.1.8 - chai: 5.1.2 - tinyrainbow: 1.2.0 + '@types/chai': 5.2.3 + '@vitest/spy': 3.2.4 + '@vitest/utils': 3.2.4 + chai: 5.3.3 + tinyrainbow: 2.0.0 - '@vitest/mocker@2.1.8(msw@2.7.1(@types/node@22.13.10)(typescript@5.7.3))(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1))': + '@vitest/mocker@3.2.4(msw@2.12.4(@types/node@22.13.10)(typescript@5.9.3))(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1))': dependencies: - '@vitest/spy': 2.1.8 + '@vitest/spy': 3.2.4 estree-walker: 3.0.3 magic-string: 0.30.17 optionalDependencies: - msw: 2.7.1(@types/node@22.13.10)(typescript@5.7.3) + msw: 2.12.4(@types/node@22.13.10)(typescript@5.9.3) vite: 5.4.14(@types/node@22.13.10)(lightningcss@1.29.1) - '@vitest/pretty-format@2.1.8': + '@vitest/pretty-format@3.2.4': dependencies: - tinyrainbow: 1.2.0 + tinyrainbow: 2.0.0 - '@vitest/runner@2.1.8': + '@vitest/runner@3.2.4': dependencies: - '@vitest/utils': 2.1.8 - pathe: 1.1.2 + '@vitest/utils': 3.2.4 + pathe: 2.0.3 + strip-literal: 3.1.0 - '@vitest/snapshot@2.1.8': + '@vitest/snapshot@3.2.4': dependencies: - '@vitest/pretty-format': 2.1.8 + '@vitest/pretty-format': 3.2.4 magic-string: 0.30.17 - pathe: 1.1.2 + pathe: 2.0.3 - '@vitest/spy@2.1.8': + '@vitest/spy@3.2.4': dependencies: - tinyspy: 3.0.2 + tinyspy: 4.0.4 - '@vitest/utils@2.1.8': + '@vitest/utils@3.2.4': dependencies: - '@vitest/pretty-format': 2.1.8 - loupe: 3.1.3 - tinyrainbow: 1.2.0 
+ '@vitest/pretty-format': 3.2.4 + loupe: 3.2.1 + tinyrainbow: 2.0.0 JSONStream@1.3.5: dependencies: @@ -4919,17 +4752,13 @@ snapshots: acorn@8.14.0: {} - ai@4.3.16(react@18.3.1)(zod@3.24.2): + ai@6.0.48(zod@4.3.6): dependencies: - '@ai-sdk/provider': 1.1.3 - '@ai-sdk/provider-utils': 2.2.8(zod@3.24.2) - '@ai-sdk/react': 1.2.12(react@18.3.1)(zod@3.24.2) - '@ai-sdk/ui-utils': 1.2.11(zod@3.24.2) + '@ai-sdk/gateway': 3.0.22(zod@4.3.6) + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.9(zod@4.3.6) '@opentelemetry/api': 1.9.0 - jsondiffpatch: 0.6.0 - zod: 3.24.2 - optionalDependencies: - react: 18.3.1 + zod: 4.3.6 ajv@6.12.6: dependencies: @@ -4950,7 +4779,6 @@ snapshots: ansi-escapes@4.3.2: dependencies: type-fest: 0.21.3 - optional: true ansi-regex@5.0.1: {} @@ -5135,12 +4963,12 @@ snapshots: camelcase@5.3.1: {} - chai@5.1.2: + chai@5.3.3: dependencies: assertion-error: 2.0.1 check-error: 2.1.1 deep-eql: 5.0.2 - loupe: 3.1.3 + loupe: 3.2.1 pathval: 2.0.0 chalk@4.1.2: @@ -5148,8 +4976,6 @@ snapshots: ansi-styles: 4.3.0 supports-color: 7.2.0 - chalk@5.4.1: {} - chardet@0.7.0: {} check-error@2.1.1: {} @@ -5166,8 +4992,7 @@ snapshots: dependencies: consola: 3.4.0 - cli-width@4.1.0: - optional: true + cli-width@4.1.0: {} cliui@8.0.1: dependencies: @@ -5213,24 +5038,23 @@ snapshots: meow: 8.1.2 split2: 3.2.2 - cookie@0.7.2: - optional: true + cookie@1.1.1: {} - cosmiconfig-typescript-loader@4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.7.3))(ts-node@10.9.2(@types/node@22.13.10)(typescript@5.7.3))(typescript@5.6.3): + cosmiconfig-typescript-loader@4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@20.5.1)(typescript@5.9.3))(typescript@5.9.3): dependencies: '@types/node': 20.5.1 - cosmiconfig: 8.3.6(typescript@5.6.3) - ts-node: 10.9.2(@types/node@20.5.1)(typescript@5.6.3) - typescript: 5.6.3 + cosmiconfig: 8.3.6(typescript@5.9.3) + ts-node: 10.9.2(@types/node@20.5.1)(typescript@5.9.3) + typescript: 5.9.3 - 
cosmiconfig@8.3.6(typescript@5.6.3): + cosmiconfig@8.3.6(typescript@5.9.3): dependencies: import-fresh: 3.3.0 js-yaml: 4.1.0 parse-json: 5.2.0 path-type: 4.0.0 optionalDependencies: - typescript: 5.6.3 + typescript: 5.9.3 create-require@1.1.1: {} @@ -5294,6 +5118,10 @@ snapshots: dependencies: ms: 2.1.3 + debug@4.4.3: + dependencies: + ms: 2.1.3 + decamelize-keys@1.1.1: dependencies: decamelize: 1.2.0 @@ -5321,8 +5149,6 @@ snapshots: deprecation@2.3.1: {} - dequal@2.0.3: {} - destr@2.0.3: {} detect-indent@6.1.0: {} @@ -5330,8 +5156,6 @@ snapshots: detect-libc@1.0.3: optional: true - diff-match-patch@1.0.5: {} - diff@4.0.2: {} dir-glob@3.0.1: @@ -5493,7 +5317,7 @@ snapshots: es-errors@1.3.0: {} - es-module-lexer@1.6.0: {} + es-module-lexer@1.7.0: {} es-object-atoms@1.0.0: dependencies: @@ -5598,17 +5422,17 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.12.0(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint@9.19.0(jiti@2.4.2)): + eslint-module-utils@2.12.0(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.19.0(jiti@2.4.2)): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/parser': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) eslint: 9.19.0(jiti@2.4.2) eslint-import-resolver-node: 0.3.9 transitivePeerDependencies: - supports-color - eslint-plugin-import@2.31.0(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3))(eslint@9.19.0(jiti@2.4.2)): + eslint-plugin-import@2.31.0(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3))(eslint@9.19.0(jiti@2.4.2)): dependencies: '@rtsao/scc': 1.1.0 array-includes: 3.1.8 @@ -5619,7 +5443,7 @@ snapshots: doctrine: 2.1.0 eslint: 9.19.0(jiti@2.4.2) eslint-import-resolver-node: 0.3.9 - 
eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint@9.19.0(jiti@2.4.2)) + eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.19.0(jiti@2.4.2)) hasown: 2.0.2 is-core-module: 2.16.1 is-glob: 4.0.3 @@ -5631,7 +5455,7 @@ snapshots: string.prototype.trimend: 1.0.9 tsconfig-paths: 3.15.0 optionalDependencies: - '@typescript-eslint/parser': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/parser': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -5736,6 +5560,8 @@ snapshots: esutils@2.0.3: {} + eventsource-parser@3.0.6: {} + execa@5.1.1: dependencies: cross-spawn: 7.0.6 @@ -5748,7 +5574,7 @@ snapshots: signal-exit: 3.0.7 strip-final-newline: 2.0.0 - expect-type@1.1.0: {} + expect-type@1.3.0: {} extendable-error@0.1.7: {} @@ -5778,9 +5604,9 @@ snapshots: dependencies: reusify: 1.0.4 - fdir@6.4.3(picomatch@4.0.2): + fdir@6.5.0(picomatch@4.0.3): optionalDependencies: - picomatch: 4.0.2 + picomatch: 4.0.3 file-entry-cache@8.0.0: dependencies: @@ -5800,6 +5626,12 @@ snapshots: locate-path: 6.0.0 path-exists: 4.0.0 + fix-dts-default-cjs-exports@1.0.1: + dependencies: + magic-string: 0.30.17 + mlly: 1.7.4 + rollup: 4.35.0 + flat-cache@4.0.1: dependencies: flatted: 3.3.1 @@ -5980,8 +5812,7 @@ snapshots: graphemer@1.4.0: {} - graphql@16.10.0: - optional: true + graphql@16.12.0: {} handlebars@4.7.8: dependencies: @@ -6022,8 +5853,7 @@ snapshots: dependencies: function-bind: 1.1.2 - headers-polyfill@4.0.3: - optional: true + headers-polyfill@4.0.3: {} hosted-git-info@2.8.9: {} @@ -6153,8 +5983,7 @@ snapshots: is-negative-zero@2.0.3: {} - is-node-process@1.2.0: - optional: true + is-node-process@1.2.0: {} is-number-object@1.0.7: dependencies: @@ -6265,6 +6094,8 @@ 
snapshots: js-tokens@4.0.0: {} + js-tokens@9.0.1: {} + js-yaml@3.14.1: dependencies: argparse: 1.0.10 @@ -6292,12 +6123,6 @@ snapshots: dependencies: minimist: 1.2.8 - jsondiffpatch@0.6.0: - dependencies: - '@types/diff-match-patch': 1.0.36 - chalk: 5.4.1 - diff-match-patch: 1.0.5 - jsonfile@4.0.0: optionalDependencies: graceful-fs: 4.2.11 @@ -6418,11 +6243,7 @@ snapshots: lodash@4.17.21: {} - loose-envify@1.4.0: - dependencies: - js-tokens: 4.0.0 - - loupe@3.1.3: {} + loupe@3.2.1: {} lru-cache@10.4.3: {} @@ -6511,34 +6332,32 @@ snapshots: ms@2.1.3: {} - msw@2.7.1(@types/node@22.13.10)(typescript@5.7.3): + msw@2.12.4(@types/node@22.13.10)(typescript@5.9.3): dependencies: - '@bundled-es-modules/cookie': 2.0.1 - '@bundled-es-modules/statuses': 1.0.1 - '@bundled-es-modules/tough-cookie': 0.1.6 '@inquirer/confirm': 5.1.6(@types/node@22.13.10) - '@mswjs/interceptors': 0.37.6 + '@mswjs/interceptors': 0.40.0 '@open-draft/deferred-promise': 2.2.0 - '@open-draft/until': 2.1.0 - '@types/cookie': 0.6.0 - '@types/statuses': 2.0.5 - graphql: 16.10.0 + '@types/statuses': 2.0.6 + cookie: 1.1.1 + graphql: 16.12.0 headers-polyfill: 4.0.3 is-node-process: 1.2.0 outvariant: 1.4.3 path-to-regexp: 6.3.0 picocolors: 1.1.1 + rettime: 0.7.0 + statuses: 2.0.2 strict-event-emitter: 0.5.1 - type-fest: 4.37.0 + tough-cookie: 6.0.0 + type-fest: 5.4.1 + until-async: 3.0.2 yargs: 17.7.2 optionalDependencies: - typescript: 5.7.3 + typescript: 5.9.3 transitivePeerDependencies: - '@types/node' - optional: true - mute-stream@2.0.0: - optional: true + mute-stream@2.0.0: {} mz@2.7.0: dependencies: @@ -6652,8 +6471,7 @@ snapshots: outdent@0.5.0: {} - outvariant@1.4.3: - optional: true + outvariant@1.4.3: {} own-keys@1.0.1: dependencies: @@ -6711,8 +6529,7 @@ snapshots: lru-cache: 10.4.3 minipass: 7.1.2 - path-to-regexp@6.3.0: - optional: true + path-to-regexp@6.3.0: {} path-type@4.0.0: {} @@ -6728,7 +6545,7 @@ snapshots: picomatch@2.3.1: {} - picomatch@4.0.2: {} + picomatch@4.0.3: {} pify@4.0.1: {} @@ 
-6763,16 +6580,8 @@ snapshots: prettier@3.4.2: {} - psl@1.15.0: - dependencies: - punycode: 2.3.1 - optional: true - punycode@2.3.1: {} - querystringify@2.2.0: - optional: true - queue-microtask@1.2.3: {} quick-lru@4.0.1: {} @@ -6782,10 +6591,6 @@ snapshots: defu: 6.1.4 destr: 2.0.3 - react@18.3.1: - dependencies: - loose-envify: 1.4.0 - read-pkg-up@7.0.1: dependencies: find-up: 4.1.0 @@ -6852,9 +6657,6 @@ snapshots: require-from-string@2.0.2: {} - requires-port@1.0.0: - optional: true - resolve-from@4.0.0: {} resolve-from@5.0.0: {} @@ -6877,32 +6679,9 @@ snapshots: path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 - reusify@1.0.4: {} + rettime@0.7.0: {} - rollup@4.33.0: - dependencies: - '@types/estree': 1.0.6 - optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.33.0 - '@rollup/rollup-android-arm64': 4.33.0 - '@rollup/rollup-darwin-arm64': 4.33.0 - '@rollup/rollup-darwin-x64': 4.33.0 - '@rollup/rollup-freebsd-arm64': 4.33.0 - '@rollup/rollup-freebsd-x64': 4.33.0 - '@rollup/rollup-linux-arm-gnueabihf': 4.33.0 - '@rollup/rollup-linux-arm-musleabihf': 4.33.0 - '@rollup/rollup-linux-arm64-gnu': 4.33.0 - '@rollup/rollup-linux-arm64-musl': 4.33.0 - '@rollup/rollup-linux-loongarch64-gnu': 4.33.0 - '@rollup/rollup-linux-powerpc64le-gnu': 4.33.0 - '@rollup/rollup-linux-riscv64-gnu': 4.33.0 - '@rollup/rollup-linux-s390x-gnu': 4.33.0 - '@rollup/rollup-linux-x64-gnu': 4.33.0 - '@rollup/rollup-linux-x64-musl': 4.33.0 - '@rollup/rollup-win32-arm64-msvc': 4.33.0 - '@rollup/rollup-win32-ia32-msvc': 4.33.0 - '@rollup/rollup-win32-x64-msvc': 4.33.0 - fsevents: 2.3.3 + reusify@1.0.4: {} rollup@4.35.0: dependencies: @@ -6969,8 +6748,6 @@ snapshots: safer-buffer@2.1.2: {} - secure-json-parse@2.7.0: {} - semver@5.7.2: {} semver@6.3.1: {} @@ -7080,13 +6857,11 @@ snapshots: stackback@0.0.2: {} - statuses@2.0.1: - optional: true + statuses@2.0.2: {} - std-env@3.8.0: {} + std-env@3.10.0: {} - strict-event-emitter@0.5.1: - optional: true + strict-event-emitter@0.5.1: {} 
string-width@4.2.3: dependencies: @@ -7164,6 +6939,10 @@ snapshots: strip-json-comments@3.1.1: {} + strip-literal@3.1.0: + dependencies: + js-tokens: 9.0.1 + sucrase@3.35.0: dependencies: '@jridgewell/gen-mapping': 0.3.8 @@ -7180,11 +6959,7 @@ snapshots: supports-preserve-symlinks-flag@1.0.0: {} - swr@2.3.3(react@18.3.1): - dependencies: - dequal: 2.0.3 - react: 18.3.1 - use-sync-external-store: 1.4.0(react@18.3.1) + tagged-tag@1.0.0: {} tar@6.2.1: dependencies: @@ -7207,8 +6982,6 @@ snapshots: dependencies: any-promise: 1.3.0 - throttleit@2.1.0: {} - through2@4.0.2: dependencies: readable-stream: 3.6.2 @@ -7219,16 +6992,22 @@ snapshots: tinyexec@0.3.2: {} - tinyglobby@0.2.12: + tinyglobby@0.2.15: dependencies: - fdir: 6.4.3(picomatch@4.0.2) - picomatch: 4.0.2 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + tinypool@1.1.1: {} - tinypool@1.0.2: {} + tinyrainbow@2.0.0: {} - tinyrainbow@1.2.0: {} + tinyspy@4.0.4: {} - tinyspy@3.0.2: {} + tldts-core@7.0.19: {} + + tldts@7.0.19: + dependencies: + tldts-core: 7.0.19 tmp@0.0.33: dependencies: @@ -7238,13 +7017,9 @@ snapshots: dependencies: is-number: 7.0.0 - tough-cookie@4.1.4: + tough-cookie@6.0.0: dependencies: - psl: 1.15.0 - punycode: 2.3.1 - universalify: 0.2.0 - url-parse: 1.5.10 - optional: true + tldts: 7.0.19 tr46@0.0.3: {} @@ -7256,13 +7031,13 @@ snapshots: trim-newlines@3.0.1: {} - ts-api-utils@2.0.1(typescript@5.7.3): + ts-api-utils@2.0.1(typescript@5.9.3): dependencies: - typescript: 5.7.3 + typescript: 5.9.3 ts-interface-checker@0.1.13: {} - ts-node@10.9.2(@types/node@20.5.1)(typescript@5.6.3): + ts-node@10.9.2(@types/node@20.5.1)(typescript@5.9.3): dependencies: '@cspotcode/source-map-support': 0.8.1 '@tsconfig/node10': 1.0.11 @@ -7276,13 +7051,13 @@ snapshots: create-require: 1.1.1 diff: 4.0.2 make-error: 1.3.6 - typescript: 5.6.3 + typescript: 5.9.3 v8-compile-cache-lib: 3.0.1 yn: 3.1.1 - tsconfck@3.1.5(typescript@5.7.3): + tsconfck@3.1.5(typescript@5.9.3): optionalDependencies: - typescript: 5.7.3 
+ typescript: 5.9.3 tsconfig-paths@3.15.0: dependencies: @@ -7291,14 +7066,15 @@ snapshots: minimist: 1.2.8 strip-bom: 3.0.0 - tsup@8.4.0(jiti@2.4.2)(postcss@8.5.1)(tsx@4.19.3)(typescript@5.7.3)(yaml@2.7.0): + tsup@8.5.0(jiti@2.4.2)(postcss@8.5.1)(tsx@4.19.3)(typescript@5.9.3)(yaml@2.7.0): dependencies: bundle-require: 5.1.0(esbuild@0.25.1) cac: 6.7.14 chokidar: 4.0.3 consola: 3.4.0 - debug: 4.4.0 + debug: 4.4.3 esbuild: 0.25.1 + fix-dts-default-cjs-exports: 1.0.1 joycon: 3.1.1 picocolors: 1.1.1 postcss-load-config: 6.0.1(jiti@2.4.2)(postcss@8.5.1)(tsx@4.19.3)(yaml@2.7.0) @@ -7307,11 +7083,11 @@ snapshots: source-map: 0.8.0-beta.0 sucrase: 3.35.0 tinyexec: 0.3.2 - tinyglobby: 0.2.12 + tinyglobby: 0.2.15 tree-kill: 1.2.2 optionalDependencies: postcss: 8.5.1 - typescript: 5.7.3 + typescript: 5.9.3 transitivePeerDependencies: - jiti - supports-color @@ -7360,8 +7136,7 @@ snapshots: type-fest@0.18.1: {} - type-fest@0.21.3: - optional: true + type-fest@0.21.3: {} type-fest@0.6.0: {} @@ -7369,6 +7144,10 @@ snapshots: type-fest@4.37.0: {} + type-fest@5.4.1: + dependencies: + tagged-tag: 1.0.0 + typed-array-buffer@1.0.2: dependencies: call-bind: 1.0.7 @@ -7434,19 +7213,17 @@ snapshots: possible-typed-array-names: 1.0.0 reflect.getprototypeof: 1.0.10 - typescript-eslint@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3): + typescript-eslint@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3): dependencies: - '@typescript-eslint/eslint-plugin': 8.26.1(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3))(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) - '@typescript-eslint/parser': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) - '@typescript-eslint/utils': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/eslint-plugin': 8.26.1(@typescript-eslint/parser@8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3))(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) + '@typescript-eslint/parser': 
8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) + '@typescript-eslint/utils': 8.26.1(eslint@9.19.0(jiti@2.4.2))(typescript@5.9.3) eslint: 9.19.0(jiti@2.4.2) - typescript: 5.7.3 + typescript: 5.9.3 transitivePeerDependencies: - supports-color - typescript@5.6.3: {} - - typescript@5.7.3: {} + typescript@5.9.3: {} ufo@1.5.4: {} @@ -7477,25 +7254,14 @@ snapshots: universalify@0.1.2: {} - universalify@0.2.0: - optional: true - universalify@2.0.1: {} + until-async@3.0.2: {} + uri-js@4.4.1: dependencies: punycode: 2.3.1 - url-parse@1.5.10: - dependencies: - querystringify: 2.2.0 - requires-port: 1.0.0 - optional: true - - use-sync-external-store@1.4.0(react@18.3.1): - dependencies: - react: 18.3.1 - util-deprecate@1.0.2: {} uuid@8.3.2: {} @@ -7507,12 +7273,12 @@ snapshots: spdx-correct: 3.2.0 spdx-expression-parse: 3.0.1 - vite-node@2.1.8(@types/node@22.13.10)(lightningcss@1.29.1): + vite-node@3.2.4(@types/node@22.13.10)(lightningcss@1.29.1): dependencies: cac: 6.7.14 - debug: 4.4.0 - es-module-lexer: 1.6.0 - pathe: 1.1.2 + debug: 4.4.3 + es-module-lexer: 1.7.0 + pathe: 2.0.3 vite: 5.4.14(@types/node@22.13.10)(lightningcss@1.29.1) transitivePeerDependencies: - '@types/node' @@ -7525,11 +7291,11 @@ snapshots: - supports-color - terser - vite-tsconfig-paths@5.1.4(typescript@5.7.3)(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1)): + vite-tsconfig-paths@5.1.4(typescript@5.9.3)(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1)): dependencies: debug: 4.4.0 globrex: 0.1.2 - tsconfck: 3.1.5(typescript@5.7.3) + tsconfck: 3.1.5(typescript@5.9.3) optionalDependencies: vite: 5.4.14(@types/node@22.13.10)(lightningcss@1.29.1) transitivePeerDependencies: @@ -7540,33 +7306,36 @@ snapshots: dependencies: esbuild: 0.21.5 postcss: 8.5.1 - rollup: 4.33.0 + rollup: 4.35.0 optionalDependencies: '@types/node': 22.13.10 fsevents: 2.3.3 lightningcss: 1.29.1 - 
vitest@2.1.8(@edge-runtime/vm@5.0.0)(@types/node@22.13.10)(lightningcss@1.29.1)(msw@2.7.1(@types/node@22.13.10)(typescript@5.7.3)): - dependencies: - '@vitest/expect': 2.1.8 - '@vitest/mocker': 2.1.8(msw@2.7.1(@types/node@22.13.10)(typescript@5.7.3))(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1)) - '@vitest/pretty-format': 2.1.8 - '@vitest/runner': 2.1.8 - '@vitest/snapshot': 2.1.8 - '@vitest/spy': 2.1.8 - '@vitest/utils': 2.1.8 - chai: 5.1.2 - debug: 4.4.0 - expect-type: 1.1.0 + vitest@3.2.4(@edge-runtime/vm@5.0.0)(@types/node@22.13.10)(lightningcss@1.29.1)(msw@2.12.4(@types/node@22.13.10)(typescript@5.9.3)): + dependencies: + '@types/chai': 5.2.3 + '@vitest/expect': 3.2.4 + '@vitest/mocker': 3.2.4(msw@2.12.4(@types/node@22.13.10)(typescript@5.9.3))(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1)) + '@vitest/pretty-format': 3.2.4 + '@vitest/runner': 3.2.4 + '@vitest/snapshot': 3.2.4 + '@vitest/spy': 3.2.4 + '@vitest/utils': 3.2.4 + chai: 5.3.3 + debug: 4.4.3 + expect-type: 1.3.0 magic-string: 0.30.17 - pathe: 1.1.2 - std-env: 3.8.0 + pathe: 2.0.3 + picomatch: 4.0.3 + std-env: 3.10.0 tinybench: 2.9.0 tinyexec: 0.3.2 - tinypool: 1.0.2 - tinyrainbow: 1.2.0 + tinyglobby: 0.2.15 + tinypool: 1.1.1 + tinyrainbow: 2.0.0 vite: 5.4.14(@types/node@22.13.10)(lightningcss@1.29.1) - vite-node: 2.1.8(@types/node@22.13.10)(lightningcss@1.29.1) + vite-node: 3.2.4(@types/node@22.13.10)(lightningcss@1.29.1) why-is-node-running: 2.3.0 optionalDependencies: '@edge-runtime/vm': 5.0.0 @@ -7671,7 +7440,6 @@ snapshots: ansi-styles: 4.3.0 string-width: 4.2.3 strip-ansi: 6.0.1 - optional: true wrap-ansi@7.0.0: dependencies: @@ -7712,11 +7480,6 @@ snapshots: yocto-queue@0.1.0: {} - yoctocolors-cjs@2.1.2: - optional: true - - zod-to-json-schema@3.24.4(zod@3.24.2): - dependencies: - zod: 3.24.2 + yoctocolors-cjs@2.1.2: {} - zod@3.24.2: {} + zod@4.3.6: {} diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index 9520e6d..73d231e 100644 --- a/pnpm-workspace.yaml +++ 
b/pnpm-workspace.yaml @@ -8,4 +8,4 @@ catalog: "@types/node": "^22.10.10" eslint: ^9.19.0 prettier: ^3.4.2 - typescript: ^5.7.3 + typescript: ^5.9.2 From 26754b5b90734dc4c557a667598a5a63478c9d08 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Thu, 22 Jan 2026 21:06:20 -0800 Subject: [PATCH 03/22] run prettier --- packages/ai-sdk-provider-2/package.json | 2 + ...onvert-to-openrouter-chat-messages.test.ts | 585 ++++---- .../convert-to-openrouter-chat-messages.ts | 144 +- .../ai-sdk-provider-2/src/chat/errors.test.ts | 69 +- .../src/chat/file-parser-schema.test.ts | 71 +- .../src/chat/file-url-utils.ts | 65 +- .../src/chat/get-tool-choice.ts | 27 +- .../ai-sdk-provider-2/src/chat/index.test.ts | 1205 ++++++++--------- packages/ai-sdk-provider-2/src/chat/index.ts | 299 ++-- .../src/chat/large-pdf-response.test.ts | 75 +- .../src/chat/payload-comparison.test.ts | 72 +- .../ai-sdk-provider-2/src/chat/schemas.ts | 29 +- ...convert-to-openrouter-completion-prompt.ts | 66 +- .../src/completion/index.test.ts | 319 ++--- .../ai-sdk-provider-2/src/completion/index.ts | 120 +- .../src/completion/schemas.ts | 11 +- .../src/embedding/index.test.ts | 150 +- .../ai-sdk-provider-2/src/embedding/index.ts | 36 +- .../src/embedding/schemas.ts | 10 +- packages/ai-sdk-provider-2/src/facade.ts | 47 +- packages/ai-sdk-provider-2/src/index.ts | 6 +- .../src/internal copy/index.ts | 10 +- packages/ai-sdk-provider-2/src/provider.ts | 68 +- .../src/schemas/error-response.test.ts | 40 +- .../src/schemas/error-response.ts | 15 +- .../ai-sdk-provider-2/src/schemas/format.ts | 10 +- .../ai-sdk-provider-2/src/schemas/image.ts | 4 +- .../src/schemas/provider-metadata.ts | 11 +- .../src/schemas/reasoning-details.ts | 29 +- .../src/test-utils/test-server.ts | 32 +- .../src/tests/provider-options.test.ts | 30 +- .../src/tests/stream-usage-accounting.test.ts | 74 +- .../src/tests/usage-accounting.test.ts | 153 +-- packages/ai-sdk-provider-2/src/types/index.ts | 6 +- 
.../openrouter-chat-completions-input.ts | 42 +- .../src/types/openrouter-chat-settings.ts | 5 +- .../types/openrouter-completion-settings.ts | 2 +- .../types/openrouter-embedding-settings.ts | 6 +- .../src/utils/map-finish-reason.ts | 34 +- .../src/utils/remove-undefined.ts | 7 +- .../src/utils/with-user-agent-suffix.ts | 10 +- packages/ai-sdk-provider-2/src/version.ts | 2 +- pnpm-lock.yaml | 17 +- 43 files changed, 1905 insertions(+), 2110 deletions(-) diff --git a/packages/ai-sdk-provider-2/package.json b/packages/ai-sdk-provider-2/package.json index ffdd6b5..06a5d36 100644 --- a/packages/ai-sdk-provider-2/package.json +++ b/packages/ai-sdk-provider-2/package.json @@ -54,6 +54,8 @@ "@hyperbolic/eslint-config": "workspace:*", "@hyperbolic/prettier-config": "workspace:*", "@hyperbolic/tsconfig": "workspace:*", + "@openrouter/sdk": "^0.1.27", + "@types/json-schema": "7.0.15", "ai": "^6.0.48", "eslint": "catalog:", "handlebars": "^4.7.8", diff --git a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts b/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts index b90f1ec..71c08fc 100644 --- a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts @@ -1,18 +1,18 @@ -import { ReasoningDetailType } from '../schemas/reasoning-details'; -import { convertToOpenRouterChatMessages } from './convert-to-openrouter-chat-messages'; -import { MIME_TO_FORMAT } from './file-url-utils'; +import { ReasoningDetailType } from "../schemas/reasoning-details"; +import { convertToOpenRouterChatMessages } from "./convert-to-openrouter-chat-messages"; +import { MIME_TO_FORMAT } from "./file-url-utils"; -describe('user messages', () => { - it('should convert image Uint8Array', async () => { +describe("user messages", () => { + it("should convert image Uint8Array", async () => { const result = convertToOpenRouterChatMessages([ { - 
role: 'user', + role: "user", content: [ - { type: 'text', text: 'Hello' }, + { type: "text", text: "Hello" }, { - type: 'file', + type: "file", data: new Uint8Array([0, 1, 2, 3]), - mediaType: 'image/png', + mediaType: "image/png", }, ], }, @@ -20,28 +20,28 @@ describe('user messages', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'Hello' }, + { type: "text", text: "Hello" }, { - type: 'image_url', - image_url: { url: 'data:image/png;base64,AAECAw==' }, + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, }, ], }, ]); }); - it('should convert image urls', async () => { + it("should convert image urls", async () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'Hello' }, + { type: "text", text: "Hello" }, { - type: 'file', - data: 'https://example.com/image.png', - mediaType: 'image/png', + type: "file", + data: "https://example.com/image.png", + mediaType: "image/png", }, ], }, @@ -49,28 +49,28 @@ describe('user messages', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'Hello' }, + { type: "text", text: "Hello" }, { - type: 'image_url', - image_url: { url: 'https://example.com/image.png' }, + type: "image_url", + image_url: { url: "https://example.com/image.png" }, }, ], }, ]); }); - it('should convert messages with image base64', async () => { + it("should convert messages with image base64", async () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'Hello' }, + { type: "text", text: "Hello" }, { - type: 'file', - data: 'data:image/png;base64,AAECAw==', - mediaType: 'image/png', + type: "file", + data: "data:image/png;base64,AAECAw==", + mediaType: "image/png", }, ], }, @@ -78,41 +78,38 @@ describe('user messages', () => { expect(result).toEqual([ { - role: 'user', + role: "user", 
content: [ - { type: 'text', text: 'Hello' }, + { type: "text", text: "Hello" }, { - type: 'image_url', - image_url: { url: 'data:image/png;base64,AAECAw==' }, + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, }, ], }, ]); }); - it('should convert messages with only a text part to a string content', async () => { + it("should convert messages with only a text part to a string content", async () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ]); - expect(result).toEqual([{ role: 'user', content: 'Hello' }]); + expect(result).toEqual([{ role: "user", content: "Hello" }]); }); it.each( - Object.entries(MIME_TO_FORMAT).map(([mimeSubtype, format]) => [ - `audio/${mimeSubtype}`, - format, - ]), - )('should convert %s to input_audio with %s format', (mediaType, expectedFormat) => { + Object.entries(MIME_TO_FORMAT).map(([mimeSubtype, format]) => [`audio/${mimeSubtype}`, format]), + )("should convert %s to input_audio with %s format", (mediaType, expectedFormat) => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ { - type: 'file', + type: "file", data: new Uint8Array([0, 1, 2, 3]), mediaType, }, @@ -122,12 +119,12 @@ describe('user messages', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'input_audio', + type: "input_audio", input_audio: { - data: 'AAECAw==', + data: "AAECAw==", format: expectedFormat, }, }, @@ -136,15 +133,15 @@ describe('user messages', () => { ]); }); - it('should convert audio base64 data URL to input_audio', async () => { + it("should convert audio base64 data URL to input_audio", async () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ { - type: 'file', - data: 'data:audio/mpeg;base64,AAECAw==', - mediaType: 'audio/mpeg', + type: "file", + data: 
"data:audio/mpeg;base64,AAECAw==", + mediaType: "audio/mpeg", }, ], }, @@ -152,13 +149,13 @@ describe('user messages', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'input_audio', + type: "input_audio", input_audio: { - data: 'AAECAw==', - format: 'mp3', + data: "AAECAw==", + format: "mp3", }, }, ], @@ -166,15 +163,15 @@ describe('user messages', () => { ]); }); - it('should convert raw audio base64 string to input_audio', async () => { + it("should convert raw audio base64 string to input_audio", async () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ { - type: 'file', - data: 'AAECAw==', - mediaType: 'audio/mpeg', + type: "file", + data: "AAECAw==", + mediaType: "audio/mpeg", }, ], }, @@ -182,13 +179,13 @@ describe('user messages', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'input_audio', + type: "input_audio", input_audio: { - data: 'AAECAw==', - format: 'mp3', + data: "AAECAw==", + format: "mp3", }, }, ], @@ -196,16 +193,16 @@ describe('user messages', () => { ]); }); - it('should throw error for audio URLs', async () => { + it("should throw error for audio URLs", async () => { expect(() => convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ { - type: 'file', - data: 'https://example.com/audio.mp3', - mediaType: 'audio/mpeg', + type: "file", + data: "https://example.com/audio.mp3", + mediaType: "audio/mpeg", }, ], }, @@ -213,16 +210,16 @@ describe('user messages', () => { ).toThrow(/Audio files cannot be provided as URLs/); }); - it('should throw error for unsupported audio formats', async () => { + it("should throw error for unsupported audio formats", async () => { expect(() => convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ { - type: 'file', + type: "file", data: new Uint8Array([0, 1, 2, 3]), - mediaType: 'audio/webm', + mediaType: "audio/webm", }, ], }, @@ -231,15 
+228,15 @@ describe('user messages', () => { }); }); -describe('cache control', () => { - it('should pass cache control from system message provider metadata', () => { +describe("cache control", () => { + it("should pass cache control from system message provider metadata", () => { const result = convertToOpenRouterChatMessages([ { - role: 'system', - content: 'System prompt', + role: "system", + content: "System prompt", providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -247,21 +244,21 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'system', - content: 'System prompt', - cache_control: { type: 'ephemeral' }, + role: "system", + content: "System prompt", + cache_control: { type: "ephemeral" }, }, ]); }); - it('should pass cache control from user message provider metadata (single text part)', () => { + it("should pass cache control from user message provider metadata (single text part)", () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -269,29 +266,29 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', - cache_control: { type: 'ephemeral' }, + type: "text", + text: "Hello", + cache_control: { type: "ephemeral" }, }, ], }, ]); }); - it('should pass cache control from content part provider metadata (single text part)', () => { + it("should pass cache control from content part provider metadata (single text part)", () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', + type: "text", + text: "Hello", providerOptions: { anthropic: { - cacheControl: { 
type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -301,33 +298,33 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', - cache_control: { type: 'ephemeral' }, + type: "text", + text: "Hello", + cache_control: { type: "ephemeral" }, }, ], }, ]); }); - it('should pass cache control from user message provider metadata (multiple parts)', () => { + it("should pass cache control from user message provider metadata (multiple parts)", () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'Hello' }, + { type: "text", text: "Hello" }, { - type: 'file', + type: "file", data: new Uint8Array([0, 1, 2, 3]), - mediaType: 'image/png', + mediaType: "image/png", }, ], providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -335,59 +332,59 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', - cache_control: { type: 'ephemeral' }, + type: "text", + text: "Hello", + cache_control: { type: "ephemeral" }, }, { - type: 'image_url', - image_url: { url: 'data:image/png;base64,AAECAw==' }, - cache_control: { type: 'ephemeral' }, + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, + cache_control: { type: "ephemeral" }, }, ], }, ]); }); - it('should pass cache control from user message provider metadata without cache control (single text part)', () => { + it("should pass cache control from user message provider metadata without cache control (single text part)", () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ]); expect(result).toEqual([ { - role: 'user', - content: 'Hello', + role: "user", + 
content: "Hello", }, ]); }); - it('should pass cache control to multiple image parts from user message provider metadata', () => { + it("should pass cache control to multiple image parts from user message provider metadata", () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'Hello' }, + { type: "text", text: "Hello" }, { - type: 'file', + type: "file", data: new Uint8Array([0, 1, 2, 3]), - mediaType: 'image/png', + mediaType: "image/png", }, { - type: 'file', + type: "file", data: new Uint8Array([4, 5, 6, 7]), - mediaType: 'image/jpeg', + mediaType: "image/jpeg", }, ], providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -395,48 +392,48 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', - cache_control: { type: 'ephemeral' }, + type: "text", + text: "Hello", + cache_control: { type: "ephemeral" }, }, { - type: 'image_url', - image_url: { url: 'data:image/png;base64,AAECAw==' }, - cache_control: { type: 'ephemeral' }, + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, + cache_control: { type: "ephemeral" }, }, { - type: 'image_url', - image_url: { url: 'data:image/jpeg;base64,BAUGBw==' }, - cache_control: { type: 'ephemeral' }, + type: "image_url", + image_url: { url: "data:image/jpeg;base64,BAUGBw==" }, + cache_control: { type: "ephemeral" }, }, ], }, ]); }); - it('should pass cache control to file parts from user message provider metadata', () => { + it("should pass cache control to file parts from user message provider metadata", () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'Hello' }, + { type: "text", text: "Hello" }, { - type: 'file', - data: 'ZmlsZSBjb250ZW50', - mediaType: 'text/plain', + type: "file", + data: 
"ZmlsZSBjb250ZW50", + mediaType: "text/plain", providerOptions: { openrouter: { - filename: 'file.txt', + filename: "file.txt", }, }, }, ], providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -444,53 +441,53 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', - cache_control: { type: 'ephemeral' }, + type: "text", + text: "Hello", + cache_control: { type: "ephemeral" }, }, { - type: 'file', + type: "file", file: { - filename: 'file.txt', - file_data: 'data:text/plain;base64,ZmlsZSBjb250ZW50', + filename: "file.txt", + file_data: "data:text/plain;base64,ZmlsZSBjb250ZW50", }, - cache_control: { type: 'ephemeral' }, + cache_control: { type: "ephemeral" }, }, ], }, ]); }); - it('should handle mixed part-specific and message-level cache control for multiple parts', () => { + it("should handle mixed part-specific and message-level cache control for multiple parts", () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', + type: "text", + text: "Hello", // No part-specific provider metadata }, { - type: 'file', + type: "file", data: new Uint8Array([0, 1, 2, 3]), - mediaType: 'image/png', + mediaType: "image/png", providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, { - type: 'file', - data: 'ZmlsZSBjb250ZW50', - mediaType: 'text/plain', + type: "file", + data: "ZmlsZSBjb250ZW50", + mediaType: "text/plain", providerOptions: { openrouter: { - filename: 'file.txt', + filename: "file.txt", }, }, // No part-specific provider metadata @@ -498,7 +495,7 @@ describe('cache control', () => { ], providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -506,49 +503,49 @@ describe('cache control', () => { 
expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', - cache_control: { type: 'ephemeral' }, + type: "text", + text: "Hello", + cache_control: { type: "ephemeral" }, }, { - type: 'image_url', - image_url: { url: 'data:image/png;base64,AAECAw==' }, - cache_control: { type: 'ephemeral' }, + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, + cache_control: { type: "ephemeral" }, }, { - type: 'file', + type: "file", file: { - filename: 'file.txt', - file_data: 'data:text/plain;base64,ZmlsZSBjb250ZW50', + filename: "file.txt", + file_data: "data:text/plain;base64,ZmlsZSBjb250ZW50", }, - cache_control: { type: 'ephemeral' }, + cache_control: { type: "ephemeral" }, }, ], }, ]); }); - it('should pass cache control from individual content part provider metadata', () => { + it("should pass cache control from individual content part provider metadata", () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', + type: "text", + text: "Hello", providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, { - type: 'file', + type: "file", data: new Uint8Array([0, 1, 2, 3]), - mediaType: 'image/png', + mediaType: "image/png", }, ], }, @@ -556,30 +553,30 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Hello', - cache_control: { type: 'ephemeral' }, + type: "text", + text: "Hello", + cache_control: { type: "ephemeral" }, }, { - type: 'image_url', - image_url: { url: 'data:image/png;base64,AAECAw==' }, + type: "image_url", + image_url: { url: "data:image/png;base64,AAECAw==" }, }, ], }, ]); }); - it('should pass cache control from assistant message provider metadata', () => { + it("should pass cache control from assistant message provider metadata", () => { const result = 
convertToOpenRouterChatMessages([ { - role: 'assistant', - content: [{ type: 'text', text: 'Assistant response' }], + role: "assistant", + content: [{ type: "text", text: "Assistant response" }], providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -587,31 +584,31 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'assistant', - content: 'Assistant response', - cache_control: { type: 'ephemeral' }, + role: "assistant", + content: "Assistant response", + cache_control: { type: "ephemeral" }, }, ]); }); - it('should pass cache control from tool message provider metadata', () => { + it("should pass cache control from tool message provider metadata", () => { const result = convertToOpenRouterChatMessages([ { - role: 'tool', + role: "tool", content: [ { - type: 'tool-result', - toolCallId: 'call-123', - toolName: 'calculator', + type: "tool-result", + toolCallId: "call-123", + toolName: "calculator", output: { - type: 'json', + type: "json", value: { answer: 42 }, }, }, ], providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -619,22 +616,22 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'tool', - tool_call_id: 'call-123', + role: "tool", + tool_call_id: "call-123", content: JSON.stringify({ answer: 42 }), - cache_control: { type: 'ephemeral' }, + cache_control: { type: "ephemeral" }, }, ]); }); - it('should support the alias cache_control field', () => { + it("should support the alias cache_control field", () => { const result = convertToOpenRouterChatMessages([ { - role: 'system', - content: 'System prompt', + role: "system", + content: "System prompt", providerOptions: { anthropic: { - cache_control: { type: 'ephemeral' }, + cache_control: { type: "ephemeral" }, }, }, }, @@ -642,28 +639,28 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'system', - content: 
'System prompt', - cache_control: { type: 'ephemeral' }, + role: "system", + content: "System prompt", + cache_control: { type: "ephemeral" }, }, ]); }); - it('should support cache control on last message in content array', () => { + it("should support cache control on last message in content array", () => { const result = convertToOpenRouterChatMessages([ { - role: 'system', - content: 'System prompt', + role: "system", + content: "System prompt", }, { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'User prompt' }, + { type: "text", text: "User prompt" }, { - type: 'text', - text: 'User prompt 2', + type: "text", + text: "User prompt 2", providerOptions: { - anthropic: { cacheControl: { type: 'ephemeral' } }, + anthropic: { cacheControl: { type: "ephemeral" } }, }, }, ], @@ -672,38 +669,38 @@ describe('cache control', () => { expect(result).toEqual([ { - role: 'system', - content: 'System prompt', + role: "system", + content: "System prompt", }, { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'User prompt' }, + { type: "text", text: "User prompt" }, { - type: 'text', - text: 'User prompt 2', - cache_control: { type: 'ephemeral' }, + type: "text", + text: "User prompt 2", + cache_control: { type: "ephemeral" }, }, ], }, ]); }); - it('should pass cache control to audio input parts from user message provider metadata', () => { + it("should pass cache control to audio input parts from user message provider metadata", () => { const result = convertToOpenRouterChatMessages([ { - role: 'user', + role: "user", content: [ - { type: 'text', text: 'Listen to this' }, + { type: "text", text: "Listen to this" }, { - type: 'file', + type: "file", data: new Uint8Array([0, 1, 2, 3]), - mediaType: 'audio/mpeg', + mediaType: "audio/mpeg", }, ], providerOptions: { anthropic: { - cacheControl: { type: 'ephemeral' }, + cacheControl: { type: "ephemeral" }, }, }, }, @@ -711,20 +708,20 @@ describe('cache control', () => { expect(result).toEqual([ 
{ - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Listen to this', - cache_control: { type: 'ephemeral' }, + type: "text", + text: "Listen to this", + cache_control: { type: "ephemeral" }, }, { - type: 'input_audio', + type: "input_audio", input_audio: { - data: 'AAECAw==', - format: 'mp3', + data: "AAECAw==", + format: "mp3", }, - cache_control: { type: 'ephemeral' }, + cache_control: { type: "ephemeral" }, }, ], }, @@ -732,43 +729,43 @@ describe('cache control', () => { }); }); -describe('reasoning_details accumulation', () => { - it('should accumulate reasoning_details from reasoning part providerOptions', () => { +describe("reasoning_details accumulation", () => { + it("should accumulate reasoning_details from reasoning part providerOptions", () => { const result = convertToOpenRouterChatMessages([ { - role: 'assistant', + role: "assistant", content: [ { - type: 'reasoning', - text: 'First reasoning chunk', + type: "reasoning", + text: "First reasoning chunk", providerOptions: { openrouter: { reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'First reasoning chunk', + text: "First reasoning chunk", }, ], }, }, }, { - type: 'reasoning', - text: 'Second reasoning chunk', + type: "reasoning", + text: "Second reasoning chunk", providerOptions: { openrouter: { reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'Second reasoning chunk', + text: "Second reasoning chunk", }, ], }, }, }, { - type: 'text', - text: 'Final response', + type: "text", + text: "Final response", }, ], providerOptions: { @@ -776,11 +773,11 @@ describe('reasoning_details accumulation', () => { reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'First reasoning chunk', + text: "First reasoning chunk", }, { type: ReasoningDetailType.Text, - text: 'Second reasoning chunk', + text: "Second reasoning chunk", }, ], }, @@ -790,36 +787,36 @@ describe('reasoning_details accumulation', () => { expect(result).toEqual([ { - role: 'assistant', - 
content: 'Final response', - reasoning: 'First reasoning chunkSecond reasoning chunk', + role: "assistant", + content: "Final response", + reasoning: "First reasoning chunkSecond reasoning chunk", reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'First reasoning chunk', + text: "First reasoning chunk", }, { type: ReasoningDetailType.Text, - text: 'Second reasoning chunk', + text: "Second reasoning chunk", }, ], }, ]); }); - it('should use preserved reasoning_details from message-level providerOptions when available', () => { + it("should use preserved reasoning_details from message-level providerOptions when available", () => { const result = convertToOpenRouterChatMessages([ { - role: 'assistant', + role: "assistant", content: [ { - type: 'reasoning', - text: 'Reasoning text', + type: "reasoning", + text: "Reasoning text", // No providerOptions on part }, { - type: 'text', - text: 'Response', + type: "text", + text: "Response", }, ], providerOptions: { @@ -827,11 +824,11 @@ describe('reasoning_details accumulation', () => { reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'Preserved reasoning detail', + text: "Preserved reasoning detail", }, { type: ReasoningDetailType.Summary, - summary: 'Preserved summary', + summary: "Preserved summary", }, ], }, @@ -841,36 +838,36 @@ describe('reasoning_details accumulation', () => { expect(result).toEqual([ { - role: 'assistant', - content: 'Response', - reasoning: 'Reasoning text', + role: "assistant", + content: "Response", + reasoning: "Reasoning text", reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'Preserved reasoning detail', + text: "Preserved reasoning detail", }, { type: ReasoningDetailType.Summary, - summary: 'Preserved summary', + summary: "Preserved summary", }, ], }, ]); }); - it('should not include reasoning_details when not present in providerOptions', () => { + it("should not include reasoning_details when not present in providerOptions", () => { const result = 
convertToOpenRouterChatMessages([ { - role: 'assistant', + role: "assistant", content: [ { - type: 'reasoning', - text: 'Reasoning text', + type: "reasoning", + text: "Reasoning text", // No providerOptions }, { - type: 'text', - text: 'Response', + type: "text", + text: "Response", }, ], // No providerOptions @@ -879,42 +876,42 @@ describe('reasoning_details accumulation', () => { expect(result).toEqual([ { - role: 'assistant', - content: 'Response', - reasoning: 'Reasoning text', + role: "assistant", + content: "Response", + reasoning: "Reasoning text", // reasoning_details should be undefined when not preserved reasoning_details: undefined, }, ]); }); - it('should handle mixed reasoning parts with and without providerOptions', () => { + it("should handle mixed reasoning parts with and without providerOptions", () => { const result = convertToOpenRouterChatMessages([ { - role: 'assistant', + role: "assistant", content: [ { - type: 'reasoning', - text: 'First chunk', + type: "reasoning", + text: "First chunk", providerOptions: { openrouter: { reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'First chunk', + text: "First chunk", }, ], }, }, }, { - type: 'reasoning', - text: 'Second chunk', + type: "reasoning", + text: "Second chunk", // No providerOptions }, { - type: 'text', - text: 'Response', + type: "text", + text: "Response", }, ], providerOptions: { @@ -922,7 +919,7 @@ describe('reasoning_details accumulation', () => { reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'First chunk', + text: "First chunk", }, ], }, @@ -932,13 +929,13 @@ describe('reasoning_details accumulation', () => { expect(result).toEqual([ { - role: 'assistant', - content: 'Response', - reasoning: 'First chunkSecond chunk', + role: "assistant", + content: "Response", + reasoning: "First chunkSecond chunk", reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'First chunk', + text: "First chunk", }, ], }, diff --git 
a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts b/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts index 1309f1f..18efcf4 100644 --- a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts +++ b/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts @@ -4,19 +4,19 @@ import type { LanguageModelV3TextPart, LanguageModelV3ToolResultPart, SharedV3ProviderMetadata, -} from '@ai-sdk/provider'; -import type { ReasoningDetailUnion } from '../schemas/reasoning-details'; +} from "@ai-sdk/provider"; + +import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; import type { ChatCompletionContentPart, OpenRouterChatCompletionsInput, -} from '../types/openrouter-chat-completions-input'; - -import { OpenRouterProviderOptionsSchema } from '../schemas/provider-metadata'; -import { getFileUrl, getInputAudioData } from './file-url-utils'; -import { isUrl } from './is-url'; +} from "../types/openrouter-chat-completions-input"; +import { OpenRouterProviderOptionsSchema } from "../schemas/provider-metadata"; +import { getFileUrl, getInputAudioData } from "./file-url-utils"; +import { isUrl } from "./is-url"; // Type for OpenRouter Cache Control following Anthropic's pattern -export type OpenRouterCacheControl = { type: 'ephemeral' }; +export type OpenRouterCacheControl = { type: "ephemeral" }; function getCacheControl( providerMetadata: SharedV3ProviderMetadata | undefined, @@ -37,32 +37,30 @@ export function convertToOpenRouterChatMessages( const messages: OpenRouterChatCompletionsInput = []; for (const { role, content, providerOptions } of prompt) { switch (role) { - case 'system': { + case "system": { messages.push({ - role: 'system', + role: "system", content, cache_control: getCacheControl(providerOptions), }); break; } - case 'user': { - if (content.length === 1 && content[0]?.type === 'text') { + case "user": { + if (content.length === 1 && content[0]?.type === "text") 
{ const cacheControl = - getCacheControl(providerOptions) ?? - getCacheControl(content[0].providerOptions); - const contentWithCacheControl: string | ChatCompletionContentPart[] = - cacheControl - ? [ - { - type: 'text', - text: content[0].text, - cache_control: cacheControl, - }, - ] - : content[0].text; + getCacheControl(providerOptions) ?? getCacheControl(content[0].providerOptions); + const contentWithCacheControl: string | ChatCompletionContentPart[] = cacheControl + ? [ + { + type: "text", + text: content[0].text, + cache_control: cacheControl, + }, + ] + : content[0].text; messages.push({ - role: 'user', + role: "user", content: contentWithCacheControl, }); break; @@ -72,25 +70,24 @@ export function convertToOpenRouterChatMessages( const messageCacheControl = getCacheControl(providerOptions); const contentParts: ChatCompletionContentPart[] = content.map( (part: LanguageModelV3TextPart | LanguageModelV3FilePart) => { - const cacheControl = - getCacheControl(part.providerOptions) ?? messageCacheControl; + const cacheControl = getCacheControl(part.providerOptions) ?? 
messageCacheControl; switch (part.type) { - case 'text': + case "text": return { - type: 'text' as const, + type: "text" as const, text: part.text, // For text parts, only use part-specific cache control cache_control: cacheControl, }; - case 'file': { - if (part.mediaType?.startsWith('image/')) { + case "file": { + if (part.mediaType?.startsWith("image/")) { const url = getFileUrl({ part, - defaultMediaType: 'image/jpeg', + defaultMediaType: "image/jpeg", }); return { - type: 'image_url' as const, + type: "image_url" as const, image_url: { url, }, @@ -100,33 +97,31 @@ export function convertToOpenRouterChatMessages( } // Handle audio files for input_audio format - if (part.mediaType?.startsWith('audio/')) { + if (part.mediaType?.startsWith("audio/")) { return { - type: 'input_audio' as const, + type: "input_audio" as const, input_audio: getInputAudioData(part), cache_control: cacheControl, }; } const fileName = String( - part.providerOptions?.openrouter?.filename ?? - part.filename ?? - '', + part.providerOptions?.openrouter?.filename ?? part.filename ?? 
"", ); const fileData = getFileUrl({ part, - defaultMediaType: 'application/pdf', + defaultMediaType: "application/pdf", }); if ( isUrl({ url: fileData, - protocols: new Set(['http:', 'https:'] as const), + protocols: new Set(["http:", "https:"] as const), }) ) { return { - type: 'file' as const, + type: "file" as const, file: { filename: fileName, file_data: fileData, @@ -135,7 +130,7 @@ export function convertToOpenRouterChatMessages( } return { - type: 'file' as const, + type: "file" as const, file: { filename: fileName, file_data: fileData, @@ -145,8 +140,8 @@ export function convertToOpenRouterChatMessages( } default: { return { - type: 'text' as const, - text: '', + type: "text" as const, + text: "", cache_control: cacheControl, }; } @@ -156,34 +151,33 @@ export function convertToOpenRouterChatMessages( // For multi-part messages, don't add cache_control at the root level messages.push({ - role: 'user', + role: "user", content: contentParts, }); break; } - case 'assistant': { - let text = ''; - let reasoning = ''; + case "assistant": { + let text = ""; + let reasoning = ""; const toolCalls: Array<{ id: string; - type: 'function'; + type: "function"; function: { name: string; arguments: string }; }> = []; const accumulatedReasoningDetails: ReasoningDetailUnion[] = []; for (const part of content) { switch (part.type) { - case 'text': { + case "text": { text += part.text; break; } - case 'tool-call': { - const partReasoningDetails = ( - part.providerOptions as Record - )?.openrouter as Record | undefined; + case "tool-call": { + const partReasoningDetails = (part.providerOptions as Record) + ?.openrouter as Record | undefined; if ( partReasoningDetails?.reasoning_details && Array.isArray(partReasoningDetails.reasoning_details) @@ -194,7 +188,7 @@ export function convertToOpenRouterChatMessages( } toolCalls.push({ id: part.toolCallId, - type: 'function', + type: "function", function: { name: part.toolName, arguments: JSON.stringify(part.input), @@ -202,23 +196,23 
@@ export function convertToOpenRouterChatMessages( }); break; } - case 'reasoning': { + case "reasoning": { reasoning += part.text; - const parsedPartProviderOptions = - OpenRouterProviderOptionsSchema.safeParse(part.providerOptions); + const parsedPartProviderOptions = OpenRouterProviderOptionsSchema.safeParse( + part.providerOptions, + ); if ( parsedPartProviderOptions.success && parsedPartProviderOptions.data?.openrouter?.reasoning_details ) { accumulatedReasoningDetails.push( - ...parsedPartProviderOptions.data.openrouter - .reasoning_details, + ...parsedPartProviderOptions.data.openrouter.reasoning_details, ); } break; } - case 'file': + case "file": break; default: { break; @@ -227,8 +221,7 @@ export function convertToOpenRouterChatMessages( } // Check message-level providerOptions for preserved reasoning_details and annotations - const parsedProviderOptions = - OpenRouterProviderOptionsSchema.safeParse(providerOptions); + const parsedProviderOptions = OpenRouterProviderOptionsSchema.safeParse(providerOptions); const messageReasoningDetails = parsedProviderOptions.success ? parsedProviderOptions.data?.openrouter?.reasoning_details : undefined; @@ -247,7 +240,7 @@ export function convertToOpenRouterChatMessages( : undefined; messages.push({ - role: 'assistant', + role: "assistant", content: text, tool_calls: toolCalls.length > 0 ? toolCalls : undefined, reasoning: reasoning || undefined, @@ -259,21 +252,20 @@ export function convertToOpenRouterChatMessages( break; } - case 'tool': { + case "tool": { for (const toolResponse of content) { // Skip tool approval responses - only process tool results - if (toolResponse.type === 'tool-approval-response') { + if (toolResponse.type === "tool-approval-response") { continue; } const content = getToolResultContent(toolResponse); messages.push({ - role: 'tool', + role: "tool", tool_call_id: toolResponse.toolCallId, content, cache_control: - getCacheControl(providerOptions) ?? 
- getCacheControl(toolResponse.providerOptions), + getCacheControl(providerOptions) ?? getCacheControl(toolResponse.providerOptions), }); } break; @@ -290,14 +282,14 @@ export function convertToOpenRouterChatMessages( function getToolResultContent(input: LanguageModelV3ToolResultPart): string { switch (input.output.type) { - case 'text': - case 'error-text': + case "text": + case "error-text": return input.output.value; - case 'json': - case 'error-json': - case 'content': + case "json": + case "error-json": + case "content": return JSON.stringify(input.output.value); - case 'execution-denied': - return input.output.reason ?? 'Tool execution denied'; + case "execution-denied": + return input.output.reason ?? "Tool execution denied"; } } diff --git a/packages/ai-sdk-provider-2/src/chat/errors.test.ts b/packages/ai-sdk-provider-2/src/chat/errors.test.ts index 9589191..45309d4 100644 --- a/packages/ai-sdk-provider-2/src/chat/errors.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/errors.test.ts @@ -1,67 +1,63 @@ -import type { LanguageModelV3Prompt } from '@ai-sdk/provider'; +import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; +import { describe, expect, it } from "vitest"; -import { describe, expect, it } from 'vitest'; -import { createOpenRouter } from '../provider'; -import { createTestServer } from '../test-utils/test-server'; +import { createOpenRouter } from "../provider"; +import { createTestServer } from "../test-utils/test-server"; const TEST_PROMPT: LanguageModelV3Prompt = [ - { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, + { role: "user", content: [{ type: "text", text: "Hello" }] }, ]; const provider = createOpenRouter({ - baseURL: 'https://test.openrouter.ai/api/v1', - apiKey: 'test-api-key', + baseURL: "https://test.openrouter.ai/api/v1", + apiKey: "test-api-key", }); const server = createTestServer({ - 'https://test.openrouter.ai/api/v1/chat/completions': {}, + "https://test.openrouter.ai/api/v1/chat/completions": {}, }); 
-describe('HTTP 200 Error Response Handling', () => { - describe('doGenerate', () => { - it('should throw APICallError for HTTP 200 responses with error payloads', async () => { +describe("HTTP 200 Error Response Handling", () => { + describe("doGenerate", () => { + it("should throw APICallError for HTTP 200 responses with error payloads", async () => { // OpenRouter sometimes returns HTTP 200 with an error object instead of choices // This can occur for various server errors (e.g., internal errors, processing failures) - server.urls[ - 'https://test.openrouter.ai/api/v1/chat/completions' - ]!.response = { - type: 'json-value', + server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { + type: "json-value", body: { error: { - message: 'Internal Server Error', + message: "Internal Server Error", code: 500, }, - user_id: 'org_abc123', + user_id: "org_abc123", }, }; - const model = provider('anthropic/claude-3.5-sonnet'); + const model = provider("anthropic/claude-3.5-sonnet"); await expect( model.doGenerate({ prompt: TEST_PROMPT, }), - ).rejects.toThrow('Internal Server Error'); + ).rejects.toThrow("Internal Server Error"); }); - it('should parse successful responses normally when no error present', async () => { + it("should parse successful responses normally when no error present", async () => { // Normal successful response without error - server.urls[ - 'https://test.openrouter.ai/api/v1/chat/completions' - ]!.response = { - type: 'json-value', + server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { + type: "json-value", body: { - id: 'gen-123', - model: 'anthropic/claude-3.5-sonnet', - provider: 'Anthropic', + id: "gen-123", + model: "anthropic/claude-3.5-sonnet", + provider: "Anthropic", choices: [ { index: 0, message: { - role: 'assistant', - content: 'Hello! How can I help you?', + role: "assistant", + content: "Hello! 
How can I help you?", }, - finish_reason: 'stop', + finish_reason: "stop", }, ], usage: { @@ -72,7 +68,7 @@ describe('HTTP 200 Error Response Handling', () => { }, }; - const model = provider('anthropic/claude-3.5-sonnet'); + const model = provider("anthropic/claude-3.5-sonnet"); const result = await model.doGenerate({ prompt: TEST_PROMPT, @@ -80,14 +76,13 @@ describe('HTTP 200 Error Response Handling', () => { expect(result.content).toMatchObject([ { - type: 'text', - text: 'Hello! How can I help you?', + type: "text", + text: "Hello! How can I help you?", }, ]); - expect( - (result.usage.inputTokens?.total ?? 0) + - (result.usage.outputTokens?.total ?? 0), - ).toBe(18); + expect((result.usage.inputTokens?.total ?? 0) + (result.usage.outputTokens?.total ?? 0)).toBe( + 18, + ); }); }); }); diff --git a/packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts b/packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts index 084eabc..3e83ddd 100644 --- a/packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts @@ -1,35 +1,36 @@ -import { describe, expect, it } from 'vitest'; -import { OpenRouterNonStreamChatCompletionResponseSchema } from './schemas'; +import { describe, expect, it } from "vitest"; -describe('FileParser annotation schema', () => { - it('should parse response with all real API fields', () => { +import { OpenRouterNonStreamChatCompletionResponseSchema } from "./schemas"; + +describe("FileParser annotation schema", () => { + it("should parse response with all real API fields", () => { // This is based on actual API response structure (anonymized) const response = { - id: 'gen-xxx', - provider: 'Amazon Bedrock', - model: 'anthropic/claude-3.5-sonnet', - object: 'chat.completion', + id: "gen-xxx", + provider: "Amazon Bedrock", + model: "anthropic/claude-3.5-sonnet", + object: "chat.completion", created: 1763157299, choices: [ { logprobs: null, - finish_reason: 'stop', - 
native_finish_reason: 'stop', + finish_reason: "stop", + native_finish_reason: "stop", index: 0, message: { - role: 'assistant' as const, - content: 'Test response content', + role: "assistant" as const, + content: "Test response content", refusal: null, reasoning: null, annotations: [ { - type: 'file' as const, + type: "file" as const, file: { - hash: 'abc123', - name: '', + hash: "abc123", + name: "", content: [ { - type: 'text', + type: "text", text: '', }, ], @@ -46,43 +47,42 @@ describe('FileParser annotation schema', () => { }, }; - const result = - OpenRouterNonStreamChatCompletionResponseSchema.parse(response); + const result = OpenRouterNonStreamChatCompletionResponseSchema.parse(response); expect(result).toBeDefined(); }); - it('should parse file annotation with content array and extra fields', () => { + it("should parse file annotation with content array and extra fields", () => { const response = { - id: 'gen-test', - provider: 'Amazon Bedrock', - model: 'anthropic/claude-3.5-sonnet', - object: 'chat.completion', + id: "gen-test", + provider: "Amazon Bedrock", + model: "anthropic/claude-3.5-sonnet", + object: "chat.completion", created: 1763157061, choices: [ { logprobs: null, - finish_reason: 'stop', - native_finish_reason: 'stop', // Extra field from API + finish_reason: "stop", + native_finish_reason: "stop", // Extra field from API index: 0, message: { - role: 'assistant' as const, - content: 'Test response', + role: "assistant" as const, + content: "Test response", refusal: null, // Extra field from API reasoning: null, annotations: [ { - type: 'file' as const, + type: "file" as const, file: { - hash: '85bd49b97b7ff5be002d9f654776119f253c1cae333b49ba8f4a53da346284ba', - name: '', + hash: "85bd49b97b7ff5be002d9f654776119f253c1cae333b49ba8f4a53da346284ba", + name: "", content: [ { - type: 'text', + type: "text", text: '', }, { - type: 'text', - text: 'Some file content', + type: "text", + text: "Some file content", }, ], }, @@ -98,8 +98,7 @@ 
describe('FileParser annotation schema', () => { }, }; - const result = - OpenRouterNonStreamChatCompletionResponseSchema.parse(response); + const result = OpenRouterNonStreamChatCompletionResponseSchema.parse(response); // Check that parsing succeeded expect(result).toBeDefined(); @@ -107,6 +106,6 @@ describe('FileParser annotation schema', () => { // @ts-expect-error test intentionally inspects passthrough data const firstChoice = result.choices?.[0]; expect(firstChoice?.message.annotations).toBeDefined(); - expect(firstChoice?.message.annotations?.[0]?.type).toBe('file'); + expect(firstChoice?.message.annotations?.[0]?.type).toBe("file"); }); }); diff --git a/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts b/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts index d1ae6ae..54ef8bd 100644 --- a/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts +++ b/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts @@ -1,9 +1,9 @@ -import type { LanguageModelV3FilePart } from '@ai-sdk/provider'; -import type { OpenRouterAudioFormat } from '../types/openrouter-chat-completions-input'; +import type { LanguageModelV3FilePart } from "@ai-sdk/provider"; +import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils"; -import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; -import { OPENROUTER_AUDIO_FORMATS } from '../types/openrouter-chat-completions-input'; -import { isUrl } from './is-url'; +import type { OpenRouterAudioFormat } from "../types/openrouter-chat-completions-input"; +import { OPENROUTER_AUDIO_FORMATS } from "../types/openrouter-chat-completions-input"; +import { isUrl } from "./is-url"; export function getFileUrl({ part, @@ -22,21 +22,18 @@ export function getFileUrl({ if ( isUrl({ url: stringUrl, - protocols: new Set(['http:', 'https:'] as const), + protocols: new Set(["http:", "https:"] as const), }) ) { return stringUrl; } - return stringUrl.startsWith('data:') + return stringUrl.startsWith("data:") ? 
stringUrl : `data:${part.mediaType ?? defaultMediaType};base64,${stringUrl}`; } -export function getMediaType( - dataUrl: string, - defaultMediaType: string, -): string { +export function getMediaType(dataUrl: string, defaultMediaType: string): string { const match = dataUrl.match(/^data:([^;]+)/); return match ? (match[1] ?? defaultMediaType) : defaultMediaType; } @@ -49,31 +46,31 @@ export function getBase64FromDataUrl(dataUrl: string): string { /** MIME type to format mapping for normalization */ export const MIME_TO_FORMAT: Record = { // MP3 variants - mpeg: 'mp3', - mp3: 'mp3', + mpeg: "mp3", + mp3: "mp3", // WAV variants - 'x-wav': 'wav', - wave: 'wav', - wav: 'wav', + "x-wav": "wav", + wave: "wav", + wav: "wav", // OGG variants - ogg: 'ogg', - vorbis: 'ogg', + ogg: "ogg", + vorbis: "ogg", // AAC variants - aac: 'aac', - 'x-aac': 'aac', + aac: "aac", + "x-aac": "aac", // M4A variants - m4a: 'm4a', - 'x-m4a': 'm4a', - mp4: 'm4a', + m4a: "m4a", + "x-m4a": "m4a", + mp4: "m4a", // AIFF variants - aiff: 'aiff', - 'x-aiff': 'aiff', + aiff: "aiff", + "x-aiff": "aiff", // FLAC - flac: 'flac', - 'x-flac': 'flac', + flac: "flac", + "x-flac": "flac", // PCM variants - pcm16: 'pcm16', - pcm24: 'pcm24', + pcm16: "pcm16", + pcm24: "pcm24", }; /** @@ -106,14 +103,14 @@ export function getInputAudioData(part: LanguageModelV3FilePart): { } { const fileData = getFileUrl({ part, - defaultMediaType: 'audio/mpeg', + defaultMediaType: "audio/mpeg", }); // OpenRouter's input_audio doesn't support URLs directly if ( isUrl({ url: fileData, - protocols: new Set(['http:', 'https:'] as const), + protocols: new Set(["http:", "https:"] as const), }) ) { throw new Error( @@ -131,14 +128,14 @@ export function getInputAudioData(part: LanguageModelV3FilePart): { const data = getBase64FromDataUrl(fileData); // Map media type to format - const mediaType = part.mediaType || 'audio/mpeg'; - const rawFormat = mediaType.replace('audio/', ''); + const mediaType = part.mediaType || "audio/mpeg"; + 
const rawFormat = mediaType.replace("audio/", ""); // Normalize format names for OpenRouter using MIME type mapping const format = MIME_TO_FORMAT[rawFormat]; if (format === undefined) { - const supportedList = OPENROUTER_AUDIO_FORMATS.join(', '); + const supportedList = OPENROUTER_AUDIO_FORMATS.join(", "); throw new Error( `Unsupported audio format: "${mediaType}"\n\n` + `OpenRouter supports the following audio formats: ${supportedList}\n\n` + diff --git a/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts b/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts index bba13d8..edde4d6 100644 --- a/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts +++ b/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts @@ -1,14 +1,13 @@ -import type { LanguageModelV3ToolChoice } from '@ai-sdk/provider'; - -import { InvalidArgumentError } from '@ai-sdk/provider'; -import { z } from 'zod/v4'; +import type { LanguageModelV3ToolChoice } from "@ai-sdk/provider"; +import { InvalidArgumentError } from "@ai-sdk/provider"; +import { z } from "zod/v4"; const ChatCompletionToolChoiceSchema = z.union([ - z.literal('auto'), - z.literal('none'), - z.literal('required'), + z.literal("auto"), + z.literal("none"), + z.literal("required"), z.object({ - type: z.literal('function'), + type: z.literal("function"), function: z.object({ name: z.string(), }), @@ -21,20 +20,20 @@ export function getChatCompletionToolChoice( toolChoice: LanguageModelV3ToolChoice, ): ChatCompletionToolChoice { switch (toolChoice.type) { - case 'auto': - case 'none': - case 'required': + case "auto": + case "none": + case "required": return toolChoice.type; - case 'tool': { + case "tool": { return { - type: 'function', + type: "function", function: { name: toolChoice.toolName }, }; } default: { toolChoice satisfies never; throw new InvalidArgumentError({ - argument: 'toolChoice', + argument: "toolChoice", message: `Invalid tool choice type: ${JSON.stringify(toolChoice)}`, }); } diff --git 
a/packages/ai-sdk-provider-2/src/chat/index.test.ts b/packages/ai-sdk-provider-2/src/chat/index.test.ts index 14e1658..f5517cd 100644 --- a/packages/ai-sdk-provider-2/src/chat/index.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/index.test.ts @@ -1,115 +1,109 @@ -import type { - LanguageModelV3Prompt, - LanguageModelV3StreamPart, -} from '@ai-sdk/provider'; -import type { JSONSchema7 } from 'json-schema'; -import type { ImageResponse } from '../schemas/image'; -import type { ReasoningDetailUnion } from '../schemas/reasoning-details'; - -import { vi } from 'vitest'; -import { createOpenRouter } from '../provider'; -import { ReasoningDetailType } from '../schemas/reasoning-details'; -import { - convertReadableStreamToArray, - createTestServer, -} from '../test-utils/test-server'; - -vi.mock('@/src/version', () => ({ - VERSION: '0.0.0-test', +import type { LanguageModelV3Prompt, LanguageModelV3StreamPart } from "@ai-sdk/provider"; +import type { JSONSchema7 } from "json-schema"; +import { vi } from "vitest"; + +import type { ImageResponse } from "../schemas/image"; +import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; +import { createOpenRouter } from "../provider"; +import { ReasoningDetailType } from "../schemas/reasoning-details"; +import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; + +vi.mock("../version", () => ({ + VERSION: "0.0.0-test", })); const TEST_PROMPT: LanguageModelV3Prompt = [ - { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, + { role: "user", content: [{ type: "text", text: "Hello" }] }, ]; const TEST_LOGPROBS = { content: [ { - token: 'Hello', + token: "Hello", logprob: -0.0009994634, top_logprobs: [ { - token: 'Hello', + token: "Hello", logprob: -0.0009994634, }, ], }, { - token: '!', + token: "!", logprob: -0.13410144, top_logprobs: [ { - token: '!', + token: "!", logprob: -0.13410144, }, ], }, { - token: ' How', + token: " How", logprob: -0.0009250381, top_logprobs: [ { 
- token: ' How', + token: " How", logprob: -0.0009250381, }, ], }, { - token: ' can', + token: " can", logprob: -0.047709424, top_logprobs: [ { - token: ' can', + token: " can", logprob: -0.047709424, }, ], }, { - token: ' I', + token: " I", logprob: -0.000009014684, top_logprobs: [ { - token: ' I', + token: " I", logprob: -0.000009014684, }, ], }, { - token: ' assist', + token: " assist", logprob: -0.009125131, top_logprobs: [ { - token: ' assist', + token: " assist", logprob: -0.009125131, }, ], }, { - token: ' you', + token: " you", logprob: -0.0000066306106, top_logprobs: [ { - token: ' you', + token: " you", logprob: -0.0000066306106, }, ], }, { - token: ' today', + token: " today", logprob: -0.00011093382, top_logprobs: [ { - token: ' today', + token: " today", logprob: -0.00011093382, }, ], }, { - token: '?', + token: "?", logprob: -0.00004596782, top_logprobs: [ { - token: '?', + token: "?", logprob: -0.00004596782, }, ], @@ -119,51 +113,52 @@ const TEST_LOGPROBS = { const TEST_IMAGE_URL = `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAAiXpUWHRSYXcgcHJvZmlsZSB0eXBlIGlwdGMAAAiZTYwxDgIxDAT7vOKekDjrtV1T0VHwgbtcIiEhgfh/QaDgmGlWW0w6X66n5fl6jNu9p+ULkapDENgzpj+Kl5aFfa6KnYWgSjZjGOiSYRxTY/v8KIijI==`; -const TEST_IMAGE_BASE64 = TEST_IMAGE_URL.split(',')[1]!; +// eslint-disable-next-line @typescript-eslint/no-non-null-assertion +const TEST_IMAGE_BASE64 = TEST_IMAGE_URL.split(",")[1]!; const provider = createOpenRouter({ - apiKey: 'test-api-key', - compatibility: 'strict', + apiKey: "test-api-key", + compatibility: "strict", }); -const model = provider.chat('anthropic/claude-3.5-sonnet'); +const model = provider.chat("anthropic/claude-3.5-sonnet"); function isReasoningDeltaPart(part: LanguageModelV3StreamPart): part is Extract< LanguageModelV3StreamPart, { - type: 'reasoning-delta'; + type: "reasoning-delta"; } > { - return part.type === 'reasoning-delta'; + return part.type === "reasoning-delta"; } function isReasoningStartPart(part: 
LanguageModelV3StreamPart): part is Extract< LanguageModelV3StreamPart, { - type: 'reasoning-start'; + type: "reasoning-start"; } > { - return part.type === 'reasoning-start'; + return part.type === "reasoning-start"; } function isTextDeltaPart(part: LanguageModelV3StreamPart): part is Extract< LanguageModelV3StreamPart, { - type: 'text-delta'; + type: "text-delta"; } > { - return part.type === 'text-delta'; + return part.type === "text-delta"; } -describe('doGenerate', () => { +describe("doGenerate", () => { const server = createTestServer({ - 'https://openrouter.ai/api/v1/chat/completions': { - response: { type: 'json-value', body: {} }, + "https://openrouter.ai/api/v1/chat/completions": { + response: { type: "json-value", body: {} }, }, }); function prepareJsonResponse({ - content = '', + content = "", reasoning, reasoning_details, images, @@ -174,7 +169,7 @@ describe('doGenerate', () => { completion_tokens: 30, }, logprobs = null, - finish_reason = 'stop', + finish_reason = "stop", }: { content?: string; reasoning?: string; @@ -182,7 +177,7 @@ describe('doGenerate', () => { images?: Array; tool_calls?: Array<{ id: string; - type: 'function'; + type: "function"; function: { name: string; arguments: string }; }>; usage?: { @@ -201,18 +196,19 @@ describe('doGenerate', () => { } | null; finish_reason?: string; } = {}) { - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'json-value', + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "json-value", body: { - id: 'chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd', - object: 'chat.completion', + id: "chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd", + object: "chat.completion", created: 1711115037, - model: 'gpt-3.5-turbo-0125', + model: "gpt-3.5-turbo-0125", choices: [ { index: 0, message: { - role: 'assistant', + role: "assistant", content, reasoning, reasoning_details, @@ -224,27 +220,27 @@ 
describe('doGenerate', () => { }, ], usage, - system_fingerprint: 'fp_3bc1b5746c', + system_fingerprint: "fp_3bc1b5746c", }, }; } - it('should extract text response', async () => { - prepareJsonResponse({ content: 'Hello, World!' }); + it("should extract text response", async () => { + prepareJsonResponse({ content: "Hello, World!" }); const result = await model.doGenerate({ prompt: TEST_PROMPT, }); expect(result.content[0]).toStrictEqual({ - type: 'text', - text: 'Hello, World!', + type: "text", + text: "Hello, World!", }); }); - it('should extract usage', async () => { + it("should extract usage", async () => { prepareJsonResponse({ - content: '', + content: "", usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, }); @@ -267,20 +263,20 @@ describe('doGenerate', () => { }); }); - it('should extract logprobs', async () => { + it("should extract logprobs", async () => { prepareJsonResponse({ logprobs: TEST_LOGPROBS, }); - await provider.chat('openai/gpt-3.5-turbo', { logprobs: 1 }).doGenerate({ + await provider.chat("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ prompt: TEST_PROMPT, }); }); - it('should extract finish reason', async () => { + it("should extract finish reason", async () => { prepareJsonResponse({ - content: '', - finish_reason: 'stop', + content: "", + finish_reason: "stop", }); const response = await model.doGenerate({ @@ -288,15 +284,15 @@ describe('doGenerate', () => { }); expect(response.finishReason).toStrictEqual({ - unified: 'stop', - raw: 'stop', + unified: "stop", + raw: "stop", }); }); - it('should support unknown finish reason', async () => { + it("should support unknown finish reason", async () => { prepareJsonResponse({ - content: '', - finish_reason: 'eos', + content: "", + finish_reason: "eos", }); const response = await model.doGenerate({ @@ -304,16 +300,16 @@ describe('doGenerate', () => { }); expect(response.finishReason).toStrictEqual({ - unified: 'other', - raw: 'eos', + unified: "other", + raw: "eos", }); 
}); - it('should extract reasoning content from reasoning field', async () => { + it("should extract reasoning content from reasoning field", async () => { prepareJsonResponse({ - content: 'Hello!', + content: "Hello!", reasoning: - 'I need to think about this... The user said hello, so I should respond with a greeting.', + "I need to think about this... The user said hello, so I should respond with a greeting.", }); const result = await model.doGenerate({ @@ -322,27 +318,27 @@ describe('doGenerate', () => { expect(result.content).toStrictEqual([ { - type: 'reasoning', - text: 'I need to think about this... The user said hello, so I should respond with a greeting.', + type: "reasoning", + text: "I need to think about this... The user said hello, so I should respond with a greeting.", }, { - type: 'text', - text: 'Hello!', + type: "text", + text: "Hello!", }, ]); }); - it('should extract reasoning content from reasoning_details', async () => { + it("should extract reasoning content from reasoning_details", async () => { prepareJsonResponse({ - content: 'Hello!', + content: "Hello!", reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'Let me analyze this request...', + text: "Let me analyze this request...", }, { type: ReasoningDetailType.Summary, - summary: 'The user wants a greeting response.', + summary: "The user wants a greeting response.", }, ], }); @@ -353,47 +349,47 @@ describe('doGenerate', () => { expect(result.content).toStrictEqual([ { - type: 'reasoning', - text: 'Let me analyze this request...', + type: "reasoning", + text: "Let me analyze this request...", providerMetadata: { openrouter: { reasoning_details: [ { - type: 'reasoning.text', - text: 'Let me analyze this request...', + type: "reasoning.text", + text: "Let me analyze this request...", }, ], }, }, }, { - type: 'reasoning', - text: 'The user wants a greeting response.', + type: "reasoning", + text: "The user wants a greeting response.", providerMetadata: { openrouter: { 
reasoning_details: [ { - type: 'reasoning.summary', - summary: 'The user wants a greeting response.', + type: "reasoning.summary", + summary: "The user wants a greeting response.", }, ], }, }, }, { - type: 'text', - text: 'Hello!', + type: "text", + text: "Hello!", }, ]); }); - it('should handle encrypted reasoning details', async () => { + it("should handle encrypted reasoning details", async () => { prepareJsonResponse({ - content: 'Hello!', + content: "Hello!", reasoning_details: [ { type: ReasoningDetailType.Encrypted, - data: 'encrypted_reasoning_data_here', + data: "encrypted_reasoning_data_here", }, ], }); @@ -404,38 +400,38 @@ describe('doGenerate', () => { expect(result.content).toStrictEqual([ { - type: 'reasoning', - text: '[REDACTED]', + type: "reasoning", + text: "[REDACTED]", providerMetadata: { openrouter: { reasoning_details: [ { - type: 'reasoning.encrypted', - data: 'encrypted_reasoning_data_here', + type: "reasoning.encrypted", + data: "encrypted_reasoning_data_here", }, ], }, }, }, { - type: 'text', - text: 'Hello!', + type: "text", + text: "Hello!", }, ]); }); - it('should prioritize reasoning_details over reasoning when both are present', async () => { + it("should prioritize reasoning_details over reasoning when both are present", async () => { prepareJsonResponse({ - content: 'Hello!', - reasoning: 'This should be ignored when reasoning_details is present', + content: "Hello!", + reasoning: "This should be ignored when reasoning_details is present", reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'Processing from reasoning_details...', + text: "Processing from reasoning_details...", }, { type: ReasoningDetailType.Summary, - summary: 'Summary from reasoning_details', + summary: "Summary from reasoning_details", }, ], }); @@ -446,55 +442,55 @@ describe('doGenerate', () => { expect(result.content).toStrictEqual([ { - type: 'reasoning', - text: 'Processing from reasoning_details...', + type: "reasoning", + text: "Processing from 
reasoning_details...", providerMetadata: { openrouter: { reasoning_details: [ { - type: 'reasoning.text', - text: 'Processing from reasoning_details...', + type: "reasoning.text", + text: "Processing from reasoning_details...", }, ], }, }, }, { - type: 'reasoning', - text: 'Summary from reasoning_details', + type: "reasoning", + text: "Summary from reasoning_details", providerMetadata: { openrouter: { reasoning_details: [ { - type: 'reasoning.summary', - summary: 'Summary from reasoning_details', + type: "reasoning.summary", + summary: "Summary from reasoning_details", }, ], }, }, }, { - type: 'text', - text: 'Hello!', + type: "text", + text: "Hello!", }, ]); // Verify that the reasoning field content is not included expect(result.content).not.toContainEqual({ - type: 'reasoning', - text: 'This should be ignored when reasoning_details is present', + type: "reasoning", + text: "This should be ignored when reasoning_details is present", }); }); - it('should override finishReason to tool-calls when tool calls and encrypted reasoning are present', async () => { + it("should override finishReason to tool-calls when tool calls and encrypted reasoning are present", async () => { prepareJsonResponse({ - content: '', + content: "", tool_calls: [ { - id: 'call_123', - type: 'function', + id: "call_123", + type: "function", function: { - name: 'get_weather', + name: "get_weather", arguments: '{"location":"San Francisco"}', }, }, @@ -502,11 +498,11 @@ describe('doGenerate', () => { reasoning_details: [ { type: ReasoningDetailType.Encrypted, - data: 'encrypted_reasoning_data_here', + data: "encrypted_reasoning_data_here", }, ], // Gemini 3 returns 'stop' instead of 'tool_calls' when using thoughtSignature - finish_reason: 'stop', + finish_reason: "stop", }); const result = await model.doGenerate({ @@ -515,236 +511,241 @@ describe('doGenerate', () => { // Should override to 'tool-calls' when encrypted reasoning + tool calls + stop expect(result.finishReason).toStrictEqual({ - 
unified: 'tool-calls', - raw: 'stop', + unified: "tool-calls", + raw: "stop", }); // Should still have the tool call in content expect(result.content).toContainEqual( expect.objectContaining({ - type: 'tool-call', - toolCallId: 'call_123', - toolName: 'get_weather', + type: "tool-call", + toolCallId: "call_123", + toolName: "get_weather", }), ); }); - it('should pass the model and the messages', async () => { - prepareJsonResponse({ content: '' }); + it("should pass the model and the messages", async () => { + prepareJsonResponse({ content: "" }); await model.doGenerate({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: 'anthropic/claude-3.5-sonnet', - messages: [{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], }); }); - it('should pass the models array when provided', async () => { - prepareJsonResponse({ content: '' }); + it("should pass the models array when provided", async () => { + prepareJsonResponse({ content: "" }); - const customModel = provider.chat('anthropic/claude-3.5-sonnet', { - models: ['anthropic/claude-2', 'gryphe/mythomax-l2-13b'], + const customModel = provider.chat("anthropic/claude-3.5-sonnet", { + models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], }); await customModel.doGenerate({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: 'anthropic/claude-3.5-sonnet', - models: ['anthropic/claude-2', 'gryphe/mythomax-l2-13b'], - messages: [{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], + messages: [{ role: "user", content: "Hello" }], }); }); - it('should pass settings', async () => { + it("should pass settings", async () => { 
prepareJsonResponse(); await provider - .chat('openai/gpt-3.5-turbo', { + .chat("openai/gpt-3.5-turbo", { logitBias: { 50256: -100 }, logprobs: 2, parallelToolCalls: false, - user: 'test-user-id', + user: "test-user-id", }) .doGenerate({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: 'openai/gpt-3.5-turbo', - messages: [{ role: 'user', content: 'Hello' }], + model: "openai/gpt-3.5-turbo", + messages: [{ role: "user", content: "Hello" }], logprobs: true, top_logprobs: 2, logit_bias: { 50256: -100 }, parallel_tool_calls: false, - user: 'test-user-id', + user: "test-user-id", }); }); - it('should pass tools and toolChoice', async () => { - prepareJsonResponse({ content: '' }); + it("should pass tools and toolChoice", async () => { + prepareJsonResponse({ content: "" }); await model.doGenerate({ prompt: TEST_PROMPT, tools: [ { - type: 'function', - name: 'test-tool', - description: 'Test tool', + type: "function", + name: "test-tool", + description: "Test tool", inputSchema: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], + type: "object", + properties: { value: { type: "string" } }, + required: ["value"], additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', + $schema: "http://json-schema.org/draft-07/schema#", }, }, ], toolChoice: { - type: 'tool', - toolName: 'test-tool', + type: "tool", + toolName: "test-tool", }, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: 'anthropic/claude-3.5-sonnet', - messages: [{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], tools: [ { - type: 'function', + type: "function", function: { - name: 'test-tool', - description: 'Test tool', + name: "test-tool", + description: "Test 
tool", parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], + type: "object", + properties: { value: { type: "string" } }, + required: ["value"], additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', + $schema: "http://json-schema.org/draft-07/schema#", }, }, }, ], tool_choice: { - type: 'function', - function: { name: 'test-tool' }, + type: "function", + function: { name: "test-tool" }, }, }); }); - it('should pass headers', async () => { - prepareJsonResponse({ content: '' }); + it("should pass headers", async () => { + prepareJsonResponse({ content: "" }); const provider = createOpenRouter({ - apiKey: 'test-api-key', + apiKey: "test-api-key", headers: { - 'Custom-Provider-Header': 'provider-header-value', + "Custom-Provider-Header": "provider-header-value", }, }); - await provider.chat('openai/gpt-3.5-turbo').doGenerate({ + await provider.chat("openai/gpt-3.5-turbo").doGenerate({ prompt: TEST_PROMPT, headers: { - 'Custom-Request-Header': 'request-header-value', + "Custom-Request-Header": "request-header-value", }, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const requestHeaders = server.calls[0]!.requestHeaders; expect(requestHeaders).toMatchObject({ - authorization: 'Bearer test-api-key', - 'content-type': 'application/json', - 'custom-provider-header': 'provider-header-value', - 'custom-request-header': 'request-header-value', + authorization: "Bearer test-api-key", + "content-type": "application/json", + "custom-provider-header": "provider-header-value", + "custom-request-header": "request-header-value", }); - expect(requestHeaders['user-agent']).toContain( - 'ai-sdk/openrouter/0.0.0-test', - ); + expect(requestHeaders["user-agent"]).toContain("ai-sdk/openrouter/0.0.0-test"); }); - it('should pass responseFormat for JSON schema structured outputs', async () => { + it("should pass responseFormat for JSON schema structured outputs", async () => { 
prepareJsonResponse({ content: '{"name": "John", "age": 30}' }); const testSchema: JSONSchema7 = { - type: 'object', + type: "object", properties: { - name: { type: 'string' }, - age: { type: 'number' }, + name: { type: "string" }, + age: { type: "number" }, }, - required: ['name', 'age'], + required: ["name", "age"], additionalProperties: false, }; await model.doGenerate({ prompt: TEST_PROMPT, responseFormat: { - type: 'json', + type: "json", schema: testSchema, - name: 'PersonResponse', - description: 'A person object', + name: "PersonResponse", + description: "A person object", }, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: 'anthropic/claude-3.5-sonnet', - messages: [{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], response_format: { - type: 'json_schema', + type: "json_schema", json_schema: { schema: testSchema, strict: true, - name: 'PersonResponse', - description: 'A person object', + name: "PersonResponse", + description: "A person object", }, }, }); }); - it('should use default name when name is not provided in responseFormat', async () => { + it("should use default name when name is not provided in responseFormat", async () => { prepareJsonResponse({ content: '{"name": "John", "age": 30}' }); const testSchema: JSONSchema7 = { - type: 'object', + type: "object", properties: { - name: { type: 'string' }, - age: { type: 'number' }, + name: { type: "string" }, + age: { type: "number" }, }, - required: ['name', 'age'], + required: ["name", "age"], additionalProperties: false, }; await model.doGenerate({ prompt: TEST_PROMPT, responseFormat: { - type: 'json', + type: "json", schema: testSchema, }, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: 'anthropic/claude-3.5-sonnet', - messages: 
[{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], response_format: { - type: 'json_schema', + type: "json_schema", json_schema: { schema: testSchema, strict: true, - name: 'response', + name: "response", }, }, }); }); - it('should pass images', async () => { + it("should pass images", async () => { prepareJsonResponse({ - content: '', + content: "", images: [ { - type: 'image_url', + type: "image_url", image_url: { url: TEST_IMAGE_URL }, }, ], @@ -757,18 +758,18 @@ describe('doGenerate', () => { expect(result.content).toStrictEqual([ { - type: 'file', - mediaType: 'image/png', + type: "file", + mediaType: "image/png", data: TEST_IMAGE_BASE64, }, ]); }); }); -describe('doStream', () => { +describe("doStream", () => { const server = createTestServer({ - 'https://openrouter.ai/api/v1/chat/completions': { - response: { type: 'json-value', body: {} }, + "https://openrouter.ai/api/v1/chat/completions": { + response: { type: "json-value", body: {} }, }, }); @@ -780,7 +781,7 @@ describe('doStream', () => { completion_tokens: 227, }, logprobs = null, - finish_reason = 'stop', + finish_reason = "stop", }: { content: string[]; usage?: { @@ -809,8 +810,9 @@ describe('doStream', () => { } | null; finish_reason?: string; }) { - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`, @@ -823,15 +825,15 @@ describe('doStream', () => { `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":${JSON.stringify( usage, )}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; } - it('should stream text deltas', async () => { + it("should stream text deltas", async () => { prepareStreamResponse({ - content: ['Hello', ', ', 'World!'], - finish_reason: 'stop', + content: ["Hello", ", ", "World!"], + finish_reason: "stop", usage: { prompt_tokens: 17, total_tokens: 244, @@ -848,64 +850,64 @@ describe('doStream', () => { const elements = await convertReadableStreamToArray(stream); expect(elements).toStrictEqual([ { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0613', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0613', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", }, - { type: 'text-start', id: expect.any(String) }, - { type: 'text-delta', delta: 'Hello', id: expect.any(String) }, + { type: "text-start", id: expect.any(String) }, + { type: "text-delta", delta: "Hello", id: expect.any(String) }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0613', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", }, - { type: 'text-delta', delta: ', ', id: expect.any(String) }, + { type: "text-delta", delta: ", ", id: expect.any(String) }, { - type: 'response-metadata', - id: 
'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0613', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", }, - { type: 'text-delta', delta: 'World!', id: expect.any(String) }, + { type: "text-delta", delta: "World!", id: expect.any(String) }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0613', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0613', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0613", }, { - type: 'text-end', + type: "text-end", id: expect.any(String), }, { - type: 'finish', - finishReason: { unified: 'stop', raw: 'stop' }, + type: "finish", + finishReason: { unified: "stop", raw: "stop" }, providerMetadata: { openrouter: { @@ -934,9 +936,9 @@ describe('doStream', () => { ]); }); - it('should include upstream inference cost in finish metadata when provided', async () => { + it("should include upstream inference cost in finish metadata when provided", async () => { prepareStreamResponse({ - content: ['Hello'], + content: ["Hello"], usage: { prompt_tokens: 17, total_tokens: 244, @@ -951,14 +953,10 @@ describe('doStream', () => { prompt: TEST_PROMPT, }); - const elements = (await convertReadableStreamToArray( - stream, - )) as LanguageModelV3StreamPart[]; + const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; const finishChunk = elements.find( - ( - chunk, - ): chunk is Extract => - chunk.type === 'finish', + (chunk): chunk is Extract => + chunk.type === "finish", ); 
const openrouterUsage = ( finishChunk?.providerMetadata?.openrouter as { @@ -973,9 +971,9 @@ describe('doStream', () => { }); }); - it('should handle both normal cost and upstream inference cost in finish metadata when both are provided', async () => { + it("should handle both normal cost and upstream inference cost in finish metadata when both are provided", async () => { prepareStreamResponse({ - content: ['Hello'], + content: ["Hello"], usage: { prompt_tokens: 17, total_tokens: 244, @@ -991,14 +989,10 @@ describe('doStream', () => { prompt: TEST_PROMPT, }); - const elements = (await convertReadableStreamToArray( - stream, - )) as LanguageModelV3StreamPart[]; + const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; const finishChunk = elements.find( - ( - chunk, - ): chunk is Extract => - chunk.type === 'finish', + (chunk): chunk is Extract => + chunk.type === "finish", ); const openrouterUsage = ( finishChunk?.providerMetadata?.openrouter as { @@ -1014,11 +1008,12 @@ describe('doStream', () => { expect(openrouterUsage?.cost).toBe(0.0042); }); - it('should prioritize reasoning_details over reasoning when both are present in streaming', async () => { + it("should prioritize reasoning_details over reasoning when both are present in streaming", async () => { // This test verifies that when the API returns both 'reasoning' and 'reasoning_details' fields, // we prioritize reasoning_details and ignore the reasoning field to avoid duplicates. 
- server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ // First chunk: both reasoning and reasoning_details with different content `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + @@ -1047,7 +1042,7 @@ describe('doStream', () => { `"logprobs":null,"finish_reason":"stop"}]}\n\n`, `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":30,"total_tokens":47}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; @@ -1060,9 +1055,9 @@ describe('doStream', () => { // Filter for reasoning-related elements const reasoningElements = elements.filter( (el) => - el.type === 'reasoning-start' || - el.type === 'reasoning-delta' || - el.type === 'reasoning-end', + el.type === "reasoning-start" || + el.type === "reasoning-delta" || + el.type === "reasoning-end", ); // Debug output to see what we're getting @@ -1074,20 +1069,18 @@ describe('doStream', () => { expect(reasoningElements).toHaveLength(6); // Verify the content comes from reasoning_details, not reasoning field - const reasoningDeltas = reasoningElements - .filter(isReasoningDeltaPart) - .map((el) => el.delta); + const reasoningDeltas = reasoningElements.filter(isReasoningDeltaPart).map((el) => el.delta); expect(reasoningDeltas).toEqual([ - 'Let me think about this...', // from reasoning_details text - 'User wants a greeting', // from reasoning_details summary - '[REDACTED]', // from reasoning_details encrypted - 'This reasoning is used', // from reasoning field (no reasoning_details) + "Let me think about this...", // from reasoning_details text + "User wants 
a greeting", // from reasoning_details summary + "[REDACTED]", // from reasoning_details encrypted + "This reasoning is used", // from reasoning field (no reasoning_details) ]); // Verify that "This should be ignored..." and "Also ignored" are NOT in the output - expect(reasoningDeltas).not.toContain('This should be ignored...'); - expect(reasoningDeltas).not.toContain('Also ignored'); + expect(reasoningDeltas).not.toContain("This should be ignored..."); + expect(reasoningDeltas).not.toContain("Also ignored"); // Verify that reasoning-delta chunks include providerMetadata with reasoning_details const reasoningDeltaElements = elements.filter(isReasoningDeltaPart); @@ -1098,7 +1091,7 @@ describe('doStream', () => { reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'Let me think about this...', + text: "Let me think about this...", }, ], }, @@ -1110,11 +1103,11 @@ describe('doStream', () => { reasoning_details: [ { type: ReasoningDetailType.Summary, - summary: 'User wants a greeting', + summary: "User wants a greeting", }, { type: ReasoningDetailType.Encrypted, - data: 'secret', + data: "secret", }, ], }, @@ -1125,11 +1118,11 @@ describe('doStream', () => { reasoning_details: [ { type: ReasoningDetailType.Summary, - summary: 'User wants a greeting', + summary: "User wants a greeting", }, { type: ReasoningDetailType.Encrypted, - data: 'secret', + data: "secret", }, ], }, @@ -1139,11 +1132,12 @@ describe('doStream', () => { expect(reasoningDeltaElements[3]?.providerMetadata).toBeUndefined(); }); - it('should emit reasoning_details in providerMetadata for all reasoning delta chunks', async () => { + it("should emit reasoning_details in providerMetadata for all reasoning delta chunks", async () => { // This test verifies that reasoning_details are included in providerMetadata // for all reasoning-delta chunks, enabling users to accumulate them for multi-turn conversations - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 
'stream-chunks', + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ // First chunk: reasoning_details with Text type `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + @@ -1166,7 +1160,7 @@ describe('doStream', () => { `"logprobs":null,"finish_reason":"stop"}]}\n\n`, `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":30,"total_tokens":47}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; @@ -1186,7 +1180,7 @@ describe('doStream', () => { reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'First reasoning chunk', + text: "First reasoning chunk", }, ], }, @@ -1197,7 +1191,7 @@ describe('doStream', () => { reasoning_details: [ { type: ReasoningDetailType.Summary, - summary: 'Summary reasoning', + summary: "Summary reasoning", }, ], }, @@ -1208,7 +1202,7 @@ describe('doStream', () => { reasoning_details: [ { type: ReasoningDetailType.Encrypted, - data: 'encrypted_data', + data: "encrypted_data", }, ], }, @@ -1222,18 +1216,19 @@ describe('doStream', () => { reasoning_details: [ { type: ReasoningDetailType.Text, - text: 'First reasoning chunk', + text: "First reasoning chunk", }, ], }, }); }); - it('should maintain correct reasoning order when content comes after reasoning (issue #7824)', async () => { + it("should maintain correct reasoning order when content comes after reasoning (issue #7824)", async () => { // This test reproduces the issue where reasoning appears first but then gets "pushed down" // by content that comes later in the stream - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + // eslint-disable-next-line 
@typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ // First chunk: Start with reasoning `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + @@ -1264,7 +1259,7 @@ describe('doStream', () => { `"logprobs":null,"finish_reason":"stop"}]}\n\n`, `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":30,"total_tokens":47}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; @@ -1285,34 +1280,33 @@ describe('doStream', () => { const streamOrder = elements.map((el) => el.type); // Find the positions of key events - const reasoningStartIndex = streamOrder.indexOf('reasoning-start'); - const reasoningEndIndex = streamOrder.indexOf('reasoning-end'); - const textStartIndex = streamOrder.indexOf('text-start'); + const reasoningStartIndex = streamOrder.indexOf("reasoning-start"); + const reasoningEndIndex = streamOrder.indexOf("reasoning-end"); + const textStartIndex = streamOrder.indexOf("text-start"); // Reasoning should come before text and end before text starts expect(reasoningStartIndex).toBeLessThan(textStartIndex); expect(reasoningEndIndex).toBeLessThan(textStartIndex); // Verify reasoning content - const reasoningDeltas = elements - .filter(isReasoningDeltaPart) - .map((el) => el.delta); + const reasoningDeltas = elements.filter(isReasoningDeltaPart).map((el) => el.delta); expect(reasoningDeltas).toEqual([ - 'I need to think about this step by step...', - ' First, I should analyze the request.', - ' Then I should provide a helpful response.', + "I need to think about this step by step...", + " First, I should analyze the request.", + " Then I should provide a helpful response.", ]); // Verify text content const textDeltas = 
elements.filter(isTextDeltaPart).map((el) => el.delta); - expect(textDeltas).toEqual(['Hello! ', 'How can I help you today?']); + expect(textDeltas).toEqual(["Hello! ", "How can I help you today?"]); }); - it('should stream tool deltas', async () => { - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + it("should stream tool deltas", async () => { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + @@ -1343,21 +1337,21 @@ describe('doStream', () => { `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; const { stream } = await model.doStream({ tools: [ { - type: 'function', - name: 'test-tool', + type: "function", + name: "test-tool", inputSchema: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], + type: "object", + properties: { value: { type: "string" } }, + required: ["value"], additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', + $schema: "http://json-schema.org/draft-07/schema#", }, }, ], @@ -1366,113 +1360,113 @@ describe('doStream', () => { expect(await convertReadableStreamToArray(stream)).toStrictEqual([ { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', 
+ type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - toolName: 'test-tool', - type: 'tool-input-start', + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolName: "test-tool", + type: "tool-input-start", }, { - type: 'tool-input-delta', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + type: "tool-input-delta", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", delta: '{"', }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'tool-input-delta', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - delta: 'value', + type: "tool-input-delta", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", + delta: "value", }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'tool-input-delta', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + type: "tool-input-delta", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", delta: '":"', }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + 
modelId: "gpt-3.5-turbo-0125", }, { - type: 'tool-input-delta', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - delta: 'Spark', + type: "tool-input-delta", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", + delta: "Spark", }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'tool-input-delta', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - delta: 'le', + type: "tool-input-delta", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", + delta: "le", }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'tool-input-delta', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - delta: ' Day', + type: "tool-input-delta", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", + delta: " Day", }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'tool-input-delta', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + type: "tool-input-delta", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", delta: '"}', }, { - type: 'tool-call', - toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - toolName: 'test-tool', + type: "tool-call", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolName: "test-tool", input: '{"value":"Sparkle Day"}', providerMetadata: { openrouter: { @@ -1481,24 +1475,24 @@ describe('doStream', () => { }, }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: 
"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'finish', - finishReason: { unified: 'tool-calls', raw: 'tool_calls' }, + type: "finish", + finishReason: { unified: "tool-calls", raw: "tool_calls" }, providerMetadata: { openrouter: { usage: { @@ -1526,9 +1520,10 @@ describe('doStream', () => { ]); }); - it('should stream tool call that is sent in one chunk', async () => { - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + it("should stream tool call that is sent in one chunk", async () => { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + @@ -1538,21 +1533,21 @@ describe('doStream', () => { `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; const { stream } = await model.doStream({ tools: [ { - type: 'function', - name: 'test-tool', + 
type: "function", + name: "test-tool", inputSchema: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], + type: "object", + properties: { value: { type: "string" } }, + required: ["value"], additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', + $schema: "http://json-schema.org/draft-07/schema#", }, }, ], @@ -1562,31 +1557,31 @@ describe('doStream', () => { const elements = await convertReadableStreamToArray(stream); expect(elements).toStrictEqual([ { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'tool-input-start', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - toolName: 'test-tool', + type: "tool-input-start", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolName: "test-tool", }, { - type: 'tool-input-delta', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + type: "tool-input-delta", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", delta: '{"value":"Sparkle Day"}', }, { - type: 'tool-input-end', - id: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + type: "tool-input-end", + id: "call_O17Uplv4lJvD6DVdIvFFeRMw", }, { - type: 'tool-call', - toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', - toolName: 'test-tool', + type: "tool-call", + toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", + toolName: "test-tool", input: '{"value":"Sparkle Day"}', providerMetadata: { openrouter: { @@ -1595,24 +1590,24 @@ describe('doStream', () => { }, }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: 
"response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'finish', - finishReason: { unified: 'tool-calls', raw: 'tool_calls' }, + type: "finish", + finishReason: { unified: "tool-calls", raw: "tool_calls" }, providerMetadata: { openrouter: { usage: { @@ -1640,9 +1635,10 @@ describe('doStream', () => { ]); }); - it('should override finishReason to tool-calls in streaming when tool calls and encrypted reasoning are present', async () => { - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + it("should override finishReason to tool-calls in streaming when tool calls and encrypted reasoning are present", async () => { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ // First chunk: reasoning_details with encrypted data `data: {"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + @@ -1659,21 +1655,21 @@ describe('doStream', () => { `"system_fingerprint":"fp_gemini3","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`, `data: {"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + `"system_fingerprint":"fp_gemini3","choices":[],"usage":{"prompt_tokens":10,"completion_tokens":20,"total_tokens":30}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; const { stream } = await model.doStream({ tools: [ { - type: 'function', - name: 'get_weather', + type: "function", + name: "get_weather", inputSchema: { - type: 'object', - properties: { location: { type: 'string' } }, - required: ['location'], + type: "object", + properties: { location: { type: "string" } }, + required: ["location"], 
additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', + $schema: "http://json-schema.org/draft-07/schema#", }, }, ], @@ -1684,28 +1680,27 @@ describe('doStream', () => { // Find the finish event const finishEvent = elements.find( - (el): el is LanguageModelV3StreamPart & { type: 'finish' } => - el.type === 'finish', + (el): el is LanguageModelV3StreamPart & { type: "finish" } => el.type === "finish", ); // Should override to 'tool-calls' when encrypted reasoning + tool calls + stop expect(finishEvent?.finishReason).toStrictEqual({ - unified: 'tool-calls', - raw: 'stop', + unified: "tool-calls", + raw: "stop", }); // Should have the tool call const toolCallEvent = elements.find( - (el): el is LanguageModelV3StreamPart & { type: 'tool-call' } => - el.type === 'tool-call', + (el): el is LanguageModelV3StreamPart & { type: "tool-call" } => el.type === "tool-call", ); - expect(toolCallEvent?.toolName).toBe('get_weather'); - expect(toolCallEvent?.toolCallId).toBe('call_gemini3_123'); + expect(toolCallEvent?.toolName).toBe("get_weather"); + expect(toolCallEvent?.toolCallId).toBe("call_gemini3_123"); }); - it('should stream images', async () => { - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + it("should stream images", async () => { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"",` + @@ -1713,7 +1708,7 @@ describe('doStream', () => { `"logprobs":null,"finish_reason":"stop"}]}\n\n`, `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` 
+ `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; @@ -1723,29 +1718,29 @@ describe('doStream', () => { expect(await convertReadableStreamToArray(stream)).toStrictEqual([ { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'file', - mediaType: 'image/png', + type: "file", + mediaType: "image/png", data: TEST_IMAGE_BASE64, }, { - type: 'response-metadata', - id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + type: "response-metadata", + id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", }, { - type: 'response-metadata', - modelId: 'gpt-3.5-turbo-0125', + type: "response-metadata", + modelId: "gpt-3.5-turbo-0125", }, { - type: 'finish', - finishReason: { unified: 'stop', raw: 'stop' }, + type: "finish", + finishReason: { unified: "stop", raw: "stop" }, providerMetadata: { openrouter: { usage: { @@ -1773,13 +1768,14 @@ describe('doStream', () => { ]); }); - it('should handle error stream parts', async () => { - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + it("should handle error stream parts", async () => { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ `data: {"error":{"message": "The server had an error processing your request. Sorry about that! 
You can retry your request, or contact us through our ` + `help center at help.openrouter.com if you keep seeing this error.","type":"server_error","param":null,"code":null}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; @@ -1789,25 +1785,25 @@ describe('doStream', () => { expect(await convertReadableStreamToArray(stream)).toStrictEqual([ { - type: 'error', + type: "error", error: { message: - 'The server had an error processing your request. Sorry about that! ' + - 'You can retry your request, or contact us through our help center at ' + - 'help.openrouter.com if you keep seeing this error.', - type: 'server_error', + "The server had an error processing your request. Sorry about that! " + + "You can retry your request, or contact us through our help center at " + + "help.openrouter.com if you keep seeing this error.", + type: "server_error", code: null, param: null, }, }, { - finishReason: { unified: 'error', raw: undefined }, + finishReason: { unified: "error", raw: undefined }, providerMetadata: { openrouter: { usage: {}, }, }, - type: 'finish', + type: "finish", usage: { inputTokens: { total: undefined, @@ -1825,10 +1821,11 @@ describe('doStream', () => { ]); }); - it('should handle unparsable stream parts', async () => { - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', - chunks: ['data: {unparsable}\n\n', 'data: [DONE]\n\n'], + it("should handle unparsable stream parts", async () => { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", + chunks: ["data: {unparsable}\n\n", "data: [DONE]\n\n"], }; const { stream } = await model.doStream({ @@ -1838,11 +1835,11 @@ describe('doStream', () => { const elements = await convertReadableStreamToArray(stream); expect(elements.length).toBe(2); - expect(elements[0]?.type).toBe('error'); + expect(elements[0]?.type).toBe("error"); 
expect(elements[1]).toStrictEqual({ - finishReason: { unified: 'error', raw: undefined }, + finishReason: { unified: "error", raw: undefined }, - type: 'finish', + type: "finish", providerMetadata: { openrouter: { usage: {}, @@ -1864,201 +1861,201 @@ describe('doStream', () => { }); }); - it('should pass the messages and the model', async () => { + it("should pass the messages and the model", async () => { prepareStreamResponse({ content: [] }); await model.doStream({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ stream: true, stream_options: { include_usage: true }, - model: 'anthropic/claude-3.5-sonnet', - messages: [{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], }); }); - it('should pass headers', async () => { + it("should pass headers", async () => { prepareStreamResponse({ content: [] }); const provider = createOpenRouter({ - apiKey: 'test-api-key', + apiKey: "test-api-key", headers: { - 'Custom-Provider-Header': 'provider-header-value', + "Custom-Provider-Header": "provider-header-value", }, }); - await provider.chat('openai/gpt-3.5-turbo').doStream({ + await provider.chat("openai/gpt-3.5-turbo").doStream({ prompt: TEST_PROMPT, headers: { - 'Custom-Request-Header': 'request-header-value', + "Custom-Request-Header": "request-header-value", }, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const requestHeaders = server.calls[0]!.requestHeaders; expect(requestHeaders).toMatchObject({ - authorization: 'Bearer test-api-key', - 'content-type': 'application/json', - 'custom-provider-header': 'provider-header-value', - 'custom-request-header': 'request-header-value', + authorization: "Bearer test-api-key", + "content-type": "application/json", + "custom-provider-header": "provider-header-value", + "custom-request-header": "request-header-value", }); 
- expect(requestHeaders['user-agent']).toContain( - 'ai-sdk/openrouter/0.0.0-test', - ); + expect(requestHeaders["user-agent"]).toContain("ai-sdk/openrouter/0.0.0-test"); }); - it('should pass extra body', async () => { + it("should pass extra body", async () => { prepareStreamResponse({ content: [] }); const provider = createOpenRouter({ - apiKey: 'test-api-key', + apiKey: "test-api-key", extraBody: { - custom_field: 'custom_value', + custom_field: "custom_value", providers: { anthropic: { - custom_field: 'custom_value', + custom_field: "custom_value", }, }, }, }); - await provider.chat('anthropic/claude-3.5-sonnet').doStream({ + await provider.chat("anthropic/claude-3.5-sonnet").doStream({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const requestBody = await server.calls[0]!.requestBodyJson; - expect(requestBody).toHaveProperty('custom_field', 'custom_value'); - expect(requestBody).toHaveProperty( - 'providers.anthropic.custom_field', - 'custom_value', - ); + expect(requestBody).toHaveProperty("custom_field", "custom_value"); + expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); }); - it('should pass responseFormat for JSON schema structured outputs', async () => { + it("should pass responseFormat for JSON schema structured outputs", async () => { prepareStreamResponse({ content: ['{"name": "John", "age": 30}'] }); const testSchema: JSONSchema7 = { - type: 'object', + type: "object", properties: { - name: { type: 'string' }, - age: { type: 'number' }, + name: { type: "string" }, + age: { type: "number" }, }, - required: ['name', 'age'], + required: ["name", "age"], additionalProperties: false, }; await model.doStream({ prompt: TEST_PROMPT, responseFormat: { - type: 'json', + type: "json", schema: testSchema, - name: 'PersonResponse', - description: 'A person object', + name: "PersonResponse", + description: "A person object", }, }); + // eslint-disable-next-line 
@typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ stream: true, stream_options: { include_usage: true }, - model: 'anthropic/claude-3.5-sonnet', - messages: [{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], response_format: { - type: 'json_schema', + type: "json_schema", json_schema: { schema: testSchema, strict: true, - name: 'PersonResponse', - description: 'A person object', + name: "PersonResponse", + description: "A person object", }, }, }); }); - it('should pass responseFormat AND tools together', async () => { + it("should pass responseFormat AND tools together", async () => { prepareStreamResponse({ content: ['{"name": "John", "age": 30}'] }); const testSchema: JSONSchema7 = { - type: 'object', + type: "object", properties: { - name: { type: 'string' }, - age: { type: 'number' }, + name: { type: "string" }, + age: { type: "number" }, }, - required: ['name', 'age'], + required: ["name", "age"], additionalProperties: false, }; await model.doStream({ prompt: TEST_PROMPT, responseFormat: { - type: 'json', + type: "json", schema: testSchema, - name: 'PersonResponse', - description: 'A person object', + name: "PersonResponse", + description: "A person object", }, tools: [ { - type: 'function', - name: 'test-tool', - description: 'Test tool', + type: "function", + name: "test-tool", + description: "Test tool", inputSchema: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], + type: "object", + properties: { value: { type: "string" } }, + required: ["value"], additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', + $schema: "http://json-schema.org/draft-07/schema#", }, }, ], toolChoice: { - type: 'tool', - toolName: 'test-tool', + type: "tool", + toolName: "test-tool", }, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await 
server.calls[0]!.requestBodyJson).toStrictEqual({ stream: true, stream_options: { include_usage: true }, - model: 'anthropic/claude-3.5-sonnet', - messages: [{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], response_format: { - type: 'json_schema', + type: "json_schema", json_schema: { schema: testSchema, strict: true, - name: 'PersonResponse', - description: 'A person object', + name: "PersonResponse", + description: "A person object", }, }, tools: [ { - type: 'function', + type: "function", function: { - name: 'test-tool', - description: 'Test tool', + name: "test-tool", + description: "Test tool", parameters: { - type: 'object', - properties: { value: { type: 'string' } }, - required: ['value'], + type: "object", + properties: { value: { type: "string" } }, + required: ["value"], additionalProperties: false, - $schema: 'http://json-schema.org/draft-07/schema#', + $schema: "http://json-schema.org/draft-07/schema#", }, }, }, ], tool_choice: { - type: 'function', - function: { name: 'test-tool' }, + type: "function", + function: { name: "test-tool" }, }, }); }); - it('should pass debug settings', async () => { - prepareStreamResponse({ content: ['Hello'] }); + it("should pass debug settings", async () => { + prepareStreamResponse({ content: ["Hello"] }); - const debugModel = provider.chat('anthropic/claude-3.5-sonnet', { + const debugModel = provider.chat("anthropic/claude-3.5-sonnet", { debug: { echo_upstream_body: true, }, @@ -2068,22 +2065,24 @@ describe('doStream', () => { prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ stream: true, stream_options: { include_usage: true }, - model: 'anthropic/claude-3.5-sonnet', - messages: [{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], debug: { 
echo_upstream_body: true, }, }); }); - it('should include file annotations in finish metadata when streamed', async () => { + it("should include file annotations in finish metadata when streamed", async () => { // This test verifies that file annotations from FileParserPlugin are accumulated // during streaming and included in the finish event's providerMetadata - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ // First chunk with role and content `data: {"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + @@ -2100,7 +2099,7 @@ describe('doStream', () => { `"logprobs":null,"finish_reason":"stop"}]}\n\n`, `data: {"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":100,"completion_tokens":20,"total_tokens":120}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; @@ -2108,16 +2107,12 @@ describe('doStream', () => { prompt: TEST_PROMPT, }); - const elements = (await convertReadableStreamToArray( - stream, - )) as LanguageModelV3StreamPart[]; + const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; // Find the finish chunk const finishChunk = elements.find( - ( - chunk, - ): chunk is Extract => - chunk.type === 'finish', + (chunk): chunk is Extract => + chunk.type === "finish", ); expect(finishChunk).toBeDefined(); @@ -2125,7 +2120,7 @@ describe('doStream', () => { // Verify file annotations are included in providerMetadata const openrouterMetadata = finishChunk?.providerMetadata?.openrouter as { annotations?: Array<{ - type: 'file'; + type: "file"; file: { hash: string; name: string; @@ -2136,23 
+2131,24 @@ describe('doStream', () => { expect(openrouterMetadata?.annotations).toStrictEqual([ { - type: 'file', + type: "file", file: { - hash: 'abc123def456', - name: 'bitcoin.pdf', + hash: "abc123def456", + name: "bitcoin.pdf", content: [ - { type: 'text', text: 'Page 1 content' }, - { type: 'text', text: 'Page 2 content' }, + { type: "text", text: "Page 1 content" }, + { type: "text", text: "Page 2 content" }, ], }, }, ]); }); - it('should accumulate multiple file annotations from stream', async () => { + it("should accumulate multiple file annotations from stream", async () => { // This test verifies that multiple file annotations are accumulated correctly - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'stream-chunks', + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "stream-chunks", chunks: [ // First chunk with content `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + @@ -2174,7 +2170,7 @@ describe('doStream', () => { `"logprobs":null,"finish_reason":"stop"}]}\n\n`, `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":100,"completion_tokens":20,"total_tokens":120}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; @@ -2182,20 +2178,16 @@ describe('doStream', () => { prompt: TEST_PROMPT, }); - const elements = (await convertReadableStreamToArray( - stream, - )) as LanguageModelV3StreamPart[]; + const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; const finishChunk = elements.find( - ( - chunk, - ): chunk is Extract => - chunk.type === 'finish', + (chunk): chunk is Extract => + chunk.type === "finish", ); const openrouterMetadata = 
finishChunk?.providerMetadata?.openrouter as { annotations?: Array<{ - type: 'file'; + type: "file"; file: { hash: string; name: string; @@ -2206,34 +2198,35 @@ describe('doStream', () => { // Both file annotations should be accumulated expect(openrouterMetadata?.annotations).toHaveLength(2); - expect(openrouterMetadata?.annotations?.[0]?.file.hash).toBe('hash1'); - expect(openrouterMetadata?.annotations?.[1]?.file.hash).toBe('hash2'); + expect(openrouterMetadata?.annotations?.[0]?.file.hash).toBe("hash1"); + expect(openrouterMetadata?.annotations?.[1]?.file.hash).toBe("hash2"); }); }); -describe('debug settings', () => { +describe("debug settings", () => { const server = createTestServer({ - 'https://openrouter.ai/api/v1/chat/completions': { - response: { type: 'json-value', body: {} }, + "https://openrouter.ai/api/v1/chat/completions": { + response: { type: "json-value", body: {} }, }, }); - function prepareJsonResponse({ content = '' }: { content?: string } = {}) { - server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = { - type: 'json-value', + function prepareJsonResponse({ content = "" }: { content?: string } = {}) { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + type: "json-value", body: { - id: 'chatcmpl-test', - object: 'chat.completion', + id: "chatcmpl-test", + object: "chat.completion", created: 1711115037, - model: 'anthropic/claude-3.5-sonnet', + model: "anthropic/claude-3.5-sonnet", choices: [ { index: 0, message: { - role: 'assistant', + role: "assistant", content, }, - finish_reason: 'stop', + finish_reason: "stop", }, ], usage: { @@ -2245,10 +2238,10 @@ describe('debug settings', () => { }; } - it('should pass debug settings in doGenerate', async () => { - prepareJsonResponse({ content: 'Hello!' }); + it("should pass debug settings in doGenerate", async () => { + prepareJsonResponse({ content: "Hello!" 
}); - const debugModel = provider.chat('anthropic/claude-3.5-sonnet', { + const debugModel = provider.chat("anthropic/claude-3.5-sonnet", { debug: { echo_upstream_body: true, }, @@ -2258,23 +2251,25 @@ describe('debug settings', () => { prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: 'anthropic/claude-3.5-sonnet', - messages: [{ role: 'user', content: 'Hello' }], + model: "anthropic/claude-3.5-sonnet", + messages: [{ role: "user", content: "Hello" }], debug: { echo_upstream_body: true, }, }); }); - it('should not include debug when not set', async () => { - prepareJsonResponse({ content: 'Hello!' }); + it("should not include debug when not set", async () => { + prepareJsonResponse({ content: "Hello!" }); await model.doGenerate({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const requestBody = await server.calls[0]!.requestBodyJson; - expect(requestBody).not.toHaveProperty('debug'); + expect(requestBody).not.toHaveProperty("debug"); }); }); diff --git a/packages/ai-sdk-provider-2/src/chat/index.ts b/packages/ai-sdk-provider-2/src/chat/index.ts index 673ace7..94eef03 100644 --- a/packages/ai-sdk-provider-2/src/chat/index.ts +++ b/packages/ai-sdk-provider-2/src/chat/index.ts @@ -10,22 +10,10 @@ import type { SharedV3Headers, SharedV3ProviderMetadata, SharedV3Warning, -} from '@ai-sdk/provider'; -import type { ParseResult } from '@ai-sdk/provider-utils'; -import type { z } from 'zod/v4'; -import type { ReasoningDetailUnion } from '@/src/schemas/reasoning-details'; -import type { OpenRouterUsageAccounting } from '@/src/types/index'; -import type { FileAnnotation } from '../schemas/provider-metadata'; -import type { - OpenRouterChatModelId, - OpenRouterChatSettings, -} from '../types/openrouter-chat-settings'; - -import { - APICallError, - InvalidResponseDataError, - NoContentGeneratedError, -} from 
'@ai-sdk/provider'; +} from "@ai-sdk/provider"; +import type { ParseResult } from "@ai-sdk/provider-utils"; +import type { z } from "zod/v4"; +import { APICallError, InvalidResponseDataError, NoContentGeneratedError } from "@ai-sdk/provider"; import { combineHeaders, createEventSourceResponseHandler, @@ -33,25 +21,30 @@ import { generateId, isParsableJson, postJsonToApi, -} from '@ai-sdk/provider-utils'; -import { ReasoningDetailType } from '@/src/schemas/reasoning-details'; -import { openrouterFailedResponseHandler } from '../schemas/error-response'; -import { OpenRouterProviderMetadataSchema } from '../schemas/provider-metadata'; -import { - createFinishReason, - mapOpenRouterFinishReason, -} from '../utils/map-finish-reason'; -import { convertToOpenRouterChatMessages } from './convert-to-openrouter-chat-messages'; -import { getBase64FromDataUrl, getMediaType } from './file-url-utils'; -import { getChatCompletionToolChoice } from './get-tool-choice'; +} from "@ai-sdk/provider-utils"; + +import type { FileAnnotation } from "../schemas/provider-metadata"; +import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; +import type { OpenRouterUsageAccounting } from "../types/index"; +import type { + OpenRouterChatModelId, + OpenRouterChatSettings, +} from "../types/openrouter-chat-settings"; +import { openrouterFailedResponseHandler } from "../schemas/error-response"; +import { OpenRouterProviderMetadataSchema } from "../schemas/provider-metadata"; +import { ReasoningDetailType } from "../schemas/reasoning-details"; +import { createFinishReason, mapOpenRouterFinishReason } from "../utils/map-finish-reason"; +import { convertToOpenRouterChatMessages } from "./convert-to-openrouter-chat-messages"; +import { getBase64FromDataUrl, getMediaType } from "./file-url-utils"; +import { getChatCompletionToolChoice } from "./get-tool-choice"; import { OpenRouterNonStreamChatCompletionResponseSchema, OpenRouterStreamChatCompletionChunkSchema, -} from './schemas'; +} 
from "./schemas"; type OpenRouterChatConfig = { provider: string; - compatibility: 'strict' | 'compatible'; + compatibility: "strict" | "compatible"; headers: () => Record; url: (options: { modelId: string; path: string }) => string; fetch?: typeof fetch; @@ -59,19 +52,16 @@ type OpenRouterChatConfig = { }; export class OpenRouterChatLanguageModel implements LanguageModelV3 { - readonly specificationVersion = 'v3' as const; - readonly provider = 'openrouter'; - readonly defaultObjectGenerationMode = 'tool' as const; + readonly specificationVersion = "v3" as const; + readonly provider = "openrouter"; + readonly defaultObjectGenerationMode = "tool" as const; readonly modelId: OpenRouterChatModelId; readonly supportsImageUrls = true; readonly supportedUrls: Record = { - 'image/*': [ - /^data:image\/[a-zA-Z]+;base64,/, - /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i, - ], + "image/*": [/^data:image\/[a-zA-Z]+;base64,/, /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i], // 'text/*': [/^data:text\//, /^https?:\/\/.+$/], - 'application/*': [/^data:application\//, /^https?:\/\/.+$/], + "application/*": [/^data:application\//, /^https?:\/\/.+$/], }; readonly settings: OpenRouterChatSettings; @@ -109,14 +99,13 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { // model specific settings: logit_bias: this.settings.logitBias, logprobs: - this.settings.logprobs === true || - typeof this.settings.logprobs === 'number' + this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : undefined, top_logprobs: - typeof this.settings.logprobs === 'number' + typeof this.settings.logprobs === "number" ? this.settings.logprobs - : typeof this.settings.logprobs === 'boolean' + : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 
0 : undefined @@ -134,20 +123,20 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { stop: stopSequences, response_format: - responseFormat?.type === 'json' + responseFormat?.type === "json" ? responseFormat.schema != null ? { - type: 'json_schema', + type: "json_schema", json_schema: { schema: responseFormat.schema, strict: true, - name: responseFormat.name ?? 'response', + name: responseFormat.name ?? "response", ...(responseFormat.description && { description: responseFormat.description, }), }, } - : { type: 'json_object' } + : { type: "json_object" } : undefined, top_k: topK, @@ -175,12 +164,9 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (tools && tools.length > 0) { // TODO: support built-in tools const mappedTools = tools - .filter( - (tool): tool is LanguageModelV3FunctionTool => - tool.type === 'function', - ) + .filter((tool): tool is LanguageModelV3FunctionTool => tool.type === "function") .map((tool) => ({ - type: 'function' as const, + type: "function" as const, function: { name: tool.name, description: tool.description, @@ -191,9 +177,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { return { ...baseArgs, tools: mappedTools, - tool_choice: toolChoice - ? getChatCompletionToolChoice(toolChoice) - : undefined, + tool_choice: toolChoice ? 
getChatCompletionToolChoice(toolChoice) : undefined, }; } @@ -228,7 +212,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { const { value: responseValue, responseHeaders } = await postJsonToApi({ url: this.config.url({ - path: '/chat/completions', + path: "/chat/completions", modelId: this.modelId, }), headers: combineHeaders(this.config.headers(), options.headers), @@ -242,7 +226,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { }); // Check if response is an error (HTTP 200 with error payload) - if ('error' in responseValue) { + if ("error" in responseValue) { const errorData = responseValue.error as { message: string; code?: string; @@ -250,7 +234,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { throw new APICallError({ message: errorData.message, url: this.config.url({ - path: '/chat/completions', + path: "/chat/completions", modelId: this.modelId, }), requestBodyValues: args, @@ -267,7 +251,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (!choice) { throw new NoContentGeneratedError({ - message: 'No choice in response', + message: "No choice in response", }); } @@ -277,16 +261,13 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { inputTokens: { total: response.usage.prompt_tokens ?? 0, noCache: undefined, - cacheRead: - response.usage.prompt_tokens_details?.cached_tokens ?? undefined, + cacheRead: response.usage.prompt_tokens_details?.cached_tokens ?? undefined, cacheWrite: undefined, }, outputTokens: { total: response.usage.completion_tokens ?? 0, text: undefined, - reasoning: - response.usage.completion_tokens_details?.reasoning_tokens ?? - undefined, + reasoning: response.usage.completion_tokens_details?.reasoning_tokens ?? 
undefined, }, } : { @@ -313,7 +294,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { case ReasoningDetailType.Text: { if (detail.text) { return { - type: 'reasoning' as const, + type: "reasoning" as const, text: detail.text, providerMetadata: { openrouter: { @@ -327,7 +308,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { case ReasoningDetailType.Summary: { if (detail.summary) { return { - type: 'reasoning' as const, + type: "reasoning" as const, text: detail.summary, providerMetadata: { openrouter: { @@ -342,8 +323,8 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { // For encrypted reasoning, we include a redacted placeholder if (detail.data) { return { - type: 'reasoning' as const, - text: '[REDACTED]', + type: "reasoning" as const, + text: "[REDACTED]", providerMetadata: { openrouter: { reasoning_details: [detail], @@ -363,7 +344,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { : choice.message.reasoning ? [ { - type: 'reasoning' as const, + type: "reasoning" as const, text: choice.message.reasoning, }, ] @@ -376,7 +357,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (choice.message.content) { content.push({ - type: 'text' as const, + type: "text" as const, text: choice.message.content, }); } @@ -384,7 +365,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (choice.message.tool_calls) { for (const toolCall of choice.message.tool_calls) { content.push({ - type: 'tool-call' as const, + type: "tool-call" as const, toolCallId: toolCall.id ?? 
generateId(), toolName: toolCall.function.name, input: toolCall.function.arguments, @@ -400,8 +381,8 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (choice.message.images) { for (const image of choice.message.images) { content.push({ - type: 'file' as const, - mediaType: getMediaType(image.image_url.url, 'image/jpeg'), + type: "file" as const, + mediaType: getMediaType(image.image_url.url, "image/jpeg"), data: getBase64FromDataUrl(image.image_url.url), }); } @@ -409,16 +390,16 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (choice.message.annotations) { for (const annotation of choice.message.annotations) { - if (annotation.type === 'url_citation') { + if (annotation.type === "url_citation") { content.push({ - type: 'source' as const, - sourceType: 'url' as const, + type: "source" as const, + sourceType: "url" as const, id: annotation.url_citation.url, url: annotation.url_citation.url, title: annotation.url_citation.title, providerMetadata: { openrouter: { - content: annotation.url_citation.content || '', + content: annotation.url_citation.content || "", }, }, }); @@ -431,28 +412,27 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { ( a, ): a is { - type: 'file'; + type: "file"; file: { hash: string; name: string; content?: Array<{ type: string; text?: string }>; }; - } => a.type === 'file', + } => a.type === "file", ); // Fix for Gemini 3 thoughtSignature: when there are tool calls with encrypted // reasoning (thoughtSignature), the model returns 'stop' but expects continuation. // Override to 'tool-calls' so the SDK knows to continue the conversation. 
- const hasToolCalls = - choice.message.tool_calls && choice.message.tool_calls.length > 0; + const hasToolCalls = choice.message.tool_calls && choice.message.tool_calls.length > 0; const hasEncryptedReasoning = reasoningDetails.some( (d) => d.type === ReasoningDetailType.Encrypted && d.data, ); const shouldOverrideFinishReason = - hasToolCalls && hasEncryptedReasoning && choice.finish_reason === 'stop'; + hasToolCalls && hasEncryptedReasoning && choice.finish_reason === "stop"; const effectiveFinishReason = shouldOverrideFinishReason - ? createFinishReason('tool-calls', choice.finish_reason ?? undefined) + ? createFinishReason("tool-calls", choice.finish_reason ?? undefined) : mapOpenRouterFinishReason(choice.finish_reason); return { @@ -462,41 +442,32 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { warnings: [], providerMetadata: { openrouter: OpenRouterProviderMetadataSchema.parse({ - provider: response.provider ?? '', + provider: response.provider ?? "", reasoning_details: choice.message.reasoning_details ?? [], - annotations: - fileAnnotations && fileAnnotations.length > 0 - ? fileAnnotations - : undefined, + annotations: fileAnnotations && fileAnnotations.length > 0 ? fileAnnotations : undefined, usage: { promptTokens: usageInfo.inputTokens.total ?? 0, completionTokens: usageInfo.outputTokens.total ?? 0, - totalTokens: - (usageInfo.inputTokens.total ?? 0) + - (usageInfo.outputTokens.total ?? 0), + totalTokens: (usageInfo.inputTokens.total ?? 0) + (usageInfo.outputTokens.total ?? 0), cost: response.usage?.cost, ...(response.usage?.prompt_tokens_details?.cached_tokens != null ? { promptTokensDetails: { - cachedTokens: - response.usage.prompt_tokens_details.cached_tokens, + cachedTokens: response.usage.prompt_tokens_details.cached_tokens, }, } : {}), - ...(response.usage?.completion_tokens_details?.reasoning_tokens != - null + ...(response.usage?.completion_tokens_details?.reasoning_tokens != null ? 
{ completionTokensDetails: { - reasoningTokens: - response.usage.completion_tokens_details.reasoning_tokens, + reasoningTokens: response.usage.completion_tokens_details.reasoning_tokens, }, } : {}), ...(response.usage?.cost_details?.upstream_inference_cost != null ? { costDetails: { - upstreamInferenceCost: - response.usage.cost_details.upstream_inference_cost, + upstreamInferenceCost: response.usage.cost_details.upstream_inference_cost, }, } : {}), @@ -531,7 +502,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { const { value: response, responseHeaders } = await postJsonToApi({ url: this.config.url({ - path: '/chat/completions', + path: "/chat/completions", modelId: this.modelId, }), headers: combineHeaders(this.config.headers(), options.headers), @@ -541,13 +512,11 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { // only include stream_options when in strict compatibility mode: stream_options: - this.config.compatibility === 'strict' + this.config.compatibility === "strict" ? { include_usage: true, // If user has requested usage accounting, make sure we get it in the stream - ...(this.settings.usage?.include - ? { include_usage: true } - : {}), + ...(this.settings.usage?.include ? 
{ include_usage: true } : {}), } : undefined, }, @@ -561,7 +530,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { const toolCalls: Array<{ id: string; - type: 'function'; + type: "function"; function: { name: string; arguments: string; @@ -570,7 +539,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { sent: boolean; }> = []; - let finishReason: LanguageModelV3FinishReason = createFinishReason('other'); + let finishReason: LanguageModelV3FinishReason = createFinishReason("other"); const usage: LanguageModelV3Usage = { inputTokens: { total: undefined, @@ -604,25 +573,23 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { return { stream: response.pipeThrough( new TransformStream< - ParseResult< - z.infer - >, + ParseResult>, LanguageModelV3StreamPart >({ transform(chunk, controller) { // handle failed chunk parsing / validation: if (!chunk.success) { - finishReason = createFinishReason('error'); - controller.enqueue({ type: 'error', error: chunk.error }); + finishReason = createFinishReason("error"); + controller.enqueue({ type: "error", error: chunk.error }); return; } const value = chunk.value; // handle error chunks: - if ('error' in value) { - finishReason = createFinishReason('error'); - controller.enqueue({ type: 'error', error: value.error }); + if ("error" in value) { + finishReason = createFinishReason("error"); + controller.enqueue({ type: "error", error: value.error }); return; } @@ -633,14 +600,14 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (value.id) { openrouterResponseId = value.id; controller.enqueue({ - type: 'response-metadata', + type: "response-metadata", id: value.id, }); } if (value.model) { controller.enqueue({ - type: 'response-metadata', + type: "response-metadata", modelId: value.model, }); } @@ -653,8 +620,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { openrouterUsage.promptTokens = value.usage.prompt_tokens; if 
(value.usage.prompt_tokens_details) { - const cachedInputTokens = - value.usage.prompt_tokens_details.cached_tokens ?? 0; + const cachedInputTokens = value.usage.prompt_tokens_details.cached_tokens ?? 0; usage.inputTokens.cacheRead = cachedInputTokens; openrouterUsage.promptTokensDetails = { @@ -664,8 +630,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { openrouterUsage.completionTokens = value.usage.completion_tokens; if (value.usage.completion_tokens_details) { - const reasoningTokens = - value.usage.completion_tokens_details.reasoning_tokens ?? 0; + const reasoningTokens = value.usage.completion_tokens_details.reasoning_tokens ?? 0; usage.outputTokens.reasoning = reasoningTokens; openrouterUsage.completionTokensDetails = { @@ -675,8 +640,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { openrouterUsage.cost = value.usage.cost; openrouterUsage.totalTokens = value.usage.total_tokens; - const upstreamInferenceCost = - value.usage.cost_details?.upstream_inference_cost; + const upstreamInferenceCost = value.usage.cost_details?.upstream_inference_cost; if (upstreamInferenceCost != null) { openrouterUsage.costDetails = { upstreamInferenceCost, @@ -704,14 +668,14 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { reasoningId = openrouterResponseId || generateId(); controller.enqueue({ providerMetadata, - type: 'reasoning-start', + type: "reasoning-start", id: reasoningId, }); reasoningStarted = true; } controller.enqueue({ providerMetadata, - type: 'reasoning-delta', + type: "reasoning-delta", delta: chunkText, id: reasoningId || generateId(), }); @@ -723,16 +687,12 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { for (const detail of delta.reasoning_details) { if (detail.type === ReasoningDetailType.Text) { const lastDetail = - accumulatedReasoningDetails[ - accumulatedReasoningDetails.length - 1 - ]; + accumulatedReasoningDetails[accumulatedReasoningDetails.length - 1]; if 
(lastDetail?.type === ReasoningDetailType.Text) { // Merge with the previous text detail - lastDetail.text = - (lastDetail.text || '') + (detail.text || ''); + lastDetail.text = (lastDetail.text || "") + (detail.text || ""); - lastDetail.signature = - lastDetail.signature || detail.signature; + lastDetail.signature = lastDetail.signature || detail.signature; lastDetail.format = lastDetail.format || detail.format; } else { @@ -763,7 +723,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { } case ReasoningDetailType.Encrypted: { if (detail.data) { - emitReasoningChunk('[REDACTED]', reasoningMetadata); + emitReasoningChunk("[REDACTED]", reasoningMetadata); } break; } @@ -788,7 +748,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { // we should end the reasoning first to maintain proper order if (reasoningStarted && !textStarted) { controller.enqueue({ - type: 'reasoning-end', + type: "reasoning-end", id: reasoningId || generateId(), }); reasoningStarted = false; // Mark as ended so we don't end it again in flush @@ -797,13 +757,13 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (!textStarted) { textId = openrouterResponseId || generateId(); controller.enqueue({ - type: 'text-start', + type: "text-start", id: textId, }); textStarted = true; } controller.enqueue({ - type: 'text-delta', + type: "text-delta", delta: delta.content, id: textId || generateId(), }); @@ -811,32 +771,25 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (delta.annotations) { for (const annotation of delta.annotations) { - if (annotation.type === 'url_citation') { + if (annotation.type === "url_citation") { controller.enqueue({ - type: 'source', - sourceType: 'url' as const, + type: "source", + sourceType: "url" as const, id: annotation.url_citation.url, url: annotation.url_citation.url, title: annotation.url_citation.title, providerMetadata: { openrouter: { - content: 
annotation.url_citation.content || '', + content: annotation.url_citation.content || "", }, }, }); - } else if (annotation.type === 'file') { + } else if (annotation.type === "file") { // Accumulate file annotations to expose in providerMetadata // Type guard to validate structure matches expected shape const file = (annotation as { file?: unknown }).file; - if ( - file && - typeof file === 'object' && - 'hash' in file && - 'name' in file - ) { - accumulatedFileAnnotations.push( - annotation as FileAnnotation, - ); + if (file && typeof file === "object" && "hash" in file && "name" in file) { + accumulatedFileAnnotations.push(annotation as FileAnnotation); } } } @@ -848,7 +801,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { // Tool call start. OpenRouter returns all information except the arguments in the first chunk. if (toolCalls[index] == null) { - if (toolCallDelta.type !== 'function') { + if (toolCallDelta.type !== "function") { throw new InvalidResponseDataError({ data: toolCallDelta, message: `Expected 'function' type.`, @@ -871,10 +824,10 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { toolCalls[index] = { id: toolCallDelta.id, - type: 'function', + type: "function", function: { name: toolCallDelta.function.name, - arguments: toolCallDelta.function.arguments ?? '', + arguments: toolCallDelta.function.arguments ?? 
"", }, inputStarted: false, sent: false, @@ -898,26 +851,26 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { toolCall.inputStarted = true; controller.enqueue({ - type: 'tool-input-start', + type: "tool-input-start", id: toolCall.id, toolName: toolCall.function.name, }); // send delta controller.enqueue({ - type: 'tool-input-delta', + type: "tool-input-delta", id: toolCall.id, delta: toolCall.function.arguments, }); controller.enqueue({ - type: 'tool-input-end', + type: "tool-input-end", id: toolCall.id, }); // send tool call controller.enqueue({ - type: 'tool-call', + type: "tool-call", toolCallId: toolCall.id, toolName: toolCall.function.name, input: toolCall.function.arguments, @@ -951,22 +904,21 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (!toolCall.inputStarted) { toolCall.inputStarted = true; controller.enqueue({ - type: 'tool-input-start', + type: "tool-input-start", id: toolCall.id, toolName: toolCall.function.name, }); } if (toolCallDelta.function?.arguments != null) { - toolCall.function.arguments += - toolCallDelta.function?.arguments ?? ''; + toolCall.function.arguments += toolCallDelta.function?.arguments ?? ""; } // send delta controller.enqueue({ - type: 'tool-input-delta', + type: "tool-input-delta", id: toolCall.id, - delta: toolCallDelta.function.arguments ?? '', + delta: toolCallDelta.function.arguments ?? "", }); // check if tool call is complete @@ -976,7 +928,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { isParsableJson(toolCall.function.arguments) ) { controller.enqueue({ - type: 'tool-call', + type: "tool-call", toolCallId: toolCall.id ?? 
generateId(), toolName: toolCall.function.name, input: toolCall.function.arguments, @@ -995,8 +947,8 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { if (delta.images != null) { for (const image of delta.images) { controller.enqueue({ - type: 'file', - mediaType: getMediaType(image.image_url.url, 'image/jpeg'), + type: "file", + mediaType: getMediaType(image.image_url.url, "image/jpeg"), data: getBase64FromDataUrl(image.image_url.url), }); } @@ -1011,26 +963,22 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { const hasEncryptedReasoning = accumulatedReasoningDetails.some( (d) => d.type === ReasoningDetailType.Encrypted && d.data, ); - if ( - hasToolCalls && - hasEncryptedReasoning && - finishReason.unified === 'stop' - ) { - finishReason = createFinishReason('tool-calls', finishReason.raw); + if (hasToolCalls && hasEncryptedReasoning && finishReason.unified === "stop") { + finishReason = createFinishReason("tool-calls", finishReason.raw); } // Forward any unsent tool calls if finish reason is 'tool-calls' - if (finishReason.unified === 'tool-calls') { + if (finishReason.unified === "tool-calls") { for (const toolCall of toolCalls) { if (toolCall && !toolCall.sent) { controller.enqueue({ - type: 'tool-call', + type: "tool-call", toolCallId: toolCall.id ?? generateId(), toolName: toolCall.function.name, // Coerce invalid arguments to an empty JSON object input: isParsableJson(toolCall.function.arguments) ? 
toolCall.function.arguments - : '{}', + : "{}", providerMetadata: { openrouter: { reasoning_details: accumulatedReasoningDetails, @@ -1045,13 +993,13 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { // End reasoning first if it was started, to maintain proper order if (reasoningStarted) { controller.enqueue({ - type: 'reasoning-end', + type: "reasoning-end", id: reasoningId || generateId(), }); } if (textStarted) { controller.enqueue({ - type: 'text-end', + type: "text-end", id: textId || generateId(), }); } @@ -1072,8 +1020,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { // Include accumulated reasoning_details if any were received if (accumulatedReasoningDetails.length > 0) { - openrouterMetadata.reasoning_details = - accumulatedReasoningDetails; + openrouterMetadata.reasoning_details = accumulatedReasoningDetails; } // Include accumulated file annotations if any were received @@ -1082,7 +1029,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { } controller.enqueue({ - type: 'finish', + type: "finish", finishReason, usage, providerMetadata: { diff --git a/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts b/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts index c623df4..0805a1f 100644 --- a/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts @@ -1,76 +1,72 @@ -import type { LanguageModelV3Prompt } from '@ai-sdk/provider'; +import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; +import { describe, expect, it } from "vitest"; -import { describe, expect, it } from 'vitest'; -import { createOpenRouter } from '../provider'; -import { createTestServer } from '../test-utils/test-server'; +import { createOpenRouter } from "../provider"; +import { createTestServer } from "../test-utils/test-server"; const TEST_PROMPT: LanguageModelV3Prompt = [ - { role: 'user', content: [{ type: 'text', 
text: 'Hello' }] }, + { role: "user", content: [{ type: "text", text: "Hello" }] }, ]; const provider = createOpenRouter({ - baseURL: 'https://test.openrouter.ai/api/v1', - apiKey: 'test-api-key', + baseURL: "https://test.openrouter.ai/api/v1", + apiKey: "test-api-key", }); const server = createTestServer({ - 'https://test.openrouter.ai/api/v1/chat/completions': {}, + "https://test.openrouter.ai/api/v1/chat/completions": {}, }); -describe('Large PDF Response Handling', () => { - describe('doGenerate', () => { - it('should handle HTTP 200 responses with error payloads (500 internal errors)', async () => { +describe("Large PDF Response Handling", () => { + describe("doGenerate", () => { + it("should handle HTTP 200 responses with error payloads (500 internal errors)", async () => { // This is the actual response OpenRouter returns for large PDF failures // HTTP 200 status but contains error object instead of choices - server.urls[ - 'https://test.openrouter.ai/api/v1/chat/completions' - ]!.response = { - type: 'json-value', + server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { + type: "json-value", body: { error: { - message: 'Internal Server Error', + message: "Internal Server Error", code: 500, }, - user_id: 'org_abc123', + user_id: "org_abc123", }, }; - const model = provider('anthropic/claude-3.5-sonnet'); + const model = provider("anthropic/claude-3.5-sonnet"); await expect( model.doGenerate({ prompt: TEST_PROMPT, }), - ).rejects.toThrow('Internal Server Error'); + ).rejects.toThrow("Internal Server Error"); }); - it('should parse successful large PDF responses with file annotations', async () => { + it("should parse successful large PDF responses with file annotations", async () => { // Successful response with file annotations from FileParserPlugin - server.urls[ - 'https://test.openrouter.ai/api/v1/chat/completions' - ]!.response = { - type: 'json-value', + server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response 
= { + type: "json-value", body: { - id: 'gen-123', - model: 'anthropic/claude-3.5-sonnet', - provider: 'Anthropic', + id: "gen-123", + model: "anthropic/claude-3.5-sonnet", + provider: "Anthropic", choices: [ { index: 0, message: { - role: 'assistant', - content: 'LARGE-M9N3T', + role: "assistant", + content: "LARGE-M9N3T", annotations: [ { - type: 'file_annotation', + type: "file_annotation", file_annotation: { - file_id: 'file_abc123', - quote: 'extracted text', + file_id: "file_abc123", + quote: "extracted text", }, }, ], }, - finish_reason: 'stop', + finish_reason: "stop", }, ], usage: { @@ -81,7 +77,7 @@ describe('Large PDF Response Handling', () => { }, }; - const model = provider('anthropic/claude-3.5-sonnet', { + const model = provider("anthropic/claude-3.5-sonnet", { usage: { include: true }, }); @@ -91,14 +87,13 @@ describe('Large PDF Response Handling', () => { expect(result.content).toMatchObject([ { - type: 'text', - text: 'LARGE-M9N3T', + type: "text", + text: "LARGE-M9N3T", }, ]); - expect( - (result.usage.inputTokens?.total ?? 0) + - (result.usage.outputTokens?.total ?? 0), - ).toBe(120); + expect((result.usage.inputTokens?.total ?? 0) + (result.usage.outputTokens?.total ?? 
0)).toBe( + 120, + ); }); }); }); diff --git a/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts b/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts index 6eeac7a..62d7de0 100644 --- a/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts @@ -1,16 +1,16 @@ -import type { LanguageModelV3Prompt } from '@ai-sdk/provider'; -import type { OpenRouterChatCompletionsInput } from '../types/openrouter-chat-completions-input'; -import type { OpenRouterChatSettings } from '../types/openrouter-chat-settings'; +import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; +import { describe, expect, it, vi } from "vitest"; -import { describe, expect, it, vi } from 'vitest'; -import { createOpenRouter } from '../provider'; +import type { OpenRouterChatCompletionsInput } from "../types/openrouter-chat-completions-input"; +import type { OpenRouterChatSettings } from "../types/openrouter-chat-settings"; +import { createOpenRouter } from "../provider"; -describe('Payload Comparison - Large PDF', () => { - it('should send payload matching fetch baseline for large PDFs', async () => { +describe("Payload Comparison - Large PDF", () => { + it("should send payload matching fetch baseline for large PDFs", async () => { interface CapturedRequestBody { model: string; messages: OpenRouterChatCompletionsInput; - plugins?: OpenRouterChatSettings['plugins']; + plugins?: OpenRouterChatSettings["plugins"]; usage?: { include: boolean }; } @@ -20,23 +20,21 @@ describe('Payload Comparison - Large PDF', () => { const mockFetch = vi.fn(async (_url: string, init?: RequestInit) => { // Capture the request body if (init?.body) { - capturedRequestBody = JSON.parse( - init.body as string, - ) as CapturedRequestBody; + capturedRequestBody = JSON.parse(init.body as string) as CapturedRequestBody; } // Return a minimal success response return new Response( JSON.stringify({ - id: 'test-123', - model: 
'anthropic/claude-3.5-sonnet', + id: "test-123", + model: "anthropic/claude-3.5-sonnet", choices: [ { message: { - role: 'assistant', - content: 'Test response', + role: "assistant", + content: "Test response", }, - finish_reason: 'stop', + finish_reason: "stop", }, ], usage: { @@ -47,39 +45,39 @@ describe('Payload Comparison - Large PDF', () => { }), { status: 200, - headers: { 'Content-Type': 'application/json' }, + headers: { "Content-Type": "application/json" }, }, ); }) as typeof fetch; const provider = createOpenRouter({ - apiKey: 'test-key', + apiKey: "test-key", fetch: mockFetch, }); // Simulate a large PDF (use a small base64 for testing, but structure matters) - const smallPdfBase64 = 'JVBERi0xLjQKJeLjz9MKM...(truncated)'; + const smallPdfBase64 = "JVBERi0xLjQKJeLjz9MKM...(truncated)"; const dataUrl = `data:application/pdf;base64,${smallPdfBase64}`; const prompt: LanguageModelV3Prompt = [ { - role: 'user', + role: "user", content: [ { - type: 'text', - text: 'Extract the verification code. Reply with ONLY the code.', + type: "text", + text: "Extract the verification code. 
Reply with ONLY the code.", }, { - type: 'file', + type: "file", data: dataUrl, - mediaType: 'application/pdf', + mediaType: "application/pdf", }, ], }, ]; - const model = provider('anthropic/claude-3.5-sonnet', { - plugins: [{ id: 'file-parser', pdf: { engine: 'mistral-ocr' } }], + const model = provider("anthropic/claude-3.5-sonnet", { + plugins: [{ id: "file-parser", pdf: { engine: "mistral-ocr" } }], usage: { include: true }, }); @@ -105,32 +103,32 @@ describe('Payload Comparison - Large PDF', () => { const messages = capturedRequestBody!.messages; expect(messages).toHaveLength(1); - expect(messages[0]?.role).toBe('user'); + expect(messages[0]?.role).toBe("user"); expect(messages[0]?.content).toBeInstanceOf(Array); const content = messages[0]?.content; if (!Array.isArray(content)) { - throw new Error('Content should be an array'); + throw new Error("Content should be an array"); } // Find the file part - const filePart = content.find((part) => part.type === 'file'); + const filePart = content.find((part) => part.type === "file"); expect(filePart).toBeDefined(); // CRITICAL ASSERTION: The file part should have a nested 'file' object with 'file_data' // This is what the fetch example sends and what OpenRouter expects expect(filePart).toMatchObject({ - type: 'file', + type: "file", file: { - file_data: expect.stringContaining('data:application/pdf;base64,'), + file_data: expect.stringContaining("data:application/pdf;base64,"), }, }); // Find the text part - const textPart = content.find((part) => part.type === 'text'); + const textPart = content.find((part) => part.type === "text"); expect(textPart).toMatchObject({ - type: 'text', - text: 'Extract the verification code. Reply with ONLY the code.', + type: "text", + text: "Extract the verification code. 
Reply with ONLY the code.", }); // Check for plugins array @@ -139,13 +137,13 @@ describe('Payload Comparison - Large PDF', () => { const { plugins } = capturedRequestBody!; if (!plugins) { - throw new Error('Plugins should be defined'); + throw new Error("Plugins should be defined"); } - const fileParserPlugin = plugins.find((p) => p.id === 'file-parser'); + const fileParserPlugin = plugins.find((p) => p.id === "file-parser"); expect(fileParserPlugin).toBeDefined(); expect(fileParserPlugin).toMatchObject({ - id: 'file-parser', + id: "file-parser", pdf: { engine: expect.stringMatching(/^(mistral-ocr|pdf-text|native)$/), }, diff --git a/packages/ai-sdk-provider-2/src/chat/schemas.ts b/packages/ai-sdk-provider-2/src/chat/schemas.ts index 6367a04..c8b5e7e 100644 --- a/packages/ai-sdk-provider-2/src/chat/schemas.ts +++ b/packages/ai-sdk-provider-2/src/chat/schemas.ts @@ -1,7 +1,8 @@ -import { z } from 'zod/v4'; -import { OpenRouterErrorResponseSchema } from '../schemas/error-response'; -import { ImageResponseArraySchema } from '../schemas/image'; -import { ReasoningDetailArraySchema } from '../schemas/reasoning-details'; +import { z } from "zod/v4"; + +import { OpenRouterErrorResponseSchema } from "../schemas/error-response"; +import { ImageResponseArraySchema } from "../schemas/image"; +import { ReasoningDetailArraySchema } from "../schemas/reasoning-details"; const OpenRouterChatCompletionBaseResponseSchema = z .object({ @@ -47,7 +48,7 @@ export const OpenRouterNonStreamChatCompletionResponseSchema = z.union([ .object({ message: z .object({ - role: z.literal('assistant'), + role: z.literal("assistant"), content: z.string().nullable().optional(), reasoning: z.string().nullable().optional(), reasoning_details: ReasoningDetailArraySchema.nullish(), @@ -58,7 +59,7 @@ export const OpenRouterNonStreamChatCompletionResponseSchema = z.union([ z .object({ id: z.string().optional().nullable(), - type: z.literal('function'), + type: z.literal("function"), function: z .object({ 
name: z.string(), @@ -76,7 +77,7 @@ export const OpenRouterNonStreamChatCompletionResponseSchema = z.union([ // URL citation from web search z .object({ - type: z.literal('url_citation'), + type: z.literal("url_citation"), url_citation: z .object({ end_index: z.number(), @@ -91,7 +92,7 @@ export const OpenRouterNonStreamChatCompletionResponseSchema = z.union([ // File annotation from FileParserPlugin (old format) z .object({ - type: z.literal('file_annotation'), + type: z.literal("file_annotation"), file_annotation: z .object({ file_id: z.string(), @@ -103,7 +104,7 @@ export const OpenRouterNonStreamChatCompletionResponseSchema = z.union([ // File annotation from FileParserPlugin (new format) z .object({ - type: z.literal('file'), + type: z.literal("file"), file: z .object({ hash: z.string(), @@ -171,7 +172,7 @@ export const OpenRouterStreamChatCompletionChunkSchema = z.union([ .object({ delta: z .object({ - role: z.enum(['assistant']).optional(), + role: z.enum(["assistant"]).optional(), content: z.string().nullish(), reasoning: z.string().nullish().optional(), reasoning_details: ReasoningDetailArraySchema.nullish(), @@ -182,7 +183,7 @@ export const OpenRouterStreamChatCompletionChunkSchema = z.union([ .object({ index: z.number().nullish(), id: z.string().nullish(), - type: z.literal('function').optional(), + type: z.literal("function").optional(), function: z .object({ name: z.string().nullish(), @@ -200,7 +201,7 @@ export const OpenRouterStreamChatCompletionChunkSchema = z.union([ // URL citation from web search z .object({ - type: z.literal('url_citation'), + type: z.literal("url_citation"), url_citation: z .object({ end_index: z.number(), @@ -215,7 +216,7 @@ export const OpenRouterStreamChatCompletionChunkSchema = z.union([ // File annotation from FileParserPlugin (old format) z .object({ - type: z.literal('file_annotation'), + type: z.literal("file_annotation"), file_annotation: z .object({ file_id: z.string(), @@ -227,7 +228,7 @@ export const 
OpenRouterStreamChatCompletionChunkSchema = z.union([ // File annotation from FileParserPlugin (new format) z .object({ - type: z.literal('file'), + type: z.literal("file"), file: z .object({ hash: z.string(), diff --git a/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts b/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts index b9af689..4b122ee 100644 --- a/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts +++ b/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts @@ -5,21 +5,17 @@ import type { LanguageModelV3TextPart, LanguageModelV3ToolCallPart, LanguageModelV3ToolResultPart, -} from '@ai-sdk/provider'; - -import { - InvalidPromptError, - UnsupportedFunctionalityError, -} from '@ai-sdk/provider'; +} from "@ai-sdk/provider"; +import { InvalidPromptError, UnsupportedFunctionalityError } from "@ai-sdk/provider"; export function convertToOpenRouterCompletionPrompt({ prompt, inputFormat, - user = 'user', - assistant = 'assistant', + user = "user", + assistant = "assistant", }: { prompt: LanguageModelV3Prompt; - inputFormat: 'prompt' | 'messages'; + inputFormat: "prompt" | "messages"; user?: string; assistant?: string; }): { @@ -27,60 +23,60 @@ export function convertToOpenRouterCompletionPrompt({ } { // When the user supplied a prompt input, we don't transform it: if ( - inputFormat === 'prompt' && + inputFormat === "prompt" && prompt.length === 1 && prompt[0] && - prompt[0].role === 'user' && + prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0] && - prompt[0].content[0].type === 'text' + prompt[0].content[0].type === "text" ) { return { prompt: prompt[0].content[0].text }; } // otherwise transform to a chat message format: - let text = ''; + let text = ""; // if first message is a system message, add it to the text: - if (prompt[0] && prompt[0].role === 'system') { + if (prompt[0] && prompt[0].role 
=== "system") { text += `${prompt[0].content}\n\n`; prompt = prompt.slice(1); } for (const { role, content } of prompt) { switch (role) { - case 'system': { + case "system": { throw new InvalidPromptError({ message: `Unexpected system message in prompt: ${content}`, prompt, }); } - case 'user': { + case "user": { const userMessage = content .map((part: LanguageModelV3TextPart | LanguageModelV3FilePart) => { switch (part.type) { - case 'text': { + case "text": { return part.text; } - case 'file': { + case "file": { throw new UnsupportedFunctionalityError({ - functionality: 'file attachments', + functionality: "file attachments", }); } default: { - return ''; + return ""; } } }) - .join(''); + .join(""); text += `${user}:\n${userMessage}\n\n`; break; } - case 'assistant': { + case "assistant": { const assistantMessage = content .map( ( @@ -92,46 +88,46 @@ export function convertToOpenRouterCompletionPrompt({ | LanguageModelV3ToolResultPart, ) => { switch (part.type) { - case 'text': { + case "text": { return part.text; } - case 'tool-call': { + case "tool-call": { throw new UnsupportedFunctionalityError({ - functionality: 'tool-call messages', + functionality: "tool-call messages", }); } - case 'tool-result': { + case "tool-result": { throw new UnsupportedFunctionalityError({ - functionality: 'tool-result messages', + functionality: "tool-result messages", }); } - case 'reasoning': { + case "reasoning": { throw new UnsupportedFunctionalityError({ - functionality: 'reasoning messages', + functionality: "reasoning messages", }); } - case 'file': { + case "file": { throw new UnsupportedFunctionalityError({ - functionality: 'file attachments', + functionality: "file attachments", }); } default: { - return ''; + return ""; } } }, ) - .join(''); + .join(""); text += `${assistant}:\n${assistantMessage}\n\n`; break; } - case 'tool': { + case "tool": { throw new UnsupportedFunctionalityError({ - functionality: 'tool messages', + functionality: "tool messages", }); } diff 
--git a/packages/ai-sdk-provider-2/src/completion/index.test.ts b/packages/ai-sdk-provider-2/src/completion/index.test.ts index 46da64b..cfe5ea7 100644 --- a/packages/ai-sdk-provider-2/src/completion/index.test.ts +++ b/packages/ai-sdk-provider-2/src/completion/index.test.ts @@ -1,73 +1,65 @@ -import type { - LanguageModelV3Prompt, - LanguageModelV3StreamPart, -} from '@ai-sdk/provider'; - -import { vi } from 'vitest'; -import { createOpenRouter } from '../provider'; -import { - convertReadableStreamToArray, - createTestServer, -} from '../test-utils/test-server'; - -vi.mock('@/src/version', () => ({ - VERSION: '0.0.0-test', +import type { LanguageModelV3Prompt, LanguageModelV3StreamPart } from "@ai-sdk/provider"; +import { vi } from "vitest"; + +import { createOpenRouter } from "../provider"; +import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; + +vi.mock("../version", () => ({ + VERSION: "0.0.0-test", })); const TEST_PROMPT: LanguageModelV3Prompt = [ - { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, + { role: "user", content: [{ type: "text", text: "Hello" }] }, ]; const TEST_LOGPROBS = { - tokens: [' ever', ' after', '.\n\n', 'The', ' end', '.'], - token_logprobs: [ - -0.0664508, -0.014520033, -1.3820221, -0.7890417, -0.5323165, -0.10247037, - ], + tokens: [" ever", " after", ".\n\n", "The", " end", "."], + token_logprobs: [-0.0664508, -0.014520033, -1.3820221, -0.7890417, -0.5323165, -0.10247037], top_logprobs: [ { - ' ever': -0.0664508, + " ever": -0.0664508, }, { - ' after': -0.014520033, + " after": -0.014520033, }, { - '.\n\n': -1.3820221, + ".\n\n": -1.3820221, }, { The: -0.7890417, }, { - ' end': -0.5323165, + " end": -0.5323165, }, { - '.': -0.10247037, + ".": -0.10247037, }, ] as Record[], }; const provider = createOpenRouter({ - apiKey: 'test-api-key', - compatibility: 'strict', + apiKey: "test-api-key", + compatibility: "strict", }); -const model = 
provider.completion('openai/gpt-3.5-turbo-instruct'); +const model = provider.completion("openai/gpt-3.5-turbo-instruct"); -describe('doGenerate', () => { +describe("doGenerate", () => { const server = createTestServer({ - 'https://openrouter.ai/api/v1/completions': { - response: { type: 'json-value', body: {} }, + "https://openrouter.ai/api/v1/completions": { + response: { type: "json-value", body: {} }, }, }); function prepareJsonResponse({ - content = '', + content = "", usage = { prompt_tokens: 4, total_tokens: 34, completion_tokens: 30, }, logprobs = null, - finish_reason = 'stop', + finish_reason = "stop", }: { content?: string; usage?: { @@ -82,13 +74,14 @@ describe('doGenerate', () => { } | null; finish_reason?: string; }) { - server.urls['https://openrouter.ai/api/v1/completions']!.response = { - type: 'json-value', + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/completions"]!.response = { + type: "json-value", body: { - id: 'cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB', - object: 'text_completion', + id: "cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB", + object: "text_completion", created: 1711363706, - model: 'openai/gpt-3.5-turbo-instruct', + model: "openai/gpt-3.5-turbo-instruct", choices: [ { text: content, @@ -102,21 +95,21 @@ describe('doGenerate', () => { }; } - it('should extract text response', async () => { - prepareJsonResponse({ content: 'Hello, World!' }); + it("should extract text response", async () => { + prepareJsonResponse({ content: "Hello, World!" }); const { content } = await model.doGenerate({ prompt: TEST_PROMPT, }); - const text = content[0]?.type === 'text' ? content[0].text : ''; + const text = content[0]?.type === "text" ? 
content[0].text : ""; - expect(text).toStrictEqual('Hello, World!'); + expect(text).toStrictEqual("Hello, World!"); }); - it('should extract usage', async () => { + it("should extract usage", async () => { prepareJsonResponse({ - content: '', + content: "", usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, }); @@ -139,120 +132,115 @@ describe('doGenerate', () => { }); }); - it('should extract logprobs', async () => { + it("should extract logprobs", async () => { prepareJsonResponse({ logprobs: TEST_LOGPROBS }); - const provider = createOpenRouter({ apiKey: 'test-api-key' }); + const provider = createOpenRouter({ apiKey: "test-api-key" }); - await provider - .completion('openai/gpt-3.5-turbo', { logprobs: 1 }) - .doGenerate({ - prompt: TEST_PROMPT, - }); + await provider.completion("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ + prompt: TEST_PROMPT, + }); }); - it('should extract finish reason', async () => { + it("should extract finish reason", async () => { prepareJsonResponse({ - content: '', - finish_reason: 'stop', + content: "", + finish_reason: "stop", }); - const { finishReason } = await provider - .completion('openai/gpt-3.5-turbo-instruct') - .doGenerate({ - prompt: TEST_PROMPT, - }); + const { finishReason } = await provider.completion("openai/gpt-3.5-turbo-instruct").doGenerate({ + prompt: TEST_PROMPT, + }); - expect(finishReason).toStrictEqual({ unified: 'stop', raw: 'stop' }); + expect(finishReason).toStrictEqual({ unified: "stop", raw: "stop" }); }); - it('should support unknown finish reason', async () => { + it("should support unknown finish reason", async () => { prepareJsonResponse({ - content: '', - finish_reason: 'eos', + content: "", + finish_reason: "eos", }); - const { finishReason } = await provider - .completion('openai/gpt-3.5-turbo-instruct') - .doGenerate({ - prompt: TEST_PROMPT, - }); + const { finishReason } = await provider.completion("openai/gpt-3.5-turbo-instruct").doGenerate({ + prompt: TEST_PROMPT, + 
}); - expect(finishReason).toStrictEqual({ unified: 'other', raw: 'eos' }); + expect(finishReason).toStrictEqual({ unified: "other", raw: "eos" }); }); - it('should pass the model and the prompt', async () => { - prepareJsonResponse({ content: '' }); + it("should pass the model and the prompt", async () => { + prepareJsonResponse({ content: "" }); await model.doGenerate({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: 'openai/gpt-3.5-turbo-instruct', - prompt: 'Hello', + model: "openai/gpt-3.5-turbo-instruct", + prompt: "Hello", }); }); - it('should pass the models array when provided', async () => { - prepareJsonResponse({ content: '' }); + it("should pass the models array when provided", async () => { + prepareJsonResponse({ content: "" }); - const customModel = provider.completion('openai/gpt-3.5-turbo-instruct', { - models: ['openai/gpt-4', 'anthropic/claude-2'], + const customModel = provider.completion("openai/gpt-3.5-turbo-instruct", { + models: ["openai/gpt-4", "anthropic/claude-2"], }); await customModel.doGenerate({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: 'openai/gpt-3.5-turbo-instruct', - models: ['openai/gpt-4', 'anthropic/claude-2'], - prompt: 'Hello', + model: "openai/gpt-3.5-turbo-instruct", + models: ["openai/gpt-4", "anthropic/claude-2"], + prompt: "Hello", }); }); - it('should pass headers', async () => { - prepareJsonResponse({ content: '' }); + it("should pass headers", async () => { + prepareJsonResponse({ content: "" }); const provider = createOpenRouter({ - apiKey: 'test-api-key', + apiKey: "test-api-key", headers: { - 'Custom-Provider-Header': 'provider-header-value', + "Custom-Provider-Header": "provider-header-value", }, }); - await 
provider.completion('openai/gpt-3.5-turbo-instruct').doGenerate({ + await provider.completion("openai/gpt-3.5-turbo-instruct").doGenerate({ prompt: TEST_PROMPT, headers: { - 'Custom-Request-Header': 'request-header-value', + "Custom-Request-Header": "request-header-value", }, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const requestHeaders = server.calls[0]!.requestHeaders; expect(requestHeaders).toMatchObject({ - authorization: 'Bearer test-api-key', - 'content-type': 'application/json', - 'custom-provider-header': 'provider-header-value', - 'custom-request-header': 'request-header-value', + authorization: "Bearer test-api-key", + "content-type": "application/json", + "custom-provider-header": "provider-header-value", + "custom-request-header": "request-header-value", }); - expect(requestHeaders['user-agent']).toContain( - 'ai-sdk/openrouter/0.0.0-test', - ); + expect(requestHeaders["user-agent"]).toContain("ai-sdk/openrouter/0.0.0-test"); }); }); -describe('doStream', () => { +describe("doStream", () => { const server = createTestServer({ - 'https://openrouter.ai/api/v1/completions': { - response: { type: 'stream-chunks', chunks: [] }, + "https://openrouter.ai/api/v1/completions": { + response: { type: "stream-chunks", chunks: [] }, }, }); function prepareStreamResponse({ content, - finish_reason = 'stop', + finish_reason = "stop", usage = { prompt_tokens: 10, total_tokens: 372, @@ -283,8 +271,9 @@ describe('doStream', () => { } | null; finish_reason?: string; }) { - server.urls['https://openrouter.ai/api/v1/completions']!.response = { - type: 'stream-chunks', + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/completions"]!.response = { + type: "stream-chunks", chunks: [ ...content.map((text) => { return `data: 
{"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,"choices":[{"text":"${text}","index":0,"logprobs":null,"finish_reason":null}],"model":"openai/gpt-3.5-turbo-instruct"}\n\n`; @@ -295,15 +284,15 @@ describe('doStream', () => { `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,"model":"openai/gpt-3.5-turbo-instruct","usage":${JSON.stringify( usage, )},"choices":[]}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; } - it('should stream text deltas', async () => { + it("should stream text deltas", async () => { prepareStreamResponse({ - content: ['Hello', ', ', 'World!'], - finish_reason: 'stop', + content: ["Hello", ", ", "World!"], + finish_reason: "stop", usage: { prompt_tokens: 10, total_tokens: 372, @@ -319,13 +308,13 @@ describe('doStream', () => { // note: space moved to last chunk bc of trimming const elements = await convertReadableStreamToArray(stream); expect(elements).toStrictEqual([ - { type: 'text-delta', delta: 'Hello', id: expect.any(String) }, - { type: 'text-delta', delta: ', ', id: expect.any(String) }, - { type: 'text-delta', delta: 'World!', id: expect.any(String) }, - { type: 'text-delta', delta: '', id: expect.any(String) }, + { type: "text-delta", delta: "Hello", id: expect.any(String) }, + { type: "text-delta", delta: ", ", id: expect.any(String) }, + { type: "text-delta", delta: "World!", id: expect.any(String) }, + { type: "text-delta", delta: "", id: expect.any(String) }, { - type: 'finish', - finishReason: { unified: 'stop', raw: 'stop' }, + type: "finish", + finishReason: { unified: "stop", raw: "stop" }, providerMetadata: { openrouter: { usage: { @@ -353,9 +342,9 @@ describe('doStream', () => { ]); }); - it('should include upstream inference cost when provided', async () => { + it("should include upstream inference cost when provided", async () => { prepareStreamResponse({ - content: ['Hello'], + content: ["Hello"], usage: { prompt_tokens: 5, 
total_tokens: 15, @@ -370,14 +359,10 @@ describe('doStream', () => { prompt: TEST_PROMPT, }); - const elements = (await convertReadableStreamToArray( - stream, - )) as LanguageModelV3StreamPart[]; + const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; const finishChunk = elements.find( - ( - element, - ): element is Extract => - element.type === 'finish', + (element): element is Extract => + element.type === "finish", ); const openrouterUsage = ( finishChunk?.providerMetadata?.openrouter as { @@ -392,9 +377,9 @@ describe('doStream', () => { }); }); - it('should handle both normal cost and upstream inference cost in finish metadata when both are provided', async () => { + it("should handle both normal cost and upstream inference cost in finish metadata when both are provided", async () => { prepareStreamResponse({ - content: ['Hello'], + content: ["Hello"], usage: { prompt_tokens: 5, total_tokens: 15, @@ -410,14 +395,10 @@ describe('doStream', () => { prompt: TEST_PROMPT, }); - const elements = (await convertReadableStreamToArray( - stream, - )) as LanguageModelV3StreamPart[]; + const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; const finishChunk = elements.find( - ( - element, - ): element is Extract => - element.type === 'finish', + (element): element is Extract => + element.type === "finish", ); const openrouterUsage = ( finishChunk?.providerMetadata?.openrouter as { @@ -433,13 +414,14 @@ describe('doStream', () => { expect(openrouterUsage?.cost).toBe(0.0025); }); - it('should handle error stream parts', async () => { - server.urls['https://openrouter.ai/api/v1/completions']!.response = { - type: 'stream-chunks', + it("should handle error stream parts", async () => { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/completions"]!.response = { + type: "stream-chunks", chunks: [ `data: {"error":{"message": "The server 
had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + `help center at help.openrouter.com if you keep seeing this error.","type":"server_error","param":null,"code":null}}\n\n`, - 'data: [DONE]\n\n', + "data: [DONE]\n\n", ], }; @@ -449,25 +431,25 @@ describe('doStream', () => { expect(await convertReadableStreamToArray(stream)).toStrictEqual([ { - type: 'error', + type: "error", error: { message: - 'The server had an error processing your request. Sorry about that! ' + - 'You can retry your request, or contact us through our help center at ' + - 'help.openrouter.com if you keep seeing this error.', - type: 'server_error', + "The server had an error processing your request. Sorry about that! " + + "You can retry your request, or contact us through our help center at " + + "help.openrouter.com if you keep seeing this error.", + type: "server_error", code: null, param: null, }, }, { - finishReason: { unified: 'error', raw: undefined }, + finishReason: { unified: "error", raw: undefined }, providerMetadata: { openrouter: { usage: {}, }, }, - type: 'finish', + type: "finish", usage: { inputTokens: { total: undefined, @@ -485,10 +467,11 @@ describe('doStream', () => { ]); }); - it('should handle unparsable stream parts', async () => { - server.urls['https://openrouter.ai/api/v1/completions']!.response = { - type: 'stream-chunks', - chunks: ['data: {unparsable}\n\n', 'data: [DONE]\n\n'], + it("should handle unparsable stream parts", async () => { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + server.urls["https://openrouter.ai/api/v1/completions"]!.response = { + type: "stream-chunks", + chunks: ["data: {unparsable}\n\n", "data: [DONE]\n\n"], }; const { stream } = await model.doStream({ @@ -498,15 +481,15 @@ describe('doStream', () => { const elements = await convertReadableStreamToArray(stream); expect(elements.length).toBe(2); - expect(elements[0]?.type).toBe('error'); + 
expect(elements[0]?.type).toBe("error"); expect(elements[1]).toStrictEqual({ - finishReason: { unified: 'error', raw: undefined }, + finishReason: { unified: "error", raw: undefined }, providerMetadata: { openrouter: { usage: {}, }, }, - type: 'finish', + type: "finish", usage: { inputTokens: { total: undefined, @@ -523,76 +506,74 @@ describe('doStream', () => { }); }); - it('should pass the model and the prompt', async () => { + it("should pass the model and the prompt", async () => { prepareStreamResponse({ content: [] }); await model.doStream({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ stream: true, stream_options: { include_usage: true }, - model: 'openai/gpt-3.5-turbo-instruct', - prompt: 'Hello', + model: "openai/gpt-3.5-turbo-instruct", + prompt: "Hello", }); }); - it('should pass headers', async () => { + it("should pass headers", async () => { prepareStreamResponse({ content: [] }); const provider = createOpenRouter({ - apiKey: 'test-api-key', + apiKey: "test-api-key", headers: { - 'Custom-Provider-Header': 'provider-header-value', + "Custom-Provider-Header": "provider-header-value", }, }); - await provider.completion('openai/gpt-3.5-turbo-instruct').doStream({ + await provider.completion("openai/gpt-3.5-turbo-instruct").doStream({ prompt: TEST_PROMPT, headers: { - 'Custom-Request-Header': 'request-header-value', + "Custom-Request-Header": "request-header-value", }, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const requestHeaders = server.calls[0]!.requestHeaders; expect(requestHeaders).toMatchObject({ - authorization: 'Bearer test-api-key', - 'content-type': 'application/json', - 'custom-provider-header': 'provider-header-value', - 'custom-request-header': 'request-header-value', + authorization: "Bearer test-api-key", + "content-type": "application/json", + "custom-provider-header": "provider-header-value", + 
"custom-request-header": "request-header-value", }); - expect(requestHeaders['user-agent']).toContain( - 'ai-sdk/openrouter/0.0.0-test', - ); + expect(requestHeaders["user-agent"]).toContain("ai-sdk/openrouter/0.0.0-test"); }); - it('should pass extra body', async () => { + it("should pass extra body", async () => { prepareStreamResponse({ content: [] }); const provider = createOpenRouter({ - apiKey: 'test-api-key', + apiKey: "test-api-key", extraBody: { - custom_field: 'custom_value', + custom_field: "custom_value", providers: { anthropic: { - custom_field: 'custom_value', + custom_field: "custom_value", }, }, }, }); - await provider.completion('openai/gpt-4o').doStream({ + await provider.completion("openai/gpt-4o").doStream({ prompt: TEST_PROMPT, }); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const requestBody = await server.calls[0]!.requestBodyJson; - expect(requestBody).toHaveProperty('custom_field', 'custom_value'); - expect(requestBody).toHaveProperty( - 'providers.anthropic.custom_field', - 'custom_value', - ); + expect(requestBody).toHaveProperty("custom_field", "custom_value"); + expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); }); }); diff --git a/packages/ai-sdk-provider-2/src/completion/index.ts b/packages/ai-sdk-provider-2/src/completion/index.ts index 482f77a..5d61414 100644 --- a/packages/ai-sdk-provider-2/src/completion/index.ts +++ b/packages/ai-sdk-provider-2/src/completion/index.ts @@ -4,38 +4,35 @@ import type { LanguageModelV3FinishReason, LanguageModelV3StreamPart, LanguageModelV3Usage, -} from '@ai-sdk/provider'; -import type { ParseResult } from '@ai-sdk/provider-utils'; -import type { z } from 'zod/v4'; -import type { OpenRouterUsageAccounting } from '../types'; -import type { - OpenRouterCompletionModelId, - OpenRouterCompletionSettings, -} from '../types/openrouter-completion-settings'; - +} from "@ai-sdk/provider"; +import type { ParseResult } from 
"@ai-sdk/provider-utils"; +import type { z } from "zod/v4"; import { APICallError, NoContentGeneratedError, UnsupportedFunctionalityError, -} from '@ai-sdk/provider'; +} from "@ai-sdk/provider"; import { combineHeaders, createEventSourceResponseHandler, createJsonResponseHandler, generateId, postJsonToApi, -} from '@ai-sdk/provider-utils'; -import { openrouterFailedResponseHandler } from '../schemas/error-response'; -import { - createFinishReason, - mapOpenRouterFinishReason, -} from '../utils/map-finish-reason'; -import { convertToOpenRouterCompletionPrompt } from './convert-to-openrouter-completion-prompt'; -import { OpenRouterCompletionChunkSchema } from './schemas'; +} from "@ai-sdk/provider-utils"; + +import type { OpenRouterUsageAccounting } from "../types"; +import type { + OpenRouterCompletionModelId, + OpenRouterCompletionSettings, +} from "../types/openrouter-completion-settings"; +import { openrouterFailedResponseHandler } from "../schemas/error-response"; +import { createFinishReason, mapOpenRouterFinishReason } from "../utils/map-finish-reason"; +import { convertToOpenRouterCompletionPrompt } from "./convert-to-openrouter-completion-prompt"; +import { OpenRouterCompletionChunkSchema } from "./schemas"; type OpenRouterCompletionConfig = { provider: string; - compatibility: 'strict' | 'compatible'; + compatibility: "strict" | "compatible"; headers: () => Record; url: (options: { modelId: string; path: string }) => string; fetch?: typeof fetch; @@ -43,17 +40,14 @@ type OpenRouterCompletionConfig = { }; export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { - readonly specificationVersion = 'v3' as const; - readonly provider = 'openrouter'; + readonly specificationVersion = "v3" as const; + readonly provider = "openrouter"; readonly modelId: OpenRouterCompletionModelId; readonly supportsImageUrls = true; readonly supportedUrls: Record = { - 'image/*': [ - /^data:image\/[a-zA-Z]+;base64,/, - /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i, 
- ], - 'text/*': [/^data:text\//, /^https?:\/\/.+$/], - 'application/*': [/^data:application\//, /^https?:\/\/.+$/], + "image/*": [/^data:image\/[a-zA-Z]+;base64,/, /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i], + "text/*": [/^data:text\//, /^https?:\/\/.+$/], + "application/*": [/^data:application\//, /^https?:\/\/.+$/], }; readonly defaultObjectGenerationMode = undefined; readonly settings: OpenRouterCompletionSettings; @@ -86,18 +80,18 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { }: LanguageModelV3CallOptions) { const { prompt: completionPrompt } = convertToOpenRouterCompletionPrompt({ prompt, - inputFormat: 'prompt', + inputFormat: "prompt", }); if (tools?.length) { throw new UnsupportedFunctionalityError({ - functionality: 'tools', + functionality: "tools", }); } if (toolChoice) { throw new UnsupportedFunctionalityError({ - functionality: 'toolChoice', + functionality: "toolChoice", }); } @@ -109,9 +103,9 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { // model specific settings: logit_bias: this.settings.logitBias, logprobs: - typeof this.settings.logprobs === 'number' + typeof this.settings.logprobs === "number" ? this.settings.logprobs - : typeof this.settings.logprobs === 'boolean' + : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 
0 : undefined @@ -146,7 +140,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { async doGenerate( options: LanguageModelV3CallOptions, - ): Promise>> { + ): Promise>> { const providerOptions = options.providerOptions || {}; const openrouterOptions = providerOptions.openrouter || {}; @@ -157,25 +151,23 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { const { value: response, responseHeaders } = await postJsonToApi({ url: this.config.url({ - path: '/completions', + path: "/completions", modelId: this.modelId, }), headers: combineHeaders(this.config.headers(), options.headers), body: args, failedResponseHandler: openrouterFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler( - OpenRouterCompletionChunkSchema, - ), + successfulResponseHandler: createJsonResponseHandler(OpenRouterCompletionChunkSchema), abortSignal: options.abortSignal, fetch: this.config.fetch, }); - if ('error' in response) { + if ("error" in response) { const errorData = response.error as { message: string; code?: string }; throw new APICallError({ message: errorData.message, url: this.config.url({ - path: '/completions', + path: "/completions", modelId: this.modelId, }), requestBodyValues: args, @@ -189,15 +181,15 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { if (!choice) { throw new NoContentGeneratedError({ - message: 'No choice in OpenRouter completion response', + message: "No choice in OpenRouter completion response", }); } return { content: [ { - type: 'text', - text: choice.text ?? '', + type: "text", + text: choice.text ?? "", }, ], finishReason: mapOpenRouterFinishReason(choice.finish_reason), @@ -205,16 +197,13 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { inputTokens: { total: response.usage?.prompt_tokens ?? 0, noCache: undefined, - cacheRead: - response.usage?.prompt_tokens_details?.cached_tokens ?? 
undefined, + cacheRead: response.usage?.prompt_tokens_details?.cached_tokens ?? undefined, cacheWrite: undefined, }, outputTokens: { total: response.usage?.completion_tokens ?? 0, text: undefined, - reasoning: - response.usage?.completion_tokens_details?.reasoning_tokens ?? - undefined, + reasoning: response.usage?.completion_tokens_details?.reasoning_tokens ?? undefined, }, }, warnings: [], @@ -226,7 +215,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { async doStream( options: LanguageModelV3CallOptions, - ): Promise>> { + ): Promise>> { const providerOptions = options.providerOptions || {}; const openrouterOptions = providerOptions.openrouter || {}; @@ -237,7 +226,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { const { value: response, responseHeaders } = await postJsonToApi({ url: this.config.url({ - path: '/completions', + path: "/completions", modelId: this.modelId, }), headers: combineHeaders(this.config.headers(), options.headers), @@ -247,19 +236,15 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { // only include stream_options when in strict compatibility mode: stream_options: - this.config.compatibility === 'strict' - ? { include_usage: true } - : undefined, + this.config.compatibility === "strict" ? 
{ include_usage: true } : undefined, }, failedResponseHandler: openrouterFailedResponseHandler, - successfulResponseHandler: createEventSourceResponseHandler( - OpenRouterCompletionChunkSchema, - ), + successfulResponseHandler: createEventSourceResponseHandler(OpenRouterCompletionChunkSchema), abortSignal: options.abortSignal, fetch: this.config.fetch, }); - let finishReason: LanguageModelV3FinishReason = createFinishReason('other'); + let finishReason: LanguageModelV3FinishReason = createFinishReason("other"); const usage: LanguageModelV3Usage = { inputTokens: { total: undefined, @@ -284,17 +269,17 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { transform(chunk, controller) { // handle failed chunk parsing / validation: if (!chunk.success) { - finishReason = createFinishReason('error'); - controller.enqueue({ type: 'error', error: chunk.error }); + finishReason = createFinishReason("error"); + controller.enqueue({ type: "error", error: chunk.error }); return; } const value = chunk.value; // handle error chunks: - if ('error' in value) { - finishReason = createFinishReason('error'); - controller.enqueue({ type: 'error', error: value.error }); + if ("error" in value) { + finishReason = createFinishReason("error"); + controller.enqueue({ type: "error", error: value.error }); return; } @@ -306,8 +291,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { openrouterUsage.promptTokens = value.usage.prompt_tokens; if (value.usage.prompt_tokens_details) { - const cachedInputTokens = - value.usage.prompt_tokens_details.cached_tokens ?? 0; + const cachedInputTokens = value.usage.prompt_tokens_details.cached_tokens ?? 
0; usage.inputTokens.cacheRead = cachedInputTokens; openrouterUsage.promptTokensDetails = { @@ -317,8 +301,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { openrouterUsage.completionTokens = value.usage.completion_tokens; if (value.usage.completion_tokens_details) { - const reasoningTokens = - value.usage.completion_tokens_details.reasoning_tokens ?? 0; + const reasoningTokens = value.usage.completion_tokens_details.reasoning_tokens ?? 0; usage.outputTokens.reasoning = reasoningTokens; openrouterUsage.completionTokensDetails = { @@ -328,8 +311,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { openrouterUsage.cost = value.usage.cost; openrouterUsage.totalTokens = value.usage.total_tokens; - const upstreamInferenceCost = - value.usage.cost_details?.upstream_inference_cost; + const upstreamInferenceCost = value.usage.cost_details?.upstream_inference_cost; if (upstreamInferenceCost != null) { openrouterUsage.costDetails = { upstreamInferenceCost, @@ -345,7 +327,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { if (choice?.text != null) { controller.enqueue({ - type: 'text-delta', + type: "text-delta", delta: choice.text, id: generateId(), }); @@ -354,7 +336,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { flush(controller) { controller.enqueue({ - type: 'finish', + type: "finish", finishReason, usage, providerMetadata: { diff --git a/packages/ai-sdk-provider-2/src/completion/schemas.ts b/packages/ai-sdk-provider-2/src/completion/schemas.ts index c38f00e..2e48c1f 100644 --- a/packages/ai-sdk-provider-2/src/completion/schemas.ts +++ b/packages/ai-sdk-provider-2/src/completion/schemas.ts @@ -1,6 +1,7 @@ -import { z } from 'zod/v4'; -import { OpenRouterErrorResponseSchema } from '../schemas/error-response'; -import { ReasoningDetailArraySchema } from '../schemas/reasoning-details'; +import { z } from "zod/v4"; + +import { 
OpenRouterErrorResponseSchema } from "../schemas/error-response"; +import { ReasoningDetailArraySchema } from "../schemas/reasoning-details"; // limited version of the schema, focussed on what is needed for the implementation // this approach limits breakages when the API changes and increases efficiency @@ -22,9 +23,7 @@ export const OpenRouterCompletionChunkSchema = z.union([ .object({ tokens: z.array(z.string()), token_logprobs: z.array(z.number()), - top_logprobs: z - .array(z.record(z.string(), z.number())) - .nullable(), + top_logprobs: z.array(z.record(z.string(), z.number())).nullable(), }) .passthrough() .nullable() diff --git a/packages/ai-sdk-provider-2/src/embedding/index.test.ts b/packages/ai-sdk-provider-2/src/embedding/index.test.ts index bad731a..4b8f6d7 100644 --- a/packages/ai-sdk-provider-2/src/embedding/index.test.ts +++ b/packages/ai-sdk-provider-2/src/embedding/index.test.ts @@ -1,24 +1,22 @@ -import { describe, expect, it } from 'vitest'; -import { createOpenRouter } from '../provider'; -import { OpenRouterEmbeddingModel } from './index'; - -describe('OpenRouterEmbeddingModel', () => { - const mockFetch = async ( - _url: URL | RequestInfo, - _init?: RequestInit, - ): Promise => { +import { describe, expect, it } from "vitest"; + +import { createOpenRouter } from "../provider"; +import { OpenRouterEmbeddingModel } from "./index"; + +describe("OpenRouterEmbeddingModel", () => { + const mockFetch = async (_url: URL | RequestInfo, _init?: RequestInit): Promise => { return new Response( JSON.stringify({ - id: 'test-id', - object: 'list', + id: "test-id", + object: "list", data: [ { - object: 'embedding', + object: "embedding", embedding: new Array(1536).fill(0.1), index: 0, }, ], - model: 'openai/text-embedding-3-small', + model: "openai/text-embedding-3-small", usage: { prompt_tokens: 5, total_tokens: 5, @@ -28,86 +26,81 @@ describe('OpenRouterEmbeddingModel', () => { { status: 200, headers: { - 'content-type': 'application/json', + 
"content-type": "application/json", }, }, ); }; - describe('provider methods', () => { - it('should expose textEmbeddingModel method', () => { - const provider = createOpenRouter({ apiKey: 'test-key' }); + describe("provider methods", () => { + it("should expose textEmbeddingModel method", () => { + const provider = createOpenRouter({ apiKey: "test-key" }); expect(provider.textEmbeddingModel).toBeDefined(); - expect(typeof provider.textEmbeddingModel).toBe('function'); + expect(typeof provider.textEmbeddingModel).toBe("function"); }); - it('should expose embedding method (deprecated)', () => { - const provider = createOpenRouter({ apiKey: 'test-key' }); + it("should expose embedding method (deprecated)", () => { + const provider = createOpenRouter({ apiKey: "test-key" }); expect(provider.embedding).toBeDefined(); - expect(typeof provider.embedding).toBe('function'); + expect(typeof provider.embedding).toBe("function"); }); - it('should create an embedding model instance', () => { - const provider = createOpenRouter({ apiKey: 'test-key' }); - const model = provider.textEmbeddingModel( - 'openai/text-embedding-3-small', - ); + it("should create an embedding model instance", () => { + const provider = createOpenRouter({ apiKey: "test-key" }); + const model = provider.textEmbeddingModel("openai/text-embedding-3-small"); expect(model).toBeInstanceOf(OpenRouterEmbeddingModel); - expect(model.modelId).toBe('openai/text-embedding-3-small'); - expect(model.provider).toBe('openrouter'); - expect(model.specificationVersion).toBe('v3'); + expect(model.modelId).toBe("openai/text-embedding-3-small"); + expect(model.provider).toBe("openrouter"); + expect(model.specificationVersion).toBe("v3"); }); }); - describe('doEmbed', () => { - it('should embed a single value', async () => { + describe("doEmbed", () => { + it("should embed a single value", async () => { const provider = createOpenRouter({ - apiKey: 'test-key', + apiKey: "test-key", fetch: mockFetch, }); - const model = 
provider.textEmbeddingModel( - 'openai/text-embedding-3-small', - ); + const model = provider.textEmbeddingModel("openai/text-embedding-3-small"); const result = await model.doEmbed({ - values: ['sunny day at the beach'], + values: ["sunny day at the beach"], }); expect(result.embeddings).toHaveLength(1); expect(result.embeddings[0]).toHaveLength(1536); expect(result.usage).toEqual({ tokens: 5 }); expect( - (result.providerMetadata?.openrouter as { usage?: { cost?: number } }) - ?.usage?.cost, + (result.providerMetadata?.openrouter as { usage?: { cost?: number } })?.usage?.cost, ).toBe(0.00001); }); - it('should embed multiple values', async () => { + it("should embed multiple values", async () => { const mockFetchMultiple = async ( _url: URL | RequestInfo, _init?: RequestInit, ): Promise => { return new Response( JSON.stringify({ - object: 'list', + object: "list", data: [ { - object: 'embedding', + object: "embedding", embedding: new Array(1536).fill(0.1), index: 0, }, { - object: 'embedding', + object: "embedding", embedding: new Array(1536).fill(0.2), index: 1, }, { - object: 'embedding', + object: "embedding", embedding: new Array(1536).fill(0.3), index: 2, }, ], - model: 'openai/text-embedding-3-small', + model: "openai/text-embedding-3-small", usage: { prompt_tokens: 15, total_tokens: 15, @@ -116,26 +109,20 @@ describe('OpenRouterEmbeddingModel', () => { { status: 200, headers: { - 'content-type': 'application/json', + "content-type": "application/json", }, }, ); }; const provider = createOpenRouter({ - apiKey: 'test-key', + apiKey: "test-key", fetch: mockFetchMultiple, }); - const model = provider.textEmbeddingModel( - 'openai/text-embedding-3-small', - ); + const model = provider.textEmbeddingModel("openai/text-embedding-3-small"); const result = await model.doEmbed({ - values: [ - 'sunny day at the beach', - 'rainy day in the city', - 'snowy mountain peak', - ], + values: ["sunny day at the beach", "rainy day in the city", "snowy mountain peak"], }); 
expect(result.embeddings).toHaveLength(3); @@ -145,7 +132,7 @@ describe('OpenRouterEmbeddingModel', () => { expect(result.usage).toEqual({ tokens: 15 }); }); - it('should pass custom settings to API', async () => { + it("should pass custom settings to API", async () => { let capturedRequest: Record | undefined; const mockFetchWithCapture = async ( @@ -155,15 +142,15 @@ describe('OpenRouterEmbeddingModel', () => { capturedRequest = JSON.parse(init?.body as string); return new Response( JSON.stringify({ - object: 'list', + object: "list", data: [ { - object: 'embedding', + object: "embedding", embedding: new Array(1536).fill(0.1), index: 0, }, ], - model: 'openai/text-embedding-3-small', + model: "openai/text-embedding-3-small", usage: { prompt_tokens: 5, total_tokens: 5, @@ -172,77 +159,72 @@ describe('OpenRouterEmbeddingModel', () => { { status: 200, headers: { - 'content-type': 'application/json', + "content-type": "application/json", }, }, ); }; const provider = createOpenRouter({ - apiKey: 'test-key', + apiKey: "test-key", fetch: mockFetchWithCapture, }); - const model = provider.textEmbeddingModel( - 'openai/text-embedding-3-small', - { - user: 'test-user-123', - provider: { - order: ['openai'], - allow_fallbacks: false, - }, + const model = provider.textEmbeddingModel("openai/text-embedding-3-small", { + user: "test-user-123", + provider: { + order: ["openai"], + allow_fallbacks: false, }, - ); + }); await model.doEmbed({ - values: ['test input'], + values: ["test input"], }); - expect(capturedRequest?.user).toBe('test-user-123'); + expect(capturedRequest?.user).toBe("test-user-123"); expect(capturedRequest?.provider).toEqual({ - order: ['openai'], + order: ["openai"], allow_fallbacks: false, }); - expect(capturedRequest?.model).toBe('openai/text-embedding-3-small'); - expect(capturedRequest?.input).toEqual(['test input']); + expect(capturedRequest?.model).toBe("openai/text-embedding-3-small"); + expect(capturedRequest?.input).toEqual(["test input"]); }); - 
it('should handle response without usage information', async () => { + it("should handle response without usage information", async () => { const mockFetchNoUsage = async ( _url: URL | RequestInfo, _init?: RequestInit, ): Promise => { return new Response( JSON.stringify({ - object: 'list', + object: "list", data: [ { - object: 'embedding', + object: "embedding", embedding: new Array(1536).fill(0.1), index: 0, }, ], - model: 'openai/text-embedding-3-small', + model: "openai/text-embedding-3-small", }), { status: 200, headers: { - 'content-type': 'application/json', + "content-type": "application/json", }, }, ); }; const provider = createOpenRouter({ - apiKey: 'test-key', + apiKey: "test-key", fetch: mockFetchNoUsage, }); - const model = provider.textEmbeddingModel( - 'openai/text-embedding-3-small', - ); + const model = provider.textEmbeddingModel("openai/text-embedding-3-small"); const result = await model.doEmbed({ - values: ['test'], + values: ["test"], }); expect(result.embeddings).toHaveLength(1); diff --git a/packages/ai-sdk-provider-2/src/embedding/index.ts b/packages/ai-sdk-provider-2/src/embedding/index.ts index a88e325..87e510b 100644 --- a/packages/ai-sdk-provider-2/src/embedding/index.ts +++ b/packages/ai-sdk-provider-2/src/embedding/index.ts @@ -1,20 +1,12 @@ -import type { - EmbeddingModelV3, - SharedV3Headers, - SharedV3ProviderMetadata, -} from '@ai-sdk/provider'; +import type { EmbeddingModelV3, SharedV3Headers, SharedV3ProviderMetadata } from "@ai-sdk/provider"; +import { combineHeaders, createJsonResponseHandler, postJsonToApi } from "@ai-sdk/provider-utils"; + import type { OpenRouterEmbeddingModelId, OpenRouterEmbeddingSettings, -} from '../types/openrouter-embedding-settings'; - -import { - combineHeaders, - createJsonResponseHandler, - postJsonToApi, -} from '@ai-sdk/provider-utils'; -import { openrouterFailedResponseHandler } from '../schemas/error-response'; -import { OpenRouterEmbeddingResponseSchema } from './schemas'; +} from 
"../types/openrouter-embedding-settings"; +import { openrouterFailedResponseHandler } from "../schemas/error-response"; +import { OpenRouterEmbeddingResponseSchema } from "./schemas"; type OpenRouterEmbeddingConfig = { provider: string; @@ -25,8 +17,8 @@ type OpenRouterEmbeddingConfig = { }; export class OpenRouterEmbeddingModel implements EmbeddingModelV3 { - readonly specificationVersion = 'v3' as const; - readonly provider = 'openrouter'; + readonly specificationVersion = "v3" as const; + readonly provider = "openrouter"; readonly modelId: OpenRouterEmbeddingModelId; readonly settings: OpenRouterEmbeddingSettings; readonly maxEmbeddingsPerCall = undefined; @@ -56,7 +48,7 @@ export class OpenRouterEmbeddingModel implements EmbeddingModelV3 { headers?: SharedV3Headers; body?: unknown; }; - warnings: Array; + warnings: Array; }> { const { values, abortSignal, headers } = options; @@ -71,24 +63,20 @@ export class OpenRouterEmbeddingModel implements EmbeddingModelV3 { const { value: responseValue, responseHeaders } = await postJsonToApi({ url: this.config.url({ - path: '/embeddings', + path: "/embeddings", modelId: this.modelId, }), headers: combineHeaders(this.config.headers(), headers), body: args, failedResponseHandler: openrouterFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler( - OpenRouterEmbeddingResponseSchema, - ), + successfulResponseHandler: createJsonResponseHandler(OpenRouterEmbeddingResponseSchema), abortSignal, fetch: this.config.fetch, }); return { embeddings: responseValue.data.map((item) => item.embedding), - usage: responseValue.usage - ? { tokens: responseValue.usage.prompt_tokens } - : undefined, + usage: responseValue.usage ? { tokens: responseValue.usage.prompt_tokens } : undefined, providerMetadata: responseValue.usage?.cost ? 
{ openrouter: { diff --git a/packages/ai-sdk-provider-2/src/embedding/schemas.ts b/packages/ai-sdk-provider-2/src/embedding/schemas.ts index 55dcfa3..0336f40 100644 --- a/packages/ai-sdk-provider-2/src/embedding/schemas.ts +++ b/packages/ai-sdk-provider-2/src/embedding/schemas.ts @@ -1,4 +1,4 @@ -import { z } from 'zod/v4'; +import { z } from "zod/v4"; const openrouterEmbeddingUsageSchema = z.object({ prompt_tokens: z.number(), @@ -7,19 +7,17 @@ const openrouterEmbeddingUsageSchema = z.object({ }); const openrouterEmbeddingDataSchema = z.object({ - object: z.literal('embedding'), + object: z.literal("embedding"), embedding: z.array(z.number()), index: z.number().optional(), }); export const OpenRouterEmbeddingResponseSchema = z.object({ id: z.string().optional(), - object: z.literal('list'), + object: z.literal("list"), data: z.array(openrouterEmbeddingDataSchema), model: z.string(), usage: openrouterEmbeddingUsageSchema.optional(), }); -export type OpenRouterEmbeddingResponse = z.infer< - typeof OpenRouterEmbeddingResponseSchema ->; +export type OpenRouterEmbeddingResponse = z.infer; diff --git a/packages/ai-sdk-provider-2/src/facade.ts b/packages/ai-sdk-provider-2/src/facade.ts index cb08ca9..1440285 100644 --- a/packages/ai-sdk-provider-2/src/facade.ts +++ b/packages/ai-sdk-provider-2/src/facade.ts @@ -1,21 +1,21 @@ -import type { OpenRouterProviderSettings } from './provider'; +import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; + +import type { OpenRouterProviderSettings } from "./provider"; import type { OpenRouterChatModelId, OpenRouterChatSettings, -} from './types/openrouter-chat-settings'; +} from "./types/openrouter-chat-settings"; import type { OpenRouterCompletionModelId, OpenRouterCompletionSettings, -} from './types/openrouter-completion-settings'; +} from "./types/openrouter-completion-settings"; import type { OpenRouterEmbeddingModelId, OpenRouterEmbeddingSettings, -} from './types/openrouter-embedding-settings'; - -import { 
loadApiKey, withoutTrailingSlash } from '@ai-sdk/provider-utils'; -import { OpenRouterChatLanguageModel } from './chat'; -import { OpenRouterCompletionLanguageModel } from './completion'; -import { OpenRouterEmbeddingModel } from './embedding'; +} from "./types/openrouter-embedding-settings"; +import { OpenRouterChatLanguageModel } from "./chat"; +import { OpenRouterCompletionLanguageModel } from "./completion"; +import { OpenRouterEmbeddingModel } from "./embedding"; /** @deprecated Use `createOpenRouter` instead. @@ -48,8 +48,7 @@ Custom headers to include in the requests. */ constructor(options: OpenRouterProviderSettings = {}) { this.baseURL = - withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? - 'https://openrouter.ai/api/v1'; + withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? "https://openrouter.ai/api/v1"; this.apiKey = options.apiKey; this.headers = options.headers; this.api_keys = options.api_keys; @@ -61,13 +60,13 @@ Custom headers to include in the requests. headers: () => ({ Authorization: `Bearer ${loadApiKey({ apiKey: this.apiKey, - environmentVariableName: 'OPENROUTER_API_KEY', - description: 'OpenRouter', + environmentVariableName: "OPENROUTER_API_KEY", + description: "OpenRouter", })}`, ...this.headers, ...(this.api_keys && Object.keys(this.api_keys).length > 0 && { - 'X-Provider-API-Keys': JSON.stringify(this.api_keys), + "X-Provider-API-Keys": JSON.stringify(this.api_keys), }), }), }; @@ -75,21 +74,18 @@ Custom headers to include in the requests. 
chat(modelId: OpenRouterChatModelId, settings: OpenRouterChatSettings = {}) { return new OpenRouterChatLanguageModel(modelId, settings, { - provider: 'openrouter.chat', + provider: "openrouter.chat", ...this.baseConfig, - compatibility: 'strict', + compatibility: "strict", url: ({ path }) => `${this.baseURL}${path}`, }); } - completion( - modelId: OpenRouterCompletionModelId, - settings: OpenRouterCompletionSettings = {}, - ) { + completion(modelId: OpenRouterCompletionModelId, settings: OpenRouterCompletionSettings = {}) { return new OpenRouterCompletionLanguageModel(modelId, settings, { - provider: 'openrouter.completion', + provider: "openrouter.completion", ...this.baseConfig, - compatibility: 'strict', + compatibility: "strict", url: ({ path }) => `${this.baseURL}${path}`, }); } @@ -99,7 +95,7 @@ Custom headers to include in the requests. settings: OpenRouterEmbeddingSettings = {}, ) { return new OpenRouterEmbeddingModel(modelId, settings, { - provider: 'openrouter.embedding', + provider: "openrouter.embedding", ...this.baseConfig, url: ({ path }) => `${this.baseURL}${path}`, }); @@ -108,10 +104,7 @@ Custom headers to include in the requests. 
/** * @deprecated Use textEmbeddingModel instead */ - embedding( - modelId: OpenRouterEmbeddingModelId, - settings: OpenRouterEmbeddingSettings = {}, - ) { + embedding(modelId: OpenRouterEmbeddingModelId, settings: OpenRouterEmbeddingSettings = {}) { return this.textEmbeddingModel(modelId, settings); } } diff --git a/packages/ai-sdk-provider-2/src/index.ts b/packages/ai-sdk-provider-2/src/index.ts index 670fb1c..9fa3755 100644 --- a/packages/ai-sdk-provider-2/src/index.ts +++ b/packages/ai-sdk-provider-2/src/index.ts @@ -1,3 +1,3 @@ -export * from './facade'; -export * from './provider'; -export * from './types'; +export * from "./facade"; +export * from "./provider"; +export * from "./types"; diff --git a/packages/ai-sdk-provider-2/src/internal copy/index.ts b/packages/ai-sdk-provider-2/src/internal copy/index.ts index fb46972..82be31b 100644 --- a/packages/ai-sdk-provider-2/src/internal copy/index.ts +++ b/packages/ai-sdk-provider-2/src/internal copy/index.ts @@ -1,5 +1,5 @@ -export * from '../chat'; -export * from '../completion'; -export * from '../types'; -export * from '../types/openrouter-chat-settings'; -export * from '../types/openrouter-completion-settings'; +export * from "../chat"; +export * from "../completion"; +export * from "../types"; +export * from "../types/openrouter-chat-settings"; +export * from "../types/openrouter-completion-settings"; diff --git a/packages/ai-sdk-provider-2/src/provider.ts b/packages/ai-sdk-provider-2/src/provider.ts index 6c9a3e3..bcab8ec 100644 --- a/packages/ai-sdk-provider-2/src/provider.ts +++ b/packages/ai-sdk-provider-2/src/provider.ts @@ -1,23 +1,23 @@ -import type { ProviderV3 } from '@ai-sdk/provider'; +import type { ProviderV3 } from "@ai-sdk/provider"; +import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; + import type { OpenRouterChatModelId, OpenRouterChatSettings, -} from './types/openrouter-chat-settings'; +} from "./types/openrouter-chat-settings"; import type { 
OpenRouterCompletionModelId, OpenRouterCompletionSettings, -} from './types/openrouter-completion-settings'; +} from "./types/openrouter-completion-settings"; import type { OpenRouterEmbeddingModelId, OpenRouterEmbeddingSettings, -} from './types/openrouter-embedding-settings'; - -import { loadApiKey, withoutTrailingSlash } from '@ai-sdk/provider-utils'; -import { OpenRouterChatLanguageModel } from './chat'; -import { OpenRouterCompletionLanguageModel } from './completion'; -import { OpenRouterEmbeddingModel } from './embedding'; -import { withUserAgentSuffix } from './utils/with-user-agent-suffix'; -import { VERSION } from './version'; +} from "./types/openrouter-embedding-settings"; +import { OpenRouterChatLanguageModel } from "./chat"; +import { OpenRouterCompletionLanguageModel } from "./completion"; +import { OpenRouterEmbeddingModel } from "./embedding"; +import { withUserAgentSuffix } from "./utils/with-user-agent-suffix"; +import { VERSION } from "./version"; export type { OpenRouterChatSettings, OpenRouterCompletionSettings }; @@ -26,10 +26,7 @@ export interface OpenRouterProvider extends ProviderV3 { modelId: OpenRouterChatModelId, settings?: OpenRouterCompletionSettings, ): OpenRouterCompletionLanguageModel; - ( - modelId: OpenRouterChatModelId, - settings?: OpenRouterChatSettings, - ): OpenRouterChatLanguageModel; + (modelId: OpenRouterChatModelId, settings?: OpenRouterChatSettings): OpenRouterChatLanguageModel; languageModel( modelId: OpenRouterChatModelId, @@ -100,7 +97,7 @@ OpenRouter compatibility mode. Should be set to `strict` when using the OpenRout and `compatible` when using 3rd party providers. In `compatible` mode, newer information such as streamOptions are not being sent. Defaults to 'compatible'. */ - compatibility?: 'strict' | 'compatible'; + compatibility?: "strict" | "compatible"; /** Custom fetch implementation. 
You can use it as a middleware to intercept requests, @@ -123,39 +120,33 @@ A JSON object to send as the request body to access OpenRouter features & upstre /** Create an OpenRouter provider instance. */ -export function createOpenRouter( - options: OpenRouterProviderSettings = {}, -): OpenRouterProvider { +export function createOpenRouter(options: OpenRouterProviderSettings = {}): OpenRouterProvider { const baseURL = - withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? - 'https://openrouter.ai/api/v1'; + withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? "https://openrouter.ai/api/v1"; // we default to compatible, because strict breaks providers like Groq: - const compatibility = options.compatibility ?? 'compatible'; + const compatibility = options.compatibility ?? "compatible"; const getHeaders = () => withUserAgentSuffix( { Authorization: `Bearer ${loadApiKey({ apiKey: options.apiKey, - environmentVariableName: 'OPENROUTER_API_KEY', - description: 'OpenRouter', + environmentVariableName: "OPENROUTER_API_KEY", + description: "OpenRouter", })}`, ...options.headers, ...(options.api_keys && Object.keys(options.api_keys).length > 0 && { - 'X-Provider-API-Keys': JSON.stringify(options.api_keys), + "X-Provider-API-Keys": JSON.stringify(options.api_keys), }), }, `ai-sdk/openrouter/${VERSION}`, ); - const createChatModel = ( - modelId: OpenRouterChatModelId, - settings: OpenRouterChatSettings = {}, - ) => + const createChatModel = (modelId: OpenRouterChatModelId, settings: OpenRouterChatSettings = {}) => new OpenRouterChatLanguageModel(modelId, settings, { - provider: 'openrouter.chat', + provider: "openrouter.chat", url: ({ path }) => `${baseURL}${path}`, headers: getHeaders, compatibility, @@ -168,7 +159,7 @@ export function createOpenRouter( settings: OpenRouterCompletionSettings = {}, ) => new OpenRouterCompletionLanguageModel(modelId, settings, { - provider: 'openrouter.completion', + provider: "openrouter.completion", url: ({ path }) => 
`${baseURL}${path}`, headers: getHeaders, compatibility, @@ -181,7 +172,7 @@ export function createOpenRouter( settings: OpenRouterEmbeddingSettings = {}, ) => new OpenRouterEmbeddingModel(modelId, settings, { - provider: 'openrouter.embedding', + provider: "openrouter.embedding", url: ({ path }) => `${baseURL}${path}`, headers: getHeaders, fetch: options.fetch, @@ -193,16 +184,11 @@ export function createOpenRouter( settings?: OpenRouterChatSettings | OpenRouterCompletionSettings, ) => { if (new.target) { - throw new Error( - 'The OpenRouter model function cannot be called with the new keyword.', - ); + throw new Error("The OpenRouter model function cannot be called with the new keyword."); } - if (modelId === 'openai/gpt-3.5-turbo-instruct') { - return createCompletionModel( - modelId, - settings as OpenRouterCompletionSettings, - ); + if (modelId === "openai/gpt-3.5-turbo-instruct") { + return createCompletionModel(modelId, settings as OpenRouterCompletionSettings); } return createChatModel(modelId, settings as OpenRouterChatSettings); @@ -226,5 +212,5 @@ export function createOpenRouter( Default OpenRouter provider instance. It uses 'strict' compatibility mode. 
*/ export const openrouter = createOpenRouter({ - compatibility: 'strict', // strict for OpenRouter API + compatibility: "strict", // strict for OpenRouter API }); diff --git a/packages/ai-sdk-provider-2/src/schemas/error-response.test.ts b/packages/ai-sdk-provider-2/src/schemas/error-response.test.ts index df21892..8f3f759 100644 --- a/packages/ai-sdk-provider-2/src/schemas/error-response.test.ts +++ b/packages/ai-sdk-provider-2/src/schemas/error-response.test.ts @@ -1,39 +1,37 @@ -import { OpenRouterErrorResponseSchema } from './error-response'; +import { OpenRouterErrorResponseSchema } from "./error-response"; -describe('OpenRouterErrorResponseSchema', () => { - it('should be valid without a type, code, and param', () => { +describe("OpenRouterErrorResponseSchema", () => { + it("should be valid without a type, code, and param", () => { const errorWithoutTypeCodeAndParam = { error: { - message: 'Example error message', - metadata: { provider_name: 'Example Provider' }, + message: "Example error message", + metadata: { provider_name: "Example Provider" }, }, - user_id: 'example_1', + user_id: "example_1", }; - const result = OpenRouterErrorResponseSchema.parse( - errorWithoutTypeCodeAndParam, - ); + const result = OpenRouterErrorResponseSchema.parse(errorWithoutTypeCodeAndParam); expect(result).toEqual({ error: { - message: 'Example error message', + message: "Example error message", code: null, type: null, param: null, - metadata: { provider_name: 'Example Provider' }, + metadata: { provider_name: "Example Provider" }, }, - user_id: 'example_1', + user_id: "example_1", }); }); - it('should be invalid with a type', () => { + it("should be invalid with a type", () => { const errorWithType = { error: { - message: 'Example error message with type', - type: 'invalid_request_error', + message: "Example error message with type", + type: "invalid_request_error", code: 400, - param: 'canBeAnything', - metadata: { provider_name: 'Example Provider' }, + param: 
"canBeAnything", + metadata: { provider_name: "Example Provider" }, }, }; @@ -42,10 +40,10 @@ describe('OpenRouterErrorResponseSchema', () => { expect(result).toEqual({ error: { code: 400, - message: 'Example error message with type', - type: 'invalid_request_error', - param: 'canBeAnything', - metadata: { provider_name: 'Example Provider' }, + message: "Example error message with type", + type: "invalid_request_error", + param: "canBeAnything", + metadata: { provider_name: "Example Provider" }, }, }); }); diff --git a/packages/ai-sdk-provider-2/src/schemas/error-response.ts b/packages/ai-sdk-provider-2/src/schemas/error-response.ts index de6ce3b..84c4fe2 100644 --- a/packages/ai-sdk-provider-2/src/schemas/error-response.ts +++ b/packages/ai-sdk-provider-2/src/schemas/error-response.ts @@ -1,7 +1,6 @@ -import type { ChatErrorError } from '@openrouter/sdk/models'; - -import { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils'; -import { z } from 'zod/v4'; +import type { ChatErrorError } from "@openrouter/sdk/models"; +import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"; +import { z } from "zod/v4"; // Use SDK's ChatErrorError type but wrap in response schema // SDK type: { code: string | number | null; message: string; param?: string | null; type?: string | null } @@ -9,17 +8,13 @@ export const OpenRouterErrorResponseSchema = z .object({ error: z .object({ - code: z - .union([z.string(), z.number()]) - .nullable() - .optional() - .default(null), + code: z.union([z.string(), z.number()]).nullable().optional().default(null), message: z.string(), type: z.string().nullable().optional().default(null), param: z.any().nullable().optional().default(null), }) .passthrough() satisfies z.ZodType< - Omit & { code: string | number | null } + Omit & { code: string | number | null } >, }) .passthrough(); diff --git a/packages/ai-sdk-provider-2/src/schemas/format.ts b/packages/ai-sdk-provider-2/src/schemas/format.ts index 3be2d9c..63f4b89 100644 --- 
a/packages/ai-sdk-provider-2/src/schemas/format.ts +++ b/packages/ai-sdk-provider-2/src/schemas/format.ts @@ -1,9 +1,9 @@ export enum ReasoningFormat { - Unknown = 'unknown', - OpenAIResponsesV1 = 'openai-responses-v1', - XAIResponsesV1 = 'xai-responses-v1', - AnthropicClaudeV1 = 'anthropic-claude-v1', - GoogleGeminiV1 = 'google-gemini-v1', + Unknown = "unknown", + OpenAIResponsesV1 = "openai-responses-v1", + XAIResponsesV1 = "xai-responses-v1", + AnthropicClaudeV1 = "anthropic-claude-v1", + GoogleGeminiV1 = "google-gemini-v1", } // Anthropic Claude was the first reasoning that we're diff --git a/packages/ai-sdk-provider-2/src/schemas/image.ts b/packages/ai-sdk-provider-2/src/schemas/image.ts index d6df790..7b2fbf2 100644 --- a/packages/ai-sdk-provider-2/src/schemas/image.ts +++ b/packages/ai-sdk-provider-2/src/schemas/image.ts @@ -1,8 +1,8 @@ -import { z } from 'zod/v4'; +import { z } from "zod/v4"; const ImageResponseSchema = z .object({ - type: z.literal('image_url'), + type: z.literal("image_url"), image_url: z .object({ url: z.string(), diff --git a/packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts b/packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts index 4f9c0e1..00ae983 100644 --- a/packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts +++ b/packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts @@ -1,12 +1,13 @@ -import { z } from 'zod/v4'; -import { ReasoningDetailUnionSchema } from './reasoning-details'; +import { z } from "zod/v4"; + +import { ReasoningDetailUnionSchema } from "./reasoning-details"; /** * Schema for file annotations from FileParserPlugin */ export const FileAnnotationSchema = z .object({ - type: z.literal('file'), + type: z.literal("file"), file: z .object({ hash: z.string(), @@ -65,9 +66,7 @@ export const OpenRouterProviderMetadataSchema = z }) .catchall(z.any()); -export type OpenRouterProviderMetadata = z.infer< - typeof OpenRouterProviderMetadataSchema ->; +export type OpenRouterProviderMetadata = 
z.infer; /** * Schema for parsing provider options that may contain reasoning_details and annotations diff --git a/packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts b/packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts index 916374b..0396624 100644 --- a/packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts +++ b/packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts @@ -1,11 +1,12 @@ -import { z } from 'zod/v4'; -import { isDefinedOrNotNull } from '../utils/type-guards'; -import { ReasoningFormat } from './format'; +import { z } from "zod/v4"; + +import { isDefinedOrNotNull } from "../utils/type-guards"; +import { ReasoningFormat } from "./format"; export enum ReasoningDetailType { - Summary = 'reasoning.summary', - Encrypted = 'reasoning.encrypted', - Text = 'reasoning.text', + Summary = "reasoning.summary", + Encrypted = "reasoning.encrypted", + Text = "reasoning.text", } export const CommonReasoningDetailSchema = z @@ -22,9 +23,7 @@ export const ReasoningDetailSummarySchema = z summary: z.string(), }) .extend(CommonReasoningDetailSchema.shape); -export type ReasoningDetailSummary = z.infer< - typeof ReasoningDetailSummarySchema ->; +export type ReasoningDetailSummary = z.infer; export const ReasoningDetailEncryptedSchema = z .object({ @@ -33,9 +32,7 @@ export const ReasoningDetailEncryptedSchema = z }) .extend(CommonReasoningDetailSchema.shape); -export type ReasoningDetailEncrypted = z.infer< - typeof ReasoningDetailEncryptedSchema ->; +export type ReasoningDetailEncrypted = z.infer; export const ReasoningDetailTextSchema = z .object({ @@ -71,18 +68,14 @@ export const OutputUnionToReasoningDetailsSchema = z.union([ reasoning_details: z.array(ReasoningDetailsWithUnknownSchema), }), }) - .transform((data) => - data.delta.reasoning_details.filter(isDefinedOrNotNull), - ), + .transform((data) => data.delta.reasoning_details.filter(isDefinedOrNotNull)), z .object({ message: z.object({ reasoning_details: 
z.array(ReasoningDetailsWithUnknownSchema), }), }) - .transform((data) => - data.message.reasoning_details.filter(isDefinedOrNotNull), - ), + .transform((data) => data.message.reasoning_details.filter(isDefinedOrNotNull)), z .object({ text: z.string(), diff --git a/packages/ai-sdk-provider-2/src/test-utils/test-server.ts b/packages/ai-sdk-provider-2/src/test-utils/test-server.ts index 84332e5..d7642f0 100644 --- a/packages/ai-sdk-provider-2/src/test-utils/test-server.ts +++ b/packages/ai-sdk-provider-2/src/test-utils/test-server.ts @@ -3,18 +3,17 @@ * This provides HTTP request interception for testing purposes. */ -import type { JsonBodyType } from 'msw'; -import type { SetupServerApi } from 'msw/node'; - -import { HttpResponse, http } from 'msw'; -import { setupServer } from 'msw/node'; -import { afterAll, afterEach, beforeAll } from 'vitest'; +import type { JsonBodyType } from "msw"; +import type { SetupServerApi } from "msw/node"; +import { http, HttpResponse } from "msw"; +import { setupServer } from "msw/node"; +import { afterAll, afterEach, beforeAll } from "vitest"; // Re-export utilities that were previously in @ai-sdk/provider-utils/test -export { convertReadableStreamToArray } from '@ai-sdk/provider-utils/test'; +export { convertReadableStreamToArray } from "@ai-sdk/provider-utils/test"; type ResponseConfig = { - type: 'json-value' | 'stream-chunks' | 'error'; + type: "json-value" | "stream-chunks" | "error"; body?: JsonBodyType; chunks?: string[]; status?: number; @@ -80,23 +79,20 @@ export function createTestServer(config: TestServerConfig): { const response = urlConfig.response; if (!response) { - return HttpResponse.json( - { error: 'No response configured' }, - { status: 500 }, - ); + return HttpResponse.json({ error: "No response configured" }, { status: 500 }); } const status = response.status ?? 200; const responseHeaders = response.headers ?? 
{}; switch (response.type) { - case 'json-value': + case "json-value": return HttpResponse.json(response.body ?? null, { status, headers: responseHeaders, }); - case 'stream-chunks': { + case "stream-chunks": { const encoder = new TextEncoder(); const chunks = response.chunks ?? []; const stream = new ReadableStream({ @@ -110,14 +106,14 @@ export function createTestServer(config: TestServerConfig): { return new HttpResponse(stream, { status, headers: { - 'Content-Type': 'text/event-stream', + "Content-Type": "text/event-stream", ...responseHeaders, }, }); } - case 'error': - return HttpResponse.json(response.body ?? { error: 'Test error' }, { + case "error": + return HttpResponse.json(response.body ?? { error: "Test error" }, { status: response.status ?? 500, headers: responseHeaders, }); @@ -133,7 +129,7 @@ export function createTestServer(config: TestServerConfig): { const server = setupServer(...handlers); - beforeAll(() => server.listen({ onUnhandledRequest: 'bypass' })); + beforeAll(() => server.listen({ onUnhandledRequest: "bypass" })); afterEach(() => { server.resetHandlers(); // Clear calls between tests diff --git a/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts b/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts index 20fff19..54ded29 100644 --- a/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts +++ b/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts @@ -1,20 +1,20 @@ -import type { ModelMessage } from 'ai'; +import type { ModelMessage } from "ai"; +import { streamText } from "ai"; +import { describe, expect, it, vi } from "vitest"; -import { streamText } from 'ai'; -import { describe, expect, it, vi } from 'vitest'; -import { createOpenRouter } from '../provider'; -import { createTestServer } from '../test-utils/test-server'; +import { createOpenRouter } from "../provider"; +import { createTestServer } from "../test-utils/test-server"; // Add type assertions for the mocked classes const TEST_MESSAGES: 
ModelMessage[] = [ - { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, + { role: "user", content: [{ type: "text", text: "Hello" }] }, ]; -describe('providerOptions', () => { +describe("providerOptions", () => { const server = createTestServer({ - 'https://openrouter.ai/api/v1/chat/completions': { + "https://openrouter.ai/api/v1/chat/completions": { response: { - type: 'stream-chunks', + type: "stream-chunks", chunks: [], }, }, @@ -24,11 +24,11 @@ describe('providerOptions', () => { vi.clearAllMocks(); }); - it('should set providerOptions openrouter to extra body', async () => { + it("should set providerOptions openrouter to extra body", async () => { const openrouter = createOpenRouter({ - apiKey: 'test', + apiKey: "test", }); - const model = openrouter('anthropic/claude-3.7-sonnet'); + const model = openrouter("anthropic/claude-3.7-sonnet"); await streamText({ model: model, @@ -45,14 +45,14 @@ describe('providerOptions', () => { expect(await server.calls[0]?.requestBodyJson).toStrictEqual({ messages: [ { - content: 'Hello', - role: 'user', + content: "Hello", + role: "user", }, ], reasoning: { max_tokens: 1000, }, - model: 'anthropic/claude-3.7-sonnet', + model: "anthropic/claude-3.7-sonnet", stream: true, }); }); diff --git a/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts b/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts index 75b73d0..78d19d2 100644 --- a/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts +++ b/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts @@ -1,16 +1,13 @@ -import type { OpenRouterChatSettings } from '../types/openrouter-chat-settings'; +import { describe, expect, it } from "vitest"; -import { describe, expect, it } from 'vitest'; -import { OpenRouterChatLanguageModel } from '../chat'; -import { - convertReadableStreamToArray, - createTestServer, -} from '../test-utils/test-server'; +import type { OpenRouterChatSettings } from 
"../types/openrouter-chat-settings"; +import { OpenRouterChatLanguageModel } from "../chat"; +import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; -describe('OpenRouter Streaming Usage Accounting', () => { +describe("OpenRouter Streaming Usage Accounting", () => { const server = createTestServer({ - 'https://api.openrouter.ai/chat/completions': { - response: { type: 'stream-chunks', chunks: [] }, + "https://api.openrouter.ai/chat/completions": { + response: { type: "stream-chunks", chunks: [] }, }, }); @@ -37,15 +34,15 @@ describe('OpenRouter Streaming Usage Accounting', () => { ); } - chunks.push('data: [DONE]\n\n'); + chunks.push("data: [DONE]\n\n"); - server.urls['https://api.openrouter.ai/chat/completions']!.response = { - type: 'stream-chunks', + server.urls["https://api.openrouter.ai/chat/completions"]!.response = { + type: "stream-chunks", chunks, }; } - it('should include stream_options.include_usage in request when enabled', async () => { + it("should include stream_options.include_usage in request when enabled", async () => { prepareStreamResponse(); // Create model with usage accounting enabled @@ -53,11 +50,11 @@ describe('OpenRouter Streaming Usage Accounting', () => { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel('test-model', settings, { - provider: 'openrouter.chat', - url: () => 'https://api.openrouter.ai/chat/completions', + const model = new OpenRouterChatLanguageModel("test-model", settings, { + provider: "openrouter.chat", + url: () => "https://api.openrouter.ai/chat/completions", headers: () => ({}), - compatibility: 'strict', + compatibility: "strict", fetch: global.fetch, }); @@ -65,18 +62,15 @@ describe('OpenRouter Streaming Usage Accounting', () => { await model.doStream({ prompt: [ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ], maxOutputTokens: 100, }); // Verify stream options - const 
requestBody = (await server.calls[0]!.requestBodyJson) as Record< - string, - unknown - >; + const requestBody = (await server.calls[0]!.requestBodyJson) as Record; expect(requestBody).toBeDefined(); expect(requestBody.stream).toBe(true); expect(requestBody.stream_options).toEqual({ @@ -84,7 +78,7 @@ describe('OpenRouter Streaming Usage Accounting', () => { }); }); - it('should include provider-specific metadata in finish event when usage accounting is enabled', async () => { + it("should include provider-specific metadata in finish event when usage accounting is enabled", async () => { prepareStreamResponse(true); // Create model with usage accounting enabled @@ -92,11 +86,11 @@ describe('OpenRouter Streaming Usage Accounting', () => { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel('test-model', settings, { - provider: 'openrouter.chat', - url: () => 'https://api.openrouter.ai/chat/completions', + const model = new OpenRouterChatLanguageModel("test-model", settings, { + provider: "openrouter.chat", + url: () => "https://api.openrouter.ai/chat/completions", headers: () => ({}), - compatibility: 'strict', + compatibility: "strict", fetch: global.fetch, }); @@ -104,8 +98,8 @@ describe('OpenRouter Streaming Usage Accounting', () => { const result = await model.doStream({ prompt: [ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ], maxOutputTokens: 100, @@ -115,7 +109,7 @@ describe('OpenRouter Streaming Usage Accounting', () => { const chunks = await convertReadableStreamToArray(result.stream); // Find the finish chunk - const finishChunk = chunks.find((chunk) => chunk.type === 'finish'); + const finishChunk = chunks.find((chunk) => chunk.type === "finish"); expect(finishChunk).toBeDefined(); // Verify metadata is included @@ -135,7 +129,7 @@ describe('OpenRouter Streaming Usage Accounting', () => { }); }); - it('should not include provider-specific metadata 
when usage accounting is disabled', async () => { + it("should not include provider-specific metadata when usage accounting is disabled", async () => { prepareStreamResponse(false); // Create model with usage accounting disabled @@ -143,11 +137,11 @@ describe('OpenRouter Streaming Usage Accounting', () => { // No usage property }; - const model = new OpenRouterChatLanguageModel('test-model', settings, { - provider: 'openrouter.chat', - url: () => 'https://api.openrouter.ai/chat/completions', + const model = new OpenRouterChatLanguageModel("test-model", settings, { + provider: "openrouter.chat", + url: () => "https://api.openrouter.ai/chat/completions", headers: () => ({}), - compatibility: 'strict', + compatibility: "strict", fetch: global.fetch, }); @@ -155,8 +149,8 @@ describe('OpenRouter Streaming Usage Accounting', () => { const result = await model.doStream({ prompt: [ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ], maxOutputTokens: 100, @@ -166,7 +160,7 @@ describe('OpenRouter Streaming Usage Accounting', () => { const chunks = await convertReadableStreamToArray(result.stream); // Find the finish chunk - const finishChunk = chunks.find((chunk) => chunk.type === 'finish'); + const finishChunk = chunks.find((chunk) => chunk.type === "finish"); expect(finishChunk).toBeDefined(); // Verify that provider metadata is not included diff --git a/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts b/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts index 9949a76..da66fdb 100644 --- a/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts +++ b/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts @@ -1,28 +1,28 @@ -import type { OpenRouterChatSettings } from '../types/openrouter-chat-settings'; +import { describe, expect, it } from "vitest"; -import { describe, expect, it } from 'vitest'; -import { OpenRouterChatLanguageModel } from '../chat'; 
-import { createTestServer } from '../test-utils/test-server'; +import type { OpenRouterChatSettings } from "../types/openrouter-chat-settings"; +import { OpenRouterChatLanguageModel } from "../chat"; +import { createTestServer } from "../test-utils/test-server"; -describe('OpenRouter Usage Accounting', () => { +describe("OpenRouter Usage Accounting", () => { const server = createTestServer({ - 'https://api.openrouter.ai/chat/completions': { - response: { type: 'json-value', body: {} }, + "https://api.openrouter.ai/chat/completions": { + response: { type: "json-value", body: {} }, }, }); function prepareJsonResponse(includeUsage = true) { const response = { - id: 'test-id', - model: 'test-model', + id: "test-id", + model: "test-model", choices: [ { message: { - role: 'assistant', - content: 'Hello, I am an AI assistant.', + role: "assistant", + content: "Hello, I am an AI assistant.", }, index: 0, - finish_reason: 'stop', + finish_reason: "stop", }, ], usage: includeUsage @@ -44,13 +44,13 @@ describe('OpenRouter Usage Accounting', () => { : undefined, }; - server.urls['https://api.openrouter.ai/chat/completions']!.response = { - type: 'json-value', + server.urls["https://api.openrouter.ai/chat/completions"]!.response = { + type: "json-value", body: response, }; } - it('should include usage parameter in the request when enabled', async () => { + it("should include usage parameter in the request when enabled", async () => { prepareJsonResponse(); // Create model with usage accounting enabled @@ -58,11 +58,11 @@ describe('OpenRouter Usage Accounting', () => { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel('test-model', settings, { - provider: 'openrouter.chat', - url: () => 'https://api.openrouter.ai/chat/completions', + const model = new OpenRouterChatLanguageModel("test-model", settings, { + provider: "openrouter.chat", + url: () => "https://api.openrouter.ai/chat/completions", headers: () => ({}), - compatibility: 'strict', + 
compatibility: "strict", fetch: global.fetch, }); @@ -70,24 +70,21 @@ describe('OpenRouter Usage Accounting', () => { await model.doGenerate({ prompt: [ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ], maxOutputTokens: 100, }); // Check request contains usage parameter - const requestBody = (await server.calls[0]!.requestBodyJson) as Record< - string, - unknown - >; + const requestBody = (await server.calls[0]!.requestBodyJson) as Record; expect(requestBody).toBeDefined(); - expect(requestBody).toHaveProperty('usage'); + expect(requestBody).toHaveProperty("usage"); expect(requestBody.usage).toEqual({ include: true }); }); - it('should include provider-specific metadata in response when usage accounting is enabled', async () => { + it("should include provider-specific metadata in response when usage accounting is enabled", async () => { prepareJsonResponse(); // Create model with usage accounting enabled @@ -95,11 +92,11 @@ describe('OpenRouter Usage Accounting', () => { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel('test-model', settings, { - provider: 'openrouter.chat', - url: () => 'https://api.openrouter.ai/chat/completions', + const model = new OpenRouterChatLanguageModel("test-model", settings, { + provider: "openrouter.chat", + url: () => "https://api.openrouter.ai/chat/completions", headers: () => ({}), - compatibility: 'strict', + compatibility: "strict", fetch: global.fetch, }); @@ -107,8 +104,8 @@ describe('OpenRouter Usage Accounting', () => { const result = await model.doGenerate({ prompt: [ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ], maxOutputTokens: 100, @@ -141,7 +138,7 @@ describe('OpenRouter Usage Accounting', () => { }); }); - it('should not include provider-specific metadata when usage accounting is disabled', async () => { + it("should not include 
provider-specific metadata when usage accounting is disabled", async () => { prepareJsonResponse(); // Create model with usage accounting disabled @@ -149,11 +146,11 @@ describe('OpenRouter Usage Accounting', () => { // No usage property }; - const model = new OpenRouterChatLanguageModel('test-model', settings, { - provider: 'openrouter.chat', - url: () => 'https://api.openrouter.ai/chat/completions', + const model = new OpenRouterChatLanguageModel("test-model", settings, { + provider: "openrouter.chat", + url: () => "https://api.openrouter.ai/chat/completions", headers: () => ({}), - compatibility: 'strict', + compatibility: "strict", fetch: global.fetch, }); @@ -161,8 +158,8 @@ describe('OpenRouter Usage Accounting', () => { const result = await model.doGenerate({ prompt: [ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ], maxOutputTokens: 100, @@ -186,19 +183,19 @@ describe('OpenRouter Usage Accounting', () => { }); }); - it('should exclude token details from providerMetadata when not present in response', async () => { + it("should exclude token details from providerMetadata when not present in response", async () => { // Prepare a response without token details const response = { - id: 'test-id', - model: 'test-model', + id: "test-id", + model: "test-model", choices: [ { message: { - role: 'assistant', - content: 'Hello, I am an AI assistant.', + role: "assistant", + content: "Hello, I am an AI assistant.", }, index: 0, - finish_reason: 'stop', + finish_reason: "stop", }, ], usage: { @@ -210,8 +207,8 @@ describe('OpenRouter Usage Accounting', () => { }, }; - server.urls['https://api.openrouter.ai/chat/completions']!.response = { - type: 'json-value', + server.urls["https://api.openrouter.ai/chat/completions"]!.response = { + type: "json-value", body: response, }; @@ -219,27 +216,25 @@ describe('OpenRouter Usage Accounting', () => { usage: { include: true }, }; - const model = new 
OpenRouterChatLanguageModel('test-model', settings, { - provider: 'openrouter.chat', - url: () => 'https://api.openrouter.ai/chat/completions', + const model = new OpenRouterChatLanguageModel("test-model", settings, { + provider: "openrouter.chat", + url: () => "https://api.openrouter.ai/chat/completions", headers: () => ({}), - compatibility: 'strict', + compatibility: "strict", fetch: global.fetch, }); const result = await model.doGenerate({ prompt: [ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ], maxOutputTokens: 100, }); - const usage = ( - result.providerMetadata?.openrouter as Record - )?.usage; + const usage = (result.providerMetadata?.openrouter as Record)?.usage; // Should include basic token counts expect(usage).toMatchObject({ @@ -250,24 +245,24 @@ describe('OpenRouter Usage Accounting', () => { }); // Should NOT include token details when not present in response - expect(usage).not.toHaveProperty('promptTokensDetails'); - expect(usage).not.toHaveProperty('completionTokensDetails'); - expect(usage).not.toHaveProperty('costDetails'); + expect(usage).not.toHaveProperty("promptTokensDetails"); + expect(usage).not.toHaveProperty("completionTokensDetails"); + expect(usage).not.toHaveProperty("costDetails"); }); - it('should include only present token details in providerMetadata', async () => { + it("should include only present token details in providerMetadata", async () => { // Prepare a response with only cached_tokens (no reasoning or cost details) const response = { - id: 'test-id', - model: 'test-model', + id: "test-id", + model: "test-model", choices: [ { message: { - role: 'assistant', - content: 'Hello, I am an AI assistant.', + role: "assistant", + content: "Hello, I am an AI assistant.", }, index: 0, - finish_reason: 'stop', + finish_reason: "stop", }, ], usage: { @@ -282,8 +277,8 @@ describe('OpenRouter Usage Accounting', () => { }, }; - 
server.urls['https://api.openrouter.ai/chat/completions']!.response = { - type: 'json-value', + server.urls["https://api.openrouter.ai/chat/completions"]!.response = { + type: "json-value", body: response, }; @@ -291,36 +286,34 @@ describe('OpenRouter Usage Accounting', () => { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel('test-model', settings, { - provider: 'openrouter.chat', - url: () => 'https://api.openrouter.ai/chat/completions', + const model = new OpenRouterChatLanguageModel("test-model", settings, { + provider: "openrouter.chat", + url: () => "https://api.openrouter.ai/chat/completions", headers: () => ({}), - compatibility: 'strict', + compatibility: "strict", fetch: global.fetch, }); const result = await model.doGenerate({ prompt: [ { - role: 'user', - content: [{ type: 'text', text: 'Hello' }], + role: "user", + content: [{ type: "text", text: "Hello" }], }, ], maxOutputTokens: 100, }); - const usage = ( - result.providerMetadata?.openrouter as Record - )?.usage; + const usage = (result.providerMetadata?.openrouter as Record)?.usage; // Should include promptTokensDetails since cached_tokens is present - expect(usage).toHaveProperty('promptTokensDetails'); + expect(usage).toHaveProperty("promptTokensDetails"); expect((usage as Record).promptTokensDetails).toEqual({ cachedTokens: 5, }); // Should NOT include completionTokensDetails or costDetails - expect(usage).not.toHaveProperty('completionTokensDetails'); - expect(usage).not.toHaveProperty('costDetails'); + expect(usage).not.toHaveProperty("completionTokensDetails"); + expect(usage).not.toHaveProperty("costDetails"); }); }); diff --git a/packages/ai-sdk-provider-2/src/types/index.ts b/packages/ai-sdk-provider-2/src/types/index.ts index 338a897..fbf75c0 100644 --- a/packages/ai-sdk-provider-2/src/types/index.ts +++ b/packages/ai-sdk-provider-2/src/types/index.ts @@ -1,8 +1,8 @@ -import type { LanguageModelV3, LanguageModelV3Prompt } from '@ai-sdk/provider'; +import type { 
LanguageModelV3, LanguageModelV3Prompt } from "@ai-sdk/provider"; export type { LanguageModelV3, LanguageModelV3Prompt }; -export * from './openrouter-embedding-settings'; +export * from "./openrouter-embedding-settings"; export type OpenRouterProviderOptions = { models?: string[]; @@ -20,7 +20,7 @@ export type OpenRouterProviderOptions = { max_tokens: number; } | { - effort: 'high' | 'medium' | 'low'; + effort: "high" | "medium" | "low"; } ); diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts b/packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts index 0c99504..5ea2dbc 100644 --- a/packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts +++ b/packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts @@ -1,8 +1,8 @@ -import type { FileAnnotation } from '@/src/schemas/provider-metadata'; -import type { ReasoningDetailUnion } from '@/src/schemas/reasoning-details'; +import type { FileAnnotation } from "../schemas/provider-metadata"; +import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; // Type for OpenRouter Cache Control following Anthropic's pattern -export type OpenRouterCacheControl = { type: 'ephemeral' }; +export type OpenRouterCacheControl = { type: "ephemeral" }; export type OpenRouterChatCompletionsInput = Array; @@ -13,13 +13,13 @@ export type ChatCompletionMessageParam = | ChatCompletionToolMessageParam; export interface ChatCompletionSystemMessageParam { - role: 'system'; + role: "system"; content: string; cache_control?: OpenRouterCacheControl; } export interface ChatCompletionUserMessageParam { - role: 'user'; + role: "user"; content: string | Array; cache_control?: OpenRouterCacheControl; } @@ -31,7 +31,7 @@ export type ChatCompletionContentPart = | ChatCompletionContentPartInputAudio; export interface ChatCompletionContentPartFile { - type: 'file'; + type: "file"; file: { filename?: string; file_data?: string; @@ -41,7 +41,7 @@ export 
interface ChatCompletionContentPartFile { } export interface ChatCompletionContentPartImage { - type: 'image_url'; + type: "image_url"; image_url: { url: string; }; @@ -49,7 +49,7 @@ export interface ChatCompletionContentPartImage { } export interface ChatCompletionContentPartText { - type: 'text'; + type: "text"; text: string; reasoning?: string | null; cache_control?: OpenRouterCacheControl; @@ -57,21 +57,21 @@ export interface ChatCompletionContentPartText { /** https://openrouter.ai/docs/guides/overview/multimodal/audio */ export const OPENROUTER_AUDIO_FORMATS = [ - 'wav', - 'mp3', - 'aiff', - 'aac', - 'ogg', - 'flac', - 'm4a', - 'pcm16', - 'pcm24', + "wav", + "mp3", + "aiff", + "aac", + "ogg", + "flac", + "m4a", + "pcm16", + "pcm24", ] as const; export type OpenRouterAudioFormat = (typeof OPENROUTER_AUDIO_FORMATS)[number]; export interface ChatCompletionContentPartInputAudio { - type: 'input_audio'; + type: "input_audio"; input_audio: { data: string; format: OpenRouterAudioFormat; @@ -80,7 +80,7 @@ export interface ChatCompletionContentPartInputAudio { } export interface ChatCompletionAssistantMessageParam { - role: 'assistant'; + role: "assistant"; content?: string | null; reasoning?: string | null; reasoning_details?: ReasoningDetailUnion[]; @@ -90,7 +90,7 @@ export interface ChatCompletionAssistantMessageParam { } export interface ChatCompletionMessageToolCall { - type: 'function'; + type: "function"; id: string; function: { arguments: string; @@ -99,7 +99,7 @@ export interface ChatCompletionMessageToolCall { } export interface ChatCompletionToolMessageParam { - role: 'tool'; + role: "tool"; content: string; tool_call_id: string; cache_control?: OpenRouterCacheControl; diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts b/packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts index e06b653..31f039b 100644 --- a/packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts +++ 
b/packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts @@ -1,5 +1,6 @@ -import type * as models from '@openrouter/sdk/models'; -import type { OpenRouterSharedSettings } from '..'; +import type * as models from "@openrouter/sdk/models"; + +import type { OpenRouterSharedSettings } from ".."; // https://openrouter.ai/api/v1/models export type OpenRouterChatModelId = string; diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts b/packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts index b5f6b99..d08978d 100644 --- a/packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts +++ b/packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts @@ -1,4 +1,4 @@ -import type { OpenRouterSharedSettings } from '.'; +import type { OpenRouterSharedSettings } from "."; export type OpenRouterCompletionModelId = string; diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts b/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts index a20a60b..9b71625 100644 --- a/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts +++ b/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts @@ -1,4 +1,4 @@ -import type { OpenRouterSharedSettings } from '..'; +import type { OpenRouterSharedSettings } from ".."; // https://openrouter.ai/api/v1/models export type OpenRouterEmbeddingModelId = string; @@ -29,7 +29,7 @@ export type OpenRouterEmbeddingSettings = { /** * Control whether to use providers that may store data */ - data_collection?: 'allow' | 'deny'; + data_collection?: "allow" | "deny"; /** * List of provider slugs to allow for this request */ @@ -41,7 +41,7 @@ export type OpenRouterEmbeddingSettings = { /** * Sort providers by price, throughput, or latency */ - sort?: 'price' | 'throughput' | 'latency'; + sort?: "price" | "throughput" | "latency"; /** * Maximum pricing you want to pay for this request */ diff --git 
a/packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts b/packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts index ad29cc5..3d18d79 100644 --- a/packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts +++ b/packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts @@ -1,28 +1,20 @@ -import type { LanguageModelV3FinishReason } from '@ai-sdk/provider'; +import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"; -type UnifiedFinishReason = - | 'stop' - | 'length' - | 'content-filter' - | 'tool-calls' - | 'error' - | 'other'; +type UnifiedFinishReason = "stop" | "length" | "content-filter" | "tool-calls" | "error" | "other"; -function mapToUnified( - finishReason: string | null | undefined, -): UnifiedFinishReason { +function mapToUnified(finishReason: string | null | undefined): UnifiedFinishReason { switch (finishReason) { - case 'stop': - return 'stop'; - case 'length': - return 'length'; - case 'content_filter': - return 'content-filter'; - case 'function_call': - case 'tool_calls': - return 'tool-calls'; + case "stop": + return "stop"; + case "length": + return "length"; + case "content_filter": + return "content-filter"; + case "function_call": + case "tool_calls": + return "tool-calls"; default: - return 'other'; + return "other"; } } diff --git a/packages/ai-sdk-provider-2/src/utils/remove-undefined.ts b/packages/ai-sdk-provider-2/src/utils/remove-undefined.ts index 2de511a..4c0b391 100644 --- a/packages/ai-sdk-provider-2/src/utils/remove-undefined.ts +++ b/packages/ai-sdk-provider-2/src/utils/remove-undefined.ts @@ -6,7 +6,8 @@ export function removeUndefinedEntries( record: Record, ): Record { - return Object.fromEntries( - Object.entries(record).filter(([, value]) => value !== null), - ) as Record; + return Object.fromEntries(Object.entries(record).filter(([, value]) => value !== null)) as Record< + string, + T + >; } diff --git a/packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts 
b/packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts index 06a6eca..0a7df9d 100644 --- a/packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts +++ b/packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts @@ -1,4 +1,4 @@ -import { removeUndefinedEntries } from '@/src/utils/remove-undefined'; +import { removeUndefinedEntries } from "../utils/remove-undefined"; /** * Appends suffix parts to the `user-agent` header. @@ -18,13 +18,11 @@ export function withUserAgentSuffix( (headers as Record) ?? {}, ); - const currentUserAgentHeader = cleanedHeaders['user-agent'] || ''; - const newUserAgent = [currentUserAgentHeader, ...userAgentSuffixParts] - .filter(Boolean) - .join(' '); + const currentUserAgentHeader = cleanedHeaders["user-agent"] || ""; + const newUserAgent = [currentUserAgentHeader, ...userAgentSuffixParts].filter(Boolean).join(" "); return { ...cleanedHeaders, - 'user-agent': newUserAgent, + "user-agent": newUserAgent, }; } diff --git a/packages/ai-sdk-provider-2/src/version.ts b/packages/ai-sdk-provider-2/src/version.ts index 531fbc9..8fff599 100644 --- a/packages/ai-sdk-provider-2/src/version.ts +++ b/packages/ai-sdk-provider-2/src/version.ts @@ -1,4 +1,4 @@ // Version string of this package injected at build time. declare const __PACKAGE_VERSION__: string | undefined; export const VERSION: string = - __PACKAGE_VERSION__ === undefined ? '0.0.0-test' : __PACKAGE_VERSION__; + __PACKAGE_VERSION__ === undefined ? 
"0.0.0-test" : __PACKAGE_VERSION__; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a09bcc8..0a5faa8 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -136,6 +136,12 @@ importers: '@hyperbolic/tsconfig': specifier: workspace:* version: link:../../tooling/typescript + '@openrouter/sdk': + specifier: ^0.1.27 + version: 0.1.27 + '@types/json-schema': + specifier: 7.0.15 + version: 7.0.15 ai: specifier: ^6.0.48 version: 6.0.48(zod@4.3.6) @@ -985,6 +991,9 @@ packages: '@open-draft/until@2.1.0': resolution: {integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==} + '@openrouter/sdk@0.1.27': + resolution: {integrity: sha512-RH//L10bSmc81q25zAZudiI4kNkLgxF2E+WU42vghp3N6TEvZ6F0jK7uT3tOxkEn91gzmMw9YVmDENy7SJsajQ==} + '@opentelemetry/api@1.9.0': resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} engines: {node: '>=8.0.0'} @@ -4036,7 +4045,7 @@ snapshots: '@types/node': 20.5.1 chalk: 4.1.2 cosmiconfig: 8.3.6(typescript@5.9.3) - cosmiconfig-typescript-loader: 4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@20.5.1)(typescript@5.9.3))(typescript@5.9.3) + cosmiconfig-typescript-loader: 4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@22.13.10)(typescript@5.9.3))(typescript@5.9.3) lodash.isplainobject: 4.0.6 lodash.merge: 4.6.2 lodash.uniq: 4.5.0 @@ -4505,6 +4514,10 @@ snapshots: '@open-draft/until@2.1.0': {} + '@openrouter/sdk@0.1.27': + dependencies: + zod: 4.3.6 + '@opentelemetry/api@1.9.0': {} '@pkgjs/parseargs@0.11.0': @@ -5040,7 +5053,7 @@ snapshots: cookie@1.1.1: {} - cosmiconfig-typescript-loader@4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@20.5.1)(typescript@5.9.3))(typescript@5.9.3): + 
cosmiconfig-typescript-loader@4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@22.13.10)(typescript@5.9.3))(typescript@5.9.3): dependencies: '@types/node': 20.5.1 cosmiconfig: 8.3.6(typescript@5.9.3) From bbf588c84b3e43f9226de7d994cd00fa7f709617 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Thu, 22 Jan 2026 21:11:41 -0800 Subject: [PATCH 04/22] fix lint and build --- packages/ai-sdk-provider-2/src/chat/errors.test.ts | 2 ++ packages/ai-sdk-provider-2/src/chat/file-url-utils.ts | 1 + packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts | 1 + .../ai-sdk-provider-2/src/chat/large-pdf-response.test.ts | 2 ++ .../ai-sdk-provider-2/src/chat/payload-comparison.test.ts | 4 ++++ .../src/{internal copy => internal}/index.ts | 0 packages/ai-sdk-provider-2/src/test-utils/test-server.ts | 2 ++ .../src/tests/stream-usage-accounting.test.ts | 2 ++ packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts | 4 ++++ 9 files changed, 18 insertions(+) rename packages/ai-sdk-provider-2/src/{internal copy => internal}/index.ts (100%) diff --git a/packages/ai-sdk-provider-2/src/chat/errors.test.ts b/packages/ai-sdk-provider-2/src/chat/errors.test.ts index 45309d4..1b29729 100644 --- a/packages/ai-sdk-provider-2/src/chat/errors.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/errors.test.ts @@ -22,6 +22,7 @@ describe("HTTP 200 Error Response Handling", () => { it("should throw APICallError for HTTP 200 responses with error payloads", async () => { // OpenRouter sometimes returns HTTP 200 with an error object instead of choices // This can occur for various server errors (e.g., internal errors, processing failures) + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { type: "json-value", body: { @@ -44,6 +45,7 @@ describe("HTTP 200 Error Response Handling", () => { it("should parse successful responses normally when no error present", async 
() => { // Normal successful response without error + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { type: "json-value", body: { diff --git a/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts b/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts index 54ef8bd..34477b2 100644 --- a/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts +++ b/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts @@ -40,6 +40,7 @@ export function getMediaType(dataUrl: string, defaultMediaType: string): string export function getBase64FromDataUrl(dataUrl: string): string { const match = dataUrl.match(/^data:[^;]*;base64,(.+)$/); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion return match ? match[1]! : dataUrl; } diff --git a/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts b/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts index edde4d6..69e3815 100644 --- a/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts +++ b/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts @@ -2,6 +2,7 @@ import type { LanguageModelV3ToolChoice } from "@ai-sdk/provider"; import { InvalidArgumentError } from "@ai-sdk/provider"; import { z } from "zod/v4"; +// eslint-disable-next-line @typescript-eslint/no-unused-vars const ChatCompletionToolChoiceSchema = z.union([ z.literal("auto"), z.literal("none"), diff --git a/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts b/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts index 0805a1f..bc92a70 100644 --- a/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts @@ -22,6 +22,7 @@ describe("Large PDF Response Handling", () => { it("should handle HTTP 200 responses with error payloads (500 internal errors)", async () => { // This is the actual response OpenRouter returns for large PDF failures // HTTP 200 status but 
contains error object instead of choices + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { type: "json-value", body: { @@ -44,6 +45,7 @@ describe("Large PDF Response Handling", () => { it("should parse successful large PDF responses with file annotations", async () => { // Successful response with file annotations from FileParserPlugin + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { type: "json-value", body: { diff --git a/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts b/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts index 62d7de0..09746f9 100644 --- a/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts @@ -101,6 +101,7 @@ describe("Payload Comparison - Large PDF", () => { // usage: { include: true } // } + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const messages = capturedRequestBody!.messages; expect(messages).toHaveLength(1); expect(messages[0]?.role).toBe("user"); @@ -132,9 +133,12 @@ describe("Payload Comparison - Large PDF", () => { }); // Check for plugins array + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(capturedRequestBody!.plugins).toBeDefined(); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion expect(capturedRequestBody!.plugins).toBeInstanceOf(Array); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const { plugins } = capturedRequestBody!; if (!plugins) { throw new Error("Plugins should be defined"); diff --git a/packages/ai-sdk-provider-2/src/internal copy/index.ts b/packages/ai-sdk-provider-2/src/internal/index.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/internal copy/index.ts rename to 
packages/ai-sdk-provider-2/src/internal/index.ts diff --git a/packages/ai-sdk-provider-2/src/test-utils/test-server.ts b/packages/ai-sdk-provider-2/src/test-utils/test-server.ts index d7642f0..1037d90 100644 --- a/packages/ai-sdk-provider-2/src/test-utils/test-server.ts +++ b/packages/ai-sdk-provider-2/src/test-utils/test-server.ts @@ -51,6 +51,7 @@ export function createTestServer(config: TestServerConfig): { const handlers = Object.keys(config).map((url) => http.post(url, async ({ request }) => { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const urlConfig = urls[url]!; // Record the call @@ -135,6 +136,7 @@ export function createTestServer(config: TestServerConfig): { // Clear calls between tests calls.length = 0; for (const url of Object.keys(urls)) { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion urls[url]!.calls = []; } }); diff --git a/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts b/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts index 78d19d2..820fcef 100644 --- a/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts +++ b/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts @@ -36,6 +36,7 @@ describe("OpenRouter Streaming Usage Accounting", () => { chunks.push("data: [DONE]\n\n"); + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion server.urls["https://api.openrouter.ai/chat/completions"]!.response = { type: "stream-chunks", chunks, @@ -70,6 +71,7 @@ describe("OpenRouter Streaming Usage Accounting", () => { }); // Verify stream options + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const requestBody = (await server.calls[0]!.requestBodyJson) as Record; expect(requestBody).toBeDefined(); expect(requestBody.stream).toBe(true); diff --git a/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts b/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts index da66fdb..9684721 100644 
--- a/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts +++ b/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts @@ -44,6 +44,7 @@ describe("OpenRouter Usage Accounting", () => { : undefined, }; + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion server.urls["https://api.openrouter.ai/chat/completions"]!.response = { type: "json-value", body: response, @@ -78,6 +79,7 @@ describe("OpenRouter Usage Accounting", () => { }); // Check request contains usage parameter + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const requestBody = (await server.calls[0]!.requestBodyJson) as Record; expect(requestBody).toBeDefined(); expect(requestBody).toHaveProperty("usage"); @@ -207,6 +209,7 @@ describe("OpenRouter Usage Accounting", () => { }, }; + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion server.urls["https://api.openrouter.ai/chat/completions"]!.response = { type: "json-value", body: response, @@ -277,6 +280,7 @@ describe("OpenRouter Usage Accounting", () => { }, }; + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion server.urls["https://api.openrouter.ai/chat/completions"]!.response = { type: "json-value", body: response, From cb10f1b560edc8f9da1869ef624b59a03ed6fde2 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 01:56:03 -0800 Subject: [PATCH 05/22] tiny updates --- packages/ai-sdk-provider-2/src/provider.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/ai-sdk-provider-2/src/provider.ts b/packages/ai-sdk-provider-2/src/provider.ts index bcab8ec..098d221 100644 --- a/packages/ai-sdk-provider-2/src/provider.ts +++ b/packages/ai-sdk-provider-2/src/provider.ts @@ -122,7 +122,7 @@ Create an OpenRouter provider instance. */ export function createOpenRouter(options: OpenRouterProviderSettings = {}): OpenRouterProvider { const baseURL = - withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? 
"https://openrouter.ai/api/v1"; + withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? "https://api.hyperbolic.xyz/v1"; // we default to compatible, because strict breaks providers like Groq: const compatibility = options.compatibility ?? "compatible"; @@ -132,7 +132,7 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open { Authorization: `Bearer ${loadApiKey({ apiKey: options.apiKey, - environmentVariableName: "OPENROUTER_API_KEY", + environmentVariableName: "HYPERBOLIC_API_KEY", description: "OpenRouter", })}`, ...options.headers, From fa846798132993b43c9d4548cdfe34ba88684fd2 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 03:21:26 -0800 Subject: [PATCH 06/22] image model support --- .../src/image/hyperbolic-image-settings.ts | 37 +++++++++++++++++++ packages/ai-sdk-provider-2/src/provider.ts | 25 +++++++++++-- .../src/hyperbolic-image-settings.ts | 9 ++--- 3 files changed, 61 insertions(+), 10 deletions(-) create mode 100644 packages/ai-sdk-provider-2/src/image/hyperbolic-image-settings.ts diff --git a/packages/ai-sdk-provider-2/src/image/hyperbolic-image-settings.ts b/packages/ai-sdk-provider-2/src/image/hyperbolic-image-settings.ts new file mode 100644 index 0000000..739fc61 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/image/hyperbolic-image-settings.ts @@ -0,0 +1,37 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +import type { GenerateImageResult } from "ai"; + +import type { OpenRouterSharedSettings as HyperbolicSharedSettings } from "../types"; + +export type HyperbolicImageModelId = string; + +export type HyperbolicImageSettings = { + /** + * Override the maximum number of images per call (default is dependent on the + * model, or 1 for an unknown model). 
+ */ + maxImagesPerCall?: number; +} & HyperbolicSharedSettings; + +export type HyperbolicImageProviderOptions = { + cfgScale?: number; + negativePrompt?: string; + steps?: number; + strength?: number; + enableRefiner?: boolean; + image?: string; +}; + +export type HyperbolicImageProviderResponseMetadata = { + inferenceTime: number; + randomSeeds: number[]; +}; + +export type HyperbolicGenerateImageResult = Omit & { + responses: (GenerateImageResult["responses"][number] & { + hyperbolic: HyperbolicImageProviderResponseMetadata; + })[]; +}; diff --git a/packages/ai-sdk-provider-2/src/provider.ts b/packages/ai-sdk-provider-2/src/provider.ts index 098d221..01331fd 100644 --- a/packages/ai-sdk-provider-2/src/provider.ts +++ b/packages/ai-sdk-provider-2/src/provider.ts @@ -1,6 +1,10 @@ import type { ProviderV3 } from "@ai-sdk/provider"; import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; +import type { + HyperbolicImageModelId, + HyperbolicImageSettings, +} from "./image/hyperbolic-image-settings"; import type { OpenRouterChatModelId, OpenRouterChatSettings, @@ -16,6 +20,7 @@ import type { import { OpenRouterChatLanguageModel } from "./chat"; import { OpenRouterCompletionLanguageModel } from "./completion"; import { OpenRouterEmbeddingModel } from "./embedding"; +import { HyperbolicImageModel } from "./image"; import { withUserAgentSuffix } from "./utils/with-user-agent-suffix"; import { VERSION } from "./version"; @@ -69,6 +74,8 @@ Creates an OpenRouter text embedding model. 
(AI SDK v4 - deprecated, use textEmb modelId: OpenRouterEmbeddingModelId, settings?: OpenRouterEmbeddingSettings, ): OpenRouterEmbeddingModel; + + image(modelId: HyperbolicImageModelId, settings?: HyperbolicImageSettings): HyperbolicImageModel; } export interface OpenRouterProviderSettings { @@ -167,6 +174,19 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open extraBody: options.extraBody, }); + const createImageModel = ( + modelId: HyperbolicImageModelId, + settings: HyperbolicImageSettings = {}, + ) => + new HyperbolicImageModel(modelId, settings, { + provider: "hyperbolic.image", + url: ({ path }) => `${baseURL}${path}`, + headers: getHeaders, + compatibility, + fetch: options.fetch, + extraBody: options.extraBody, + }); + const createEmbeddingModel = ( modelId: OpenRouterEmbeddingModelId, settings: OpenRouterEmbeddingSettings = {}, @@ -187,10 +207,6 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open throw new Error("The OpenRouter model function cannot be called with the new keyword."); } - if (modelId === "openai/gpt-3.5-turbo-instruct") { - return createCompletionModel(modelId, settings as OpenRouterCompletionSettings); - } - return createChatModel(modelId, settings as OpenRouterChatSettings); }; @@ -204,6 +220,7 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open provider.completion = createCompletionModel; provider.textEmbeddingModel = createEmbeddingModel; provider.embedding = createEmbeddingModel; // deprecated alias for v4 compatibility + provider.image = createImageModel; return provider as OpenRouterProvider; } diff --git a/packages/ai-sdk-provider/src/hyperbolic-image-settings.ts b/packages/ai-sdk-provider/src/hyperbolic-image-settings.ts index 9399faf..c263646 100644 --- a/packages/ai-sdk-provider/src/hyperbolic-image-settings.ts +++ b/packages/ai-sdk-provider/src/hyperbolic-image-settings.ts @@ -2,7 +2,7 @@ // Original work Copyright 2025 OpenRouter Inc. 
// Licensed under the Apache License, Version 2.0 -import type { Experimental_GenerateImageResult } from "ai"; +import type { GenerateImageResult } from "ai"; import type { HyperbolicSharedSettings } from "./types"; @@ -30,11 +30,8 @@ export type HyperbolicImageProviderResponseMetadata = { randomSeeds: number[]; }; -export type Experimental_HyperbolicGenerateImageResult = Omit< - Experimental_GenerateImageResult, - "responses" -> & { - responses: (Experimental_GenerateImageResult["responses"][number] & { +export type HyperbolicGenerateImageResult = Omit & { + responses: (GenerateImageResult["responses"][number] & { hyperbolic: HyperbolicImageProviderResponseMetadata; })[]; }; From bffb9722e5510fe9acac129522de36e66c8b1ae0 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 03:46:54 -0800 Subject: [PATCH 07/22] better exports --- ...convert-to-openrouter-completion-prompt.ts | 5 + packages/ai-sdk-provider-2/src/image/index.ts | 132 ++++++++++++++++++ .../ai-sdk-provider-2/src/internal/index.ts | 1 + .../src/utils/hyperbolic-error.ts | 49 +++++++ 4 files changed, 187 insertions(+) create mode 100644 packages/ai-sdk-provider-2/src/image/index.ts create mode 100644 packages/ai-sdk-provider-2/src/utils/hyperbolic-error.ts diff --git a/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts b/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts index 4b122ee..addc37f 100644 --- a/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts +++ b/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts @@ -57,6 +57,7 @@ export function convertToOpenRouterCompletionPrompt({ .map((part: LanguageModelV3TextPart | LanguageModelV3FilePart) => { switch (part.type) { case "text": { + console.log("return 2", part.text); return part.text; } @@ -66,6 +67,7 @@ export function convertToOpenRouterCompletionPrompt({ }); } default: { + console.log("return 3"); 
return ""; } } @@ -89,6 +91,7 @@ export function convertToOpenRouterCompletionPrompt({ ) => { switch (part.type) { case "text": { + console.log("return 4"); return part.text; } case "tool-call": { @@ -114,6 +117,7 @@ export function convertToOpenRouterCompletionPrompt({ } default: { + console.log("return 5"); return ""; } } @@ -139,6 +143,7 @@ export function convertToOpenRouterCompletionPrompt({ // Assistant message prefix: text += `${assistant}:\n`; + console.log("return 6", { text }); return { prompt: text, diff --git a/packages/ai-sdk-provider-2/src/image/index.ts b/packages/ai-sdk-provider-2/src/image/index.ts new file mode 100644 index 0000000..f3314e3 --- /dev/null +++ b/packages/ai-sdk-provider-2/src/image/index.ts @@ -0,0 +1,132 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +import type { ImageModelV3, SharedV3Warning } from "@ai-sdk/provider"; +import { combineHeaders, createJsonResponseHandler, postJsonToApi } from "@ai-sdk/provider-utils"; +import { z } from "zod"; + +import type { + HyperbolicImageModelId, + HyperbolicImageProviderOptions, + HyperbolicImageProviderResponseMetadata, + HyperbolicImageSettings, +} from "./hyperbolic-image-settings"; +import { hyperbolicFailedResponseHandler } from "../utils/hyperbolic-error"; + +type HyperbolicImageModelConfig = { + provider: string; + compatibility: "strict" | "compatible"; + headers: () => Record; + url: (options: { modelId: string; path: string }) => string; + fetch?: typeof fetch; + extraBody?: Record; +}; + +export class HyperbolicImageModel implements ImageModelV3 { + readonly specificationVersion = "v3"; + readonly provider = "hyperbolic.image"; + + get maxImagesPerCall(): number { + return this.settings.maxImagesPerCall ?? 
1; + } + + constructor( + readonly modelId: HyperbolicImageModelId, + private readonly settings: HyperbolicImageSettings, + private readonly config: HyperbolicImageModelConfig, + ) {} + + async doGenerate( + options: Omit[0], "providerOptions"> & { + providerOptions: { + hyperbolic?: HyperbolicImageProviderOptions; + }; + }, + ): Promise< + Omit>, "response"> & { + response: Awaited>["response"] & { + hyperbolic: HyperbolicImageProviderResponseMetadata; + }; + } + > { + const warnings: Array = []; + const [width, height] = options.size ? options.size.split("x").map(Number) : []; + + const args = { + prompt: options.prompt, + height, + width, + cfg_scale: options.providerOptions?.hyperbolic?.cfgScale, + enable_refiner: options.providerOptions?.hyperbolic?.enableRefiner, + model_name: this.modelId, + negative_prompt: options.providerOptions?.hyperbolic?.negativePrompt, + steps: options.providerOptions?.hyperbolic?.steps, + strength: options.providerOptions?.hyperbolic?.strength, + image: options.providerOptions?.hyperbolic?.image, + }; + + if (options.aspectRatio != undefined) { + warnings.push({ + type: "unsupported", + feature: "aspectRatio", + details: "This model does not support `aspectRatio`. 
Use `size` instead.", + }); + } + if (options.seed != undefined) { + warnings.push({ + type: "unsupported", + feature: "seed", + details: "This model does not support `seed`.", + }); + } + if (options.n != undefined) { + warnings.push({ + type: "unsupported", + feature: "n", + details: "This model does not support `n`.", + }); + } + + const { value: response, responseHeaders } = await postJsonToApi({ + url: this.config.url({ + path: "/image/generation", + modelId: this.modelId, + }), + headers: combineHeaders(this.config.headers(), options.headers), + body: args, + failedResponseHandler: hyperbolicFailedResponseHandler, + successfulResponseHandler: createJsonResponseHandler(hyperbolicImageResponseSchema), + abortSignal: options.abortSignal, + fetch: this.config.fetch, + }); + + return { + images: response.images.map((image) => image.image), + warnings, + response: { + timestamp: new Date(), + modelId: this.modelId, + headers: responseHeaders, + hyperbolic: { + inferenceTime: response.inference_time, + randomSeeds: response.images.map((image) => image.random_seed), + }, + }, + }; + } +} + +// minimal version of the schema, focused on what is needed for the implementation to avoid breaking changes +const hyperbolicImageResponseSchema = z.object({ + images: z.array( + z.object({ + image: z.string(), + index: z.number(), + random_seed: z.number(), + }), + ), + inference_time: z.number(), +}); + +export * from "./hyperbolic-image-settings"; diff --git
a/packages/ai-sdk-provider-2/src/utils/hyperbolic-error.ts b/packages/ai-sdk-provider-2/src/utils/hyperbolic-error.ts new file mode 100644 index 0000000..60eed7e --- /dev/null +++ b/packages/ai-sdk-provider-2/src/utils/hyperbolic-error.ts @@ -0,0 +1,49 @@ +// Modified by Hyperbolic Labs, Inc. on 2025-03-25 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + +import type { TypeValidationError } from "ai"; +import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"; +import { JSONParseError } from "ai"; +import { z } from "zod"; + +export const HyperbolicErrorResponseSchema = z.object({ + object: z.literal("error"), + message: z.string(), + type: z.string(), + param: z.any().nullable(), + code: z.coerce.number().nullable(), +}); + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export const isHyperbolicError = (data: any): data is HyperbolicErrorData => { + return "object" in data && data.object === "error"; +}; + +export type HyperbolicErrorData = z.infer; + +export const hyperbolicFailedResponseHandler = createJsonErrorResponseHandler({ + errorSchema: HyperbolicErrorResponseSchema, + errorToMessage: (data) => data.message, +}); + +/** + * Error messages from the API are sometimes an ugly combo of text and JSON in a single chunk. 
Extract data from error message if it contains JSON + */ +export const tryParsingHyperbolicError = (error: JSONParseError | TypeValidationError) => { + if (!JSONParseError.isInstance(error)) { + return undefined; + } + + const jsonMatch = error.text.match(/\{.*\}/); // Match between brackets + if (jsonMatch) { + try { + const parsedErrorJson = JSON.parse(jsonMatch[0]); + if (parsedErrorJson.message) { + return HyperbolicErrorResponseSchema.parse(parsedErrorJson); + } + } catch { + return undefined; + } + } +}; From 1b76c6cc248dbaa4d8e3748e908824cde306e09a Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 12:13:43 -0800 Subject: [PATCH 08/22] clean up readme --- packages/ai-sdk-provider-2/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/ai-sdk-provider-2/README.md b/packages/ai-sdk-provider-2/README.md index 3e99fbc..45b5825 100644 --- a/packages/ai-sdk-provider-2/README.md +++ b/packages/ai-sdk-provider-2/README.md @@ -33,7 +33,7 @@ import { generateText } from "ai"; import { createHyperbolic } from "@hyperbolic/ai-sdk-provider"; const hyperbolic = createHyperbolic({ - apiKey: process.env.HYPERBOLIC_API_KEY, // Found in settings after logging in at https://app.hyperbolic.xyz + apiKey: process.env.HYPERBOLIC_API_KEY, // Found in settings after logging in at https://app.hyperbolic.ai }); const { text } = await generateText({ @@ -45,7 +45,7 @@ const { text } = await generateText({ ## Supported models This list is not a definitive list of models supported by Hyperbolic, as it constantly changes as we add new models (and deprecate old ones) to our system. -You can find the latest list of models supported by Hyperbolic [here](https://openrouter.ai/models). +You can find the latest list of models supported by Hyperbolic [here](https://app.hyperbolic.ai/models). 
## Using Models From fa92fa00abb665517cf9634b7d475db54954d7ae Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 12:18:13 -0800 Subject: [PATCH 09/22] remove embedding model support since we don't have any models --- ...nvert-to-hyperbolic-chat-messages.test.ts} | 2 +- ...=> convert-to-hyperbolic-chat-messages.ts} | 2 +- .../src/chat/file-url-utils.ts | 4 +- packages/ai-sdk-provider-2/src/chat/index.ts | 6 +- .../src/chat/payload-comparison.test.ts | 4 +- ...onvert-to-hyperbolic-completion-prompt.ts} | 0 .../ai-sdk-provider-2/src/completion/index.ts | 4 +- .../src/embedding/index.test.ts | 235 ------------------ .../ai-sdk-provider-2/src/embedding/index.ts | 96 ------- .../src/embedding/schemas.ts | 23 -- packages/ai-sdk-provider-2/src/facade.ts | 27 +- .../ai-sdk-provider-2/src/internal/index.ts | 4 +- packages/ai-sdk-provider-2/src/provider.ts | 40 +-- .../src/tests/stream-usage-accounting.test.ts | 2 +- .../src/tests/usage-accounting.test.ts | 2 +- ...s => hyperbolic-chat-completions-input.ts} | 0 ...ettings.ts => hyperbolic-chat-settings.ts} | 0 ...s.ts => hyperbolic-completion-settings.ts} | 0 packages/ai-sdk-provider-2/src/types/index.ts | 2 - .../types/openrouter-embedding-settings.ts | 56 ----- 20 files changed, 19 insertions(+), 490 deletions(-) rename packages/ai-sdk-provider-2/src/chat/{convert-to-openrouter-chat-messages.test.ts => convert-to-hyperbolic-chat-messages.test.ts} (99%) rename packages/ai-sdk-provider-2/src/chat/{convert-to-openrouter-chat-messages.ts => convert-to-hyperbolic-chat-messages.ts} (99%) rename packages/ai-sdk-provider-2/src/completion/{convert-to-openrouter-completion-prompt.ts => convert-to-hyperbolic-completion-prompt.ts} (100%) delete mode 100644 packages/ai-sdk-provider-2/src/embedding/index.test.ts delete mode 100644 packages/ai-sdk-provider-2/src/embedding/index.ts delete mode 100644 packages/ai-sdk-provider-2/src/embedding/schemas.ts rename 
packages/ai-sdk-provider-2/src/types/{openrouter-chat-completions-input.ts => hyperbolic-chat-completions-input.ts} (100%) rename packages/ai-sdk-provider-2/src/types/{openrouter-chat-settings.ts => hyperbolic-chat-settings.ts} (100%) rename packages/ai-sdk-provider-2/src/types/{openrouter-completion-settings.ts => hyperbolic-completion-settings.ts} (100%) delete mode 100644 packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts diff --git a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts b/packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.test.ts similarity index 99% rename from packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts rename to packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.test.ts index 71c08fc..0b6b653 100644 --- a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.test.ts @@ -1,5 +1,5 @@ import { ReasoningDetailType } from "../schemas/reasoning-details"; -import { convertToOpenRouterChatMessages } from "./convert-to-openrouter-chat-messages"; +import { convertToOpenRouterChatMessages } from "./convert-to-hyperbolic-chat-messages"; import { MIME_TO_FORMAT } from "./file-url-utils"; describe("user messages", () => { diff --git a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts b/packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.ts similarity index 99% rename from packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts rename to packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.ts index 18efcf4..6ff34fd 100644 --- a/packages/ai-sdk-provider-2/src/chat/convert-to-openrouter-chat-messages.ts +++ b/packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.ts @@ -10,7 +10,7 @@ import type { ReasoningDetailUnion } from 
"../schemas/reasoning-details"; import type { ChatCompletionContentPart, OpenRouterChatCompletionsInput, -} from "../types/openrouter-chat-completions-input"; +} from "../types/hyperbolic-chat-completions-input"; import { OpenRouterProviderOptionsSchema } from "../schemas/provider-metadata"; import { getFileUrl, getInputAudioData } from "./file-url-utils"; import { isUrl } from "./is-url"; diff --git a/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts b/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts index 34477b2..c4c63c8 100644 --- a/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts +++ b/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts @@ -1,8 +1,8 @@ import type { LanguageModelV3FilePart } from "@ai-sdk/provider"; import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils"; -import type { OpenRouterAudioFormat } from "../types/openrouter-chat-completions-input"; -import { OPENROUTER_AUDIO_FORMATS } from "../types/openrouter-chat-completions-input"; +import type { OpenRouterAudioFormat } from "../types/hyperbolic-chat-completions-input"; +import { OPENROUTER_AUDIO_FORMATS } from "../types/hyperbolic-chat-completions-input"; import { isUrl } from "./is-url"; export function getFileUrl({ diff --git a/packages/ai-sdk-provider-2/src/chat/index.ts b/packages/ai-sdk-provider-2/src/chat/index.ts index 94eef03..e024efd 100644 --- a/packages/ai-sdk-provider-2/src/chat/index.ts +++ b/packages/ai-sdk-provider-2/src/chat/index.ts @@ -25,16 +25,16 @@ import { import type { FileAnnotation } from "../schemas/provider-metadata"; import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; -import type { OpenRouterUsageAccounting } from "../types/index"; import type { OpenRouterChatModelId, OpenRouterChatSettings, -} from "../types/openrouter-chat-settings"; +} from "../types/hyperbolic-chat-settings"; +import type { OpenRouterUsageAccounting } from "../types/index"; import { openrouterFailedResponseHandler } from 
"../schemas/error-response"; import { OpenRouterProviderMetadataSchema } from "../schemas/provider-metadata"; import { ReasoningDetailType } from "../schemas/reasoning-details"; import { createFinishReason, mapOpenRouterFinishReason } from "../utils/map-finish-reason"; -import { convertToOpenRouterChatMessages } from "./convert-to-openrouter-chat-messages"; +import { convertToOpenRouterChatMessages } from "./convert-to-hyperbolic-chat-messages"; import { getBase64FromDataUrl, getMediaType } from "./file-url-utils"; import { getChatCompletionToolChoice } from "./get-tool-choice"; import { diff --git a/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts b/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts index 09746f9..14098c0 100644 --- a/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts @@ -1,8 +1,8 @@ import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; import { describe, expect, it, vi } from "vitest"; -import type { OpenRouterChatCompletionsInput } from "../types/openrouter-chat-completions-input"; -import type { OpenRouterChatSettings } from "../types/openrouter-chat-settings"; +import type { OpenRouterChatCompletionsInput } from "../types/hyperbolic-chat-completions-input"; +import type { OpenRouterChatSettings } from "../types/hyperbolic-chat-settings"; import { createOpenRouter } from "../provider"; describe("Payload Comparison - Large PDF", () => { diff --git a/packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts b/packages/ai-sdk-provider-2/src/completion/convert-to-hyperbolic-completion-prompt.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/completion/convert-to-openrouter-completion-prompt.ts rename to packages/ai-sdk-provider-2/src/completion/convert-to-hyperbolic-completion-prompt.ts diff --git a/packages/ai-sdk-provider-2/src/completion/index.ts 
b/packages/ai-sdk-provider-2/src/completion/index.ts index 5d61414..945fe79 100644 --- a/packages/ai-sdk-provider-2/src/completion/index.ts +++ b/packages/ai-sdk-provider-2/src/completion/index.ts @@ -24,10 +24,10 @@ import type { OpenRouterUsageAccounting } from "../types"; import type { OpenRouterCompletionModelId, OpenRouterCompletionSettings, -} from "../types/openrouter-completion-settings"; +} from "../types/hyperbolic-completion-settings"; import { openrouterFailedResponseHandler } from "../schemas/error-response"; import { createFinishReason, mapOpenRouterFinishReason } from "../utils/map-finish-reason"; -import { convertToOpenRouterCompletionPrompt } from "./convert-to-openrouter-completion-prompt"; +import { convertToOpenRouterCompletionPrompt } from "./convert-to-hyperbolic-completion-prompt"; import { OpenRouterCompletionChunkSchema } from "./schemas"; type OpenRouterCompletionConfig = { diff --git a/packages/ai-sdk-provider-2/src/embedding/index.test.ts b/packages/ai-sdk-provider-2/src/embedding/index.test.ts deleted file mode 100644 index 4b8f6d7..0000000 --- a/packages/ai-sdk-provider-2/src/embedding/index.test.ts +++ /dev/null @@ -1,235 +0,0 @@ -import { describe, expect, it } from "vitest"; - -import { createOpenRouter } from "../provider"; -import { OpenRouterEmbeddingModel } from "./index"; - -describe("OpenRouterEmbeddingModel", () => { - const mockFetch = async (_url: URL | RequestInfo, _init?: RequestInit): Promise => { - return new Response( - JSON.stringify({ - id: "test-id", - object: "list", - data: [ - { - object: "embedding", - embedding: new Array(1536).fill(0.1), - index: 0, - }, - ], - model: "openai/text-embedding-3-small", - usage: { - prompt_tokens: 5, - total_tokens: 5, - cost: 0.00001, - }, - }), - { - status: 200, - headers: { - "content-type": "application/json", - }, - }, - ); - }; - - describe("provider methods", () => { - it("should expose textEmbeddingModel method", () => { - const provider = createOpenRouter({ apiKey: 
"test-key" }); - expect(provider.textEmbeddingModel).toBeDefined(); - expect(typeof provider.textEmbeddingModel).toBe("function"); - }); - - it("should expose embedding method (deprecated)", () => { - const provider = createOpenRouter({ apiKey: "test-key" }); - expect(provider.embedding).toBeDefined(); - expect(typeof provider.embedding).toBe("function"); - }); - - it("should create an embedding model instance", () => { - const provider = createOpenRouter({ apiKey: "test-key" }); - const model = provider.textEmbeddingModel("openai/text-embedding-3-small"); - expect(model).toBeInstanceOf(OpenRouterEmbeddingModel); - expect(model.modelId).toBe("openai/text-embedding-3-small"); - expect(model.provider).toBe("openrouter"); - expect(model.specificationVersion).toBe("v3"); - }); - }); - - describe("doEmbed", () => { - it("should embed a single value", async () => { - const provider = createOpenRouter({ - apiKey: "test-key", - fetch: mockFetch, - }); - const model = provider.textEmbeddingModel("openai/text-embedding-3-small"); - - const result = await model.doEmbed({ - values: ["sunny day at the beach"], - }); - - expect(result.embeddings).toHaveLength(1); - expect(result.embeddings[0]).toHaveLength(1536); - expect(result.usage).toEqual({ tokens: 5 }); - expect( - (result.providerMetadata?.openrouter as { usage?: { cost?: number } })?.usage?.cost, - ).toBe(0.00001); - }); - - it("should embed multiple values", async () => { - const mockFetchMultiple = async ( - _url: URL | RequestInfo, - _init?: RequestInit, - ): Promise => { - return new Response( - JSON.stringify({ - object: "list", - data: [ - { - object: "embedding", - embedding: new Array(1536).fill(0.1), - index: 0, - }, - { - object: "embedding", - embedding: new Array(1536).fill(0.2), - index: 1, - }, - { - object: "embedding", - embedding: new Array(1536).fill(0.3), - index: 2, - }, - ], - model: "openai/text-embedding-3-small", - usage: { - prompt_tokens: 15, - total_tokens: 15, - }, - }), - { - status: 200, - 
headers: { - "content-type": "application/json", - }, - }, - ); - }; - - const provider = createOpenRouter({ - apiKey: "test-key", - fetch: mockFetchMultiple, - }); - const model = provider.textEmbeddingModel("openai/text-embedding-3-small"); - - const result = await model.doEmbed({ - values: ["sunny day at the beach", "rainy day in the city", "snowy mountain peak"], - }); - - expect(result.embeddings).toHaveLength(3); - expect(result.embeddings[0]).toHaveLength(1536); - expect(result.embeddings[1]).toHaveLength(1536); - expect(result.embeddings[2]).toHaveLength(1536); - expect(result.usage).toEqual({ tokens: 15 }); - }); - - it("should pass custom settings to API", async () => { - let capturedRequest: Record | undefined; - - const mockFetchWithCapture = async ( - _url: URL | RequestInfo, - init?: RequestInit, - ): Promise => { - capturedRequest = JSON.parse(init?.body as string); - return new Response( - JSON.stringify({ - object: "list", - data: [ - { - object: "embedding", - embedding: new Array(1536).fill(0.1), - index: 0, - }, - ], - model: "openai/text-embedding-3-small", - usage: { - prompt_tokens: 5, - total_tokens: 5, - }, - }), - { - status: 200, - headers: { - "content-type": "application/json", - }, - }, - ); - }; - - const provider = createOpenRouter({ - apiKey: "test-key", - fetch: mockFetchWithCapture, - }); - - const model = provider.textEmbeddingModel("openai/text-embedding-3-small", { - user: "test-user-123", - provider: { - order: ["openai"], - allow_fallbacks: false, - }, - }); - - await model.doEmbed({ - values: ["test input"], - }); - - expect(capturedRequest?.user).toBe("test-user-123"); - expect(capturedRequest?.provider).toEqual({ - order: ["openai"], - allow_fallbacks: false, - }); - expect(capturedRequest?.model).toBe("openai/text-embedding-3-small"); - expect(capturedRequest?.input).toEqual(["test input"]); - }); - - it("should handle response without usage information", async () => { - const mockFetchNoUsage = async ( - _url: URL | 
RequestInfo, - _init?: RequestInit, - ): Promise => { - return new Response( - JSON.stringify({ - object: "list", - data: [ - { - object: "embedding", - embedding: new Array(1536).fill(0.1), - index: 0, - }, - ], - model: "openai/text-embedding-3-small", - }), - { - status: 200, - headers: { - "content-type": "application/json", - }, - }, - ); - }; - - const provider = createOpenRouter({ - apiKey: "test-key", - fetch: mockFetchNoUsage, - }); - const model = provider.textEmbeddingModel("openai/text-embedding-3-small"); - - const result = await model.doEmbed({ - values: ["test"], - }); - - expect(result.embeddings).toHaveLength(1); - expect(result.usage).toBeUndefined(); - expect(result.providerMetadata).toBeUndefined(); - }); - }); -}); diff --git a/packages/ai-sdk-provider-2/src/embedding/index.ts b/packages/ai-sdk-provider-2/src/embedding/index.ts deleted file mode 100644 index 87e510b..0000000 --- a/packages/ai-sdk-provider-2/src/embedding/index.ts +++ /dev/null @@ -1,96 +0,0 @@ -import type { EmbeddingModelV3, SharedV3Headers, SharedV3ProviderMetadata } from "@ai-sdk/provider"; -import { combineHeaders, createJsonResponseHandler, postJsonToApi } from "@ai-sdk/provider-utils"; - -import type { - OpenRouterEmbeddingModelId, - OpenRouterEmbeddingSettings, -} from "../types/openrouter-embedding-settings"; -import { openrouterFailedResponseHandler } from "../schemas/error-response"; -import { OpenRouterEmbeddingResponseSchema } from "./schemas"; - -type OpenRouterEmbeddingConfig = { - provider: string; - headers: () => Record; - url: (options: { modelId: string; path: string }) => string; - fetch?: typeof fetch; - extraBody?: Record; -}; - -export class OpenRouterEmbeddingModel implements EmbeddingModelV3 { - readonly specificationVersion = "v3" as const; - readonly provider = "openrouter"; - readonly modelId: OpenRouterEmbeddingModelId; - readonly settings: OpenRouterEmbeddingSettings; - readonly maxEmbeddingsPerCall = undefined; - readonly supportsParallelCalls = 
true; - - private readonly config: OpenRouterEmbeddingConfig; - - constructor( - modelId: OpenRouterEmbeddingModelId, - settings: OpenRouterEmbeddingSettings, - config: OpenRouterEmbeddingConfig, - ) { - this.modelId = modelId; - this.settings = settings; - this.config = config; - } - - async doEmbed(options: { - values: Array; - abortSignal?: AbortSignal; - headers?: Record; - }): Promise<{ - embeddings: Array>; - usage?: { tokens: number }; - providerMetadata?: SharedV3ProviderMetadata; - response?: { - headers?: SharedV3Headers; - body?: unknown; - }; - warnings: Array; - }> { - const { values, abortSignal, headers } = options; - - const args = { - model: this.modelId, - input: values, - user: this.settings.user, - provider: this.settings.provider, - ...this.config.extraBody, - ...this.settings.extraBody, - }; - - const { value: responseValue, responseHeaders } = await postJsonToApi({ - url: this.config.url({ - path: "/embeddings", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), headers), - body: args, - failedResponseHandler: openrouterFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler(OpenRouterEmbeddingResponseSchema), - abortSignal, - fetch: this.config.fetch, - }); - - return { - embeddings: responseValue.data.map((item) => item.embedding), - usage: responseValue.usage ? { tokens: responseValue.usage.prompt_tokens } : undefined, - providerMetadata: responseValue.usage?.cost - ? 
{ - openrouter: { - usage: { - cost: responseValue.usage.cost, - }, - }, - } - : undefined, - response: { - headers: responseHeaders, - body: responseValue, - }, - warnings: [], - }; - } -} diff --git a/packages/ai-sdk-provider-2/src/embedding/schemas.ts b/packages/ai-sdk-provider-2/src/embedding/schemas.ts deleted file mode 100644 index 0336f40..0000000 --- a/packages/ai-sdk-provider-2/src/embedding/schemas.ts +++ /dev/null @@ -1,23 +0,0 @@ -import { z } from "zod/v4"; - -const openrouterEmbeddingUsageSchema = z.object({ - prompt_tokens: z.number(), - total_tokens: z.number(), - cost: z.number().optional(), -}); - -const openrouterEmbeddingDataSchema = z.object({ - object: z.literal("embedding"), - embedding: z.array(z.number()), - index: z.number().optional(), -}); - -export const OpenRouterEmbeddingResponseSchema = z.object({ - id: z.string().optional(), - object: z.literal("list"), - data: z.array(openrouterEmbeddingDataSchema), - model: z.string(), - usage: openrouterEmbeddingUsageSchema.optional(), -}); - -export type OpenRouterEmbeddingResponse = z.infer; diff --git a/packages/ai-sdk-provider-2/src/facade.ts b/packages/ai-sdk-provider-2/src/facade.ts index 1440285..b5b2209 100644 --- a/packages/ai-sdk-provider-2/src/facade.ts +++ b/packages/ai-sdk-provider-2/src/facade.ts @@ -4,18 +4,13 @@ import type { OpenRouterProviderSettings } from "./provider"; import type { OpenRouterChatModelId, OpenRouterChatSettings, -} from "./types/openrouter-chat-settings"; +} from "./types/hyperbolic-chat-settings"; import type { OpenRouterCompletionModelId, OpenRouterCompletionSettings, -} from "./types/openrouter-completion-settings"; -import type { - OpenRouterEmbeddingModelId, - OpenRouterEmbeddingSettings, -} from "./types/openrouter-embedding-settings"; +} from "./types/hyperbolic-completion-settings"; import { OpenRouterChatLanguageModel } from "./chat"; import { OpenRouterCompletionLanguageModel } from "./completion"; -import { OpenRouterEmbeddingModel } from 
"./embedding"; /** @deprecated Use `createOpenRouter` instead. @@ -89,22 +84,4 @@ Custom headers to include in the requests. url: ({ path }) => `${this.baseURL}${path}`, }); } - - textEmbeddingModel( - modelId: OpenRouterEmbeddingModelId, - settings: OpenRouterEmbeddingSettings = {}, - ) { - return new OpenRouterEmbeddingModel(modelId, settings, { - provider: "openrouter.embedding", - ...this.baseConfig, - url: ({ path }) => `${this.baseURL}${path}`, - }); - } - - /** - * @deprecated Use textEmbeddingModel instead - */ - embedding(modelId: OpenRouterEmbeddingModelId, settings: OpenRouterEmbeddingSettings = {}) { - return this.textEmbeddingModel(modelId, settings); - } } diff --git a/packages/ai-sdk-provider-2/src/internal/index.ts b/packages/ai-sdk-provider-2/src/internal/index.ts index ffcc9e5..fe316cd 100644 --- a/packages/ai-sdk-provider-2/src/internal/index.ts +++ b/packages/ai-sdk-provider-2/src/internal/index.ts @@ -2,5 +2,5 @@ export * from "../chat"; export * from "../completion"; export * from "../image"; export * from "../types"; -export * from "../types/openrouter-chat-settings"; -export * from "../types/openrouter-completion-settings"; +export * from "../types/hyperbolic-chat-settings"; +export * from "../types/hyperbolic-completion-settings"; diff --git a/packages/ai-sdk-provider-2/src/provider.ts b/packages/ai-sdk-provider-2/src/provider.ts index 01331fd..eb9bae2 100644 --- a/packages/ai-sdk-provider-2/src/provider.ts +++ b/packages/ai-sdk-provider-2/src/provider.ts @@ -8,18 +8,13 @@ import type { import type { OpenRouterChatModelId, OpenRouterChatSettings, -} from "./types/openrouter-chat-settings"; +} from "./types/hyperbolic-chat-settings"; import type { OpenRouterCompletionModelId, OpenRouterCompletionSettings, -} from "./types/openrouter-completion-settings"; -import type { - OpenRouterEmbeddingModelId, - OpenRouterEmbeddingSettings, -} from "./types/openrouter-embedding-settings"; +} from "./types/hyperbolic-completion-settings"; import { 
OpenRouterChatLanguageModel } from "./chat"; import { OpenRouterCompletionLanguageModel } from "./completion"; -import { OpenRouterEmbeddingModel } from "./embedding"; import { HyperbolicImageModel } from "./image"; import { withUserAgentSuffix } from "./utils/with-user-agent-suffix"; import { VERSION } from "./version"; @@ -58,23 +53,6 @@ Creates an OpenRouter completion model for text generation. settings?: OpenRouterCompletionSettings, ): OpenRouterCompletionLanguageModel; - /** -Creates an OpenRouter text embedding model. (AI SDK v5) - */ - textEmbeddingModel( - modelId: OpenRouterEmbeddingModelId, - settings?: OpenRouterEmbeddingSettings, - ): OpenRouterEmbeddingModel; - - /** -Creates an OpenRouter text embedding model. (AI SDK v4 - deprecated, use textEmbeddingModel instead) -@deprecated Use textEmbeddingModel instead - */ - embedding( - modelId: OpenRouterEmbeddingModelId, - settings?: OpenRouterEmbeddingSettings, - ): OpenRouterEmbeddingModel; - image(modelId: HyperbolicImageModelId, settings?: HyperbolicImageSettings): HyperbolicImageModel; } @@ -187,18 +165,6 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open extraBody: options.extraBody, }); - const createEmbeddingModel = ( - modelId: OpenRouterEmbeddingModelId, - settings: OpenRouterEmbeddingSettings = {}, - ) => - new OpenRouterEmbeddingModel(modelId, settings, { - provider: "openrouter.embedding", - url: ({ path }) => `${baseURL}${path}`, - headers: getHeaders, - fetch: options.fetch, - extraBody: options.extraBody, - }); - const createLanguageModel = ( modelId: OpenRouterChatModelId | OpenRouterCompletionModelId, settings?: OpenRouterChatSettings | OpenRouterCompletionSettings, @@ -218,8 +184,6 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open provider.languageModel = createLanguageModel; provider.chat = createChatModel; provider.completion = createCompletionModel; - provider.textEmbeddingModel = createEmbeddingModel; - 
provider.embedding = createEmbeddingModel; // deprecated alias for v4 compatibility provider.image = createImageModel; return provider as OpenRouterProvider; diff --git a/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts b/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts index 820fcef..a998c52 100644 --- a/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts +++ b/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts @@ -1,6 +1,6 @@ import { describe, expect, it } from "vitest"; -import type { OpenRouterChatSettings } from "../types/openrouter-chat-settings"; +import type { OpenRouterChatSettings } from "../types/hyperbolic-chat-settings"; import { OpenRouterChatLanguageModel } from "../chat"; import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; diff --git a/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts b/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts index 9684721..e044e54 100644 --- a/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts +++ b/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts @@ -1,6 +1,6 @@ import { describe, expect, it } from "vitest"; -import type { OpenRouterChatSettings } from "../types/openrouter-chat-settings"; +import type { OpenRouterChatSettings } from "../types/hyperbolic-chat-settings"; import { OpenRouterChatLanguageModel } from "../chat"; import { createTestServer } from "../test-utils/test-server"; diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts b/packages/ai-sdk-provider-2/src/types/hyperbolic-chat-completions-input.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/types/openrouter-chat-completions-input.ts rename to packages/ai-sdk-provider-2/src/types/hyperbolic-chat-completions-input.ts diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts 
b/packages/ai-sdk-provider-2/src/types/hyperbolic-chat-settings.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/types/openrouter-chat-settings.ts rename to packages/ai-sdk-provider-2/src/types/hyperbolic-chat-settings.ts diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts b/packages/ai-sdk-provider-2/src/types/hyperbolic-completion-settings.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/types/openrouter-completion-settings.ts rename to packages/ai-sdk-provider-2/src/types/hyperbolic-completion-settings.ts diff --git a/packages/ai-sdk-provider-2/src/types/index.ts b/packages/ai-sdk-provider-2/src/types/index.ts index fbf75c0..088fc0d 100644 --- a/packages/ai-sdk-provider-2/src/types/index.ts +++ b/packages/ai-sdk-provider-2/src/types/index.ts @@ -2,8 +2,6 @@ import type { LanguageModelV3, LanguageModelV3Prompt } from "@ai-sdk/provider"; export type { LanguageModelV3, LanguageModelV3Prompt }; -export * from "./openrouter-embedding-settings"; - export type OpenRouterProviderOptions = { models?: string[]; diff --git a/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts b/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts deleted file mode 100644 index 9b71625..0000000 --- a/packages/ai-sdk-provider-2/src/types/openrouter-embedding-settings.ts +++ /dev/null @@ -1,56 +0,0 @@ -import type { OpenRouterSharedSettings } from ".."; - -// https://openrouter.ai/api/v1/models -export type OpenRouterEmbeddingModelId = string; - -export type OpenRouterEmbeddingSettings = { - /** - * A unique identifier representing your end-user, which can help OpenRouter to - * monitor and detect abuse. - */ - user?: string; - - /** - * Provider routing preferences to control request routing behavior - */ - provider?: { - /** - * List of provider slugs to try in order (e.g. 
["openai", "voyageai"]) - */ - order?: string[]; - /** - * Whether to allow backup providers when primary is unavailable (default: true) - */ - allow_fallbacks?: boolean; - /** - * Only use providers that support all parameters in your request (default: false) - */ - require_parameters?: boolean; - /** - * Control whether to use providers that may store data - */ - data_collection?: "allow" | "deny"; - /** - * List of provider slugs to allow for this request - */ - only?: string[]; - /** - * List of provider slugs to skip for this request - */ - ignore?: string[]; - /** - * Sort providers by price, throughput, or latency - */ - sort?: "price" | "throughput" | "latency"; - /** - * Maximum pricing you want to pay for this request - */ - max_price?: { - prompt?: number | string; - completion?: number | string; - image?: number | string; - audio?: number | string; - request?: number | string; - }; - }; -} & OpenRouterSharedSettings; From aacbe741158d1ce98922f0c4db931fb9ecdfc6a8 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 12:24:24 -0800 Subject: [PATCH 10/22] replace url --- .../ai-sdk-provider-2/src/chat/index.test.ts | 34 +++++++++---------- .../src/completion/index.test.ts | 12 +++---- packages/ai-sdk-provider-2/src/facade.ts | 4 +-- .../src/tests/provider-options.test.ts | 2 +- .../src/types/hyperbolic-chat-settings.ts | 2 +- 5 files changed, 27 insertions(+), 27 deletions(-) diff --git a/packages/ai-sdk-provider-2/src/chat/index.test.ts b/packages/ai-sdk-provider-2/src/chat/index.test.ts index f5517cd..27a63a1 100644 --- a/packages/ai-sdk-provider-2/src/chat/index.test.ts +++ b/packages/ai-sdk-provider-2/src/chat/index.test.ts @@ -152,7 +152,7 @@ function isTextDeltaPart(part: LanguageModelV3StreamPart): part is Extract< describe("doGenerate", () => { const server = createTestServer({ - "https://openrouter.ai/api/v1/chat/completions": { + "https://api.hyperbolic.xyz/v1/chat/completions": { response: { type: "json-value", body: {} }, }, }); 
@@ -197,7 +197,7 @@ describe("doGenerate", () => { finish_reason?: string; } = {}) { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "json-value", body: { id: "chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd", @@ -768,7 +768,7 @@ describe("doGenerate", () => { describe("doStream", () => { const server = createTestServer({ - "https://openrouter.ai/api/v1/chat/completions": { + "https://api.hyperbolic.xyz/v1/chat/completions": { response: { type: "json-value", body: {} }, }, }); @@ -811,7 +811,7 @@ describe("doStream", () => { finish_reason?: string; }) { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + @@ -1012,7 +1012,7 @@ describe("doStream", () => { // This test verifies that when the API returns both 'reasoning' and 'reasoning_details' fields, // we prioritize reasoning_details and ignore the reasoning field to avoid duplicates. 
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ // First chunk: both reasoning and reasoning_details with different content @@ -1136,7 +1136,7 @@ describe("doStream", () => { // This test verifies that reasoning_details are included in providerMetadata // for all reasoning-delta chunks, enabling users to accumulate them for multi-turn conversations // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ // First chunk: reasoning_details with Text type @@ -1227,7 +1227,7 @@ describe("doStream", () => { // This test reproduces the issue where reasoning appears first but then gets "pushed down" // by content that comes later in the stream // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ // First chunk: Start with reasoning @@ -1305,7 +1305,7 @@ describe("doStream", () => { it("should stream tool deltas", async () => { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + @@ -1522,7 +1522,7 @@ describe("doStream", () => { it("should stream tool call that is sent in one chunk", async () => { // eslint-disable-next-line 
@typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + @@ -1637,7 +1637,7 @@ describe("doStream", () => { it("should override finishReason to tool-calls in streaming when tool calls and encrypted reasoning are present", async () => { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ // First chunk: reasoning_details with encrypted data @@ -1699,7 +1699,7 @@ describe("doStream", () => { it("should stream images", async () => { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + @@ -1770,7 +1770,7 @@ describe("doStream", () => { it("should handle error stream parts", async () => { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ `data: {"error":{"message": "The server had an error processing your request. Sorry about that! 
You can retry your request, or contact us through our ` + @@ -1823,7 +1823,7 @@ describe("doStream", () => { it("should handle unparsable stream parts", async () => { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: ["data: {unparsable}\n\n", "data: [DONE]\n\n"], }; @@ -2081,7 +2081,7 @@ describe("doStream", () => { // This test verifies that file annotations from FileParserPlugin are accumulated // during streaming and included in the finish event's providerMetadata // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ // First chunk with role and content @@ -2147,7 +2147,7 @@ describe("doStream", () => { it("should accumulate multiple file annotations from stream", async () => { // This test verifies that multiple file annotations are accumulated correctly // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks: [ // First chunk with content @@ -2205,14 +2205,14 @@ describe("doStream", () => { describe("debug settings", () => { const server = createTestServer({ - "https://openrouter.ai/api/v1/chat/completions": { + "https://api.hyperbolic.xyz/v1/chat/completions": { response: { type: "json-value", body: {} }, }, }); function prepareJsonResponse({ content = "" }: { content?: string } = {}) { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/chat/completions"]!.response = { + 
server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "json-value", body: { id: "chatcmpl-test", diff --git a/packages/ai-sdk-provider-2/src/completion/index.test.ts b/packages/ai-sdk-provider-2/src/completion/index.test.ts index cfe5ea7..0c6e663 100644 --- a/packages/ai-sdk-provider-2/src/completion/index.test.ts +++ b/packages/ai-sdk-provider-2/src/completion/index.test.ts @@ -46,7 +46,7 @@ const model = provider.completion("openai/gpt-3.5-turbo-instruct"); describe("doGenerate", () => { const server = createTestServer({ - "https://openrouter.ai/api/v1/completions": { + "https://api.hyperbolic.xyz/v1/completions": { response: { type: "json-value", body: {} }, }, }); @@ -75,7 +75,7 @@ describe("doGenerate", () => { finish_reason?: string; }) { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/completions"]!.response = { type: "json-value", body: { id: "cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB", @@ -233,7 +233,7 @@ describe("doGenerate", () => { describe("doStream", () => { const server = createTestServer({ - "https://openrouter.ai/api/v1/completions": { + "https://api.hyperbolic.xyz/v1/completions": { response: { type: "stream-chunks", chunks: [] }, }, }); @@ -272,7 +272,7 @@ describe("doStream", () => { finish_reason?: string; }) { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/completions"]!.response = { type: "stream-chunks", chunks: [ ...content.map((text) => { @@ -416,7 +416,7 @@ describe("doStream", () => { it("should handle error stream parts", async () => { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/completions"]!.response = 
{ type: "stream-chunks", chunks: [ `data: {"error":{"message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + @@ -469,7 +469,7 @@ describe("doStream", () => { it("should handle unparsable stream parts", async () => { // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://openrouter.ai/api/v1/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/completions"]!.response = { type: "stream-chunks", chunks: ["data: {unparsable}\n\n", "data: [DONE]\n\n"], }; diff --git a/packages/ai-sdk-provider-2/src/facade.ts b/packages/ai-sdk-provider-2/src/facade.ts index b5b2209..8b6c332 100644 --- a/packages/ai-sdk-provider-2/src/facade.ts +++ b/packages/ai-sdk-provider-2/src/facade.ts @@ -18,7 +18,7 @@ import { OpenRouterCompletionLanguageModel } from "./completion"; export class OpenRouter { /** Use a different URL prefix for API calls, e.g. to use proxy servers. -The default prefix is `https://openrouter.ai/api/v1`. +The default prefix is `https://api.hyperbolic.xyz/v1`. */ readonly baseURL: string; @@ -43,7 +43,7 @@ Custom headers to include in the requests. */ constructor(options: OpenRouterProviderSettings = {}) { this.baseURL = - withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? "https://openrouter.ai/api/v1"; + withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? 
"https://api.hyperbolic.xyz/v1"; this.apiKey = options.apiKey; this.headers = options.headers; this.api_keys = options.api_keys; diff --git a/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts b/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts index 54ded29..cbe6464 100644 --- a/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts +++ b/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts @@ -12,7 +12,7 @@ const TEST_MESSAGES: ModelMessage[] = [ describe("providerOptions", () => { const server = createTestServer({ - "https://openrouter.ai/api/v1/chat/completions": { + "https://api.hyperbolic.xyz/v1/chat/completions": { response: { type: "stream-chunks", chunks: [], diff --git a/packages/ai-sdk-provider-2/src/types/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider-2/src/types/hyperbolic-chat-settings.ts index 31f039b..9aff314 100644 --- a/packages/ai-sdk-provider-2/src/types/hyperbolic-chat-settings.ts +++ b/packages/ai-sdk-provider-2/src/types/hyperbolic-chat-settings.ts @@ -2,7 +2,7 @@ import type * as models from "@openrouter/sdk/models"; import type { OpenRouterSharedSettings } from ".."; -// https://openrouter.ai/api/v1/models +// https://api.hyperbolic.xyz/v1/models export type OpenRouterChatModelId = string; export type OpenRouterChatSettings = { From a376760de4bdacb1eb871690eb7b041e9c06301d Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 12:25:21 -0800 Subject: [PATCH 11/22] move new version to proper package name --- packages/ai-sdk-provider-2/src/index.ts | 3 --- .../ai-sdk-provider-2/src/internal/index.ts | 6 ------ .../.prettierignore | 0 .../CHANGELOG.md | 0 .../LICENSE | 0 .../README.md | 4 ++-- .../eslint.config.js | 0 .../package.json | 17 ++++------------- .../src/__generated__/models.gen.ts | 0 .../convert-to-hyperbolic-chat-messages.test.ts | 0 .../src/convert-to-hyperbolic-chat-messages.ts | 0 .../convert-to-hyperbolic-completion-prompt.ts | 0 
.../src/hyperbolic-chat-language-model.test.ts | 0 .../src/hyperbolic-chat-language-model.ts | 0 .../src/hyperbolic-chat-prompt.ts | 0 .../src/hyperbolic-chat-settings.ts | 0 ...hyperbolic-completion-language-model.test.ts | 0 .../src/hyperbolic-completion-language-model.ts | 0 .../src/hyperbolic-completion-settings.ts | 0 .../src}/hyperbolic-error.ts | 0 .../src/hyperbolic-image-language-model.ts | 0 .../src/hyperbolic-image-settings.ts | 0 .../src/hyperbolic-provider-options.test.ts | 0 .../src/hyperbolic-provider.ts | 0 packages/ai-sdk-provider-old/src/index.ts | 3 +++ .../ai-sdk-provider-old/src/internal/index.ts | 7 +++++++ .../src/map-hyperbolic-chat-logprobs.ts | 0 .../src/map-hyperbolic-completion-logprobs.ts | 0 .../src/map-hyperbolic-finish-reason.ts | 0 .../src/scripts/templates/models.ts.hbs | 0 .../src/scripts/update-models-list.ts | 0 .../src/types.ts | 0 .../tsconfig.json | 0 .../tsup.config.ts | 9 --------- .../turbo.json | 0 .../vitest.config.mts | 0 .../vitest.edge.config.ts | 0 .../vitest.node.config.ts | 0 packages/ai-sdk-provider/README.md | 4 ++-- packages/ai-sdk-provider/package.json | 15 ++++++++++++--- .../convert-to-hyperbolic-chat-messages.test.ts | 0 .../chat/convert-to-hyperbolic-chat-messages.ts | 0 .../src/chat/errors.test.ts | 0 .../src/chat/file-parser-schema.test.ts | 0 .../src/chat/file-url-utils.ts | 0 .../src/chat/get-tool-choice.ts | 0 .../src/chat/index.test.ts | 0 .../src/chat/index.ts | 0 .../src/chat/is-url.ts | 0 .../src/chat/large-pdf-response.test.ts | 0 .../src/chat/payload-comparison.test.ts | 0 .../src/chat/schemas.ts | 0 .../convert-to-hyperbolic-completion-prompt.ts | 0 .../src/completion/index.test.ts | 0 .../src/completion/index.ts | 0 .../src/completion/schemas.ts | 0 .../src/facade.ts | 0 .../src/image/hyperbolic-image-settings.ts | 0 .../src/image/index.ts | 0 packages/ai-sdk-provider/src/index.ts | 4 ++-- packages/ai-sdk-provider/src/internal/index.ts | 11 +++++------ .../src/provider.ts | 0 
.../src/schemas/error-response.test.ts | 0 .../src/schemas/error-response.ts | 0 .../src/schemas/format.ts | 0 .../src/schemas/image.ts | 0 .../src/schemas/provider-metadata.ts | 0 .../src/schemas/reasoning-details.ts | 0 .../src/test-utils/test-server.ts | 0 .../src/tests/provider-options.test.ts | 0 .../src/tests/stream-usage-accounting.test.ts | 0 .../src/tests/usage-accounting.test.ts | 0 .../types/hyperbolic-chat-completions-input.ts | 0 .../src/types/hyperbolic-chat-settings.ts | 0 .../src/types/hyperbolic-completion-settings.ts | 0 .../src/types/index.ts | 0 .../src/{ => utils}/hyperbolic-error.ts | 0 .../src/utils/map-finish-reason.ts | 0 .../src/utils/remove-undefined.ts | 0 .../src/utils/type-guards.ts | 0 .../src/utils/with-user-agent-suffix.ts | 0 .../src/version.ts | 0 packages/ai-sdk-provider/tsup.config.ts | 9 +++++++++ 83 files changed, 46 insertions(+), 46 deletions(-) delete mode 100644 packages/ai-sdk-provider-2/src/index.ts delete mode 100644 packages/ai-sdk-provider-2/src/internal/index.ts rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/.prettierignore (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/CHANGELOG.md (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/LICENSE (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/README.md (96%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/eslint.config.js (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/package.json (86%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/src/__generated__/models.gen.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/convert-to-hyperbolic-chat-messages.test.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/convert-to-hyperbolic-chat-messages.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/convert-to-hyperbolic-completion-prompt.ts (100%) rename packages/{ai-sdk-provider => 
ai-sdk-provider-old}/src/hyperbolic-chat-language-model.test.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-chat-language-model.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-chat-prompt.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-chat-settings.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-completion-language-model.test.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-completion-language-model.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-completion-settings.ts (100%) rename packages/{ai-sdk-provider-2/src/utils => ai-sdk-provider-old/src}/hyperbolic-error.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-image-language-model.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-image-settings.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-provider-options.test.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/hyperbolic-provider.ts (100%) create mode 100644 packages/ai-sdk-provider-old/src/index.ts create mode 100644 packages/ai-sdk-provider-old/src/internal/index.ts rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/map-hyperbolic-chat-logprobs.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/map-hyperbolic-completion-logprobs.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/map-hyperbolic-finish-reason.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/src/scripts/templates/models.ts.hbs (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/src/scripts/update-models-list.ts (100%) rename packages/{ai-sdk-provider => ai-sdk-provider-old}/src/types.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/tsconfig.json (100%) rename 
packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/tsup.config.ts (51%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/turbo.json (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/vitest.config.mts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/vitest.edge.config.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider-old}/vitest.node.config.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/convert-to-hyperbolic-chat-messages.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/convert-to-hyperbolic-chat-messages.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/errors.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/file-parser-schema.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/file-url-utils.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/get-tool-choice.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/index.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/index.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/is-url.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/large-pdf-response.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/payload-comparison.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/chat/schemas.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/completion/convert-to-hyperbolic-completion-prompt.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/completion/index.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/completion/index.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/completion/schemas.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/facade.ts (100%) rename 
packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/image/hyperbolic-image-settings.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/image/index.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/provider.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/schemas/error-response.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/schemas/error-response.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/schemas/format.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/schemas/image.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/schemas/provider-metadata.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/schemas/reasoning-details.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/test-utils/test-server.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/tests/provider-options.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/tests/stream-usage-accounting.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/tests/usage-accounting.test.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/types/hyperbolic-chat-completions-input.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/types/hyperbolic-chat-settings.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/types/hyperbolic-completion-settings.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/types/index.ts (100%) rename packages/ai-sdk-provider/src/{ => utils}/hyperbolic-error.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/utils/map-finish-reason.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/utils/remove-undefined.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/utils/type-guards.ts (100%) rename packages/{ai-sdk-provider-2 => 
ai-sdk-provider}/src/utils/with-user-agent-suffix.ts (100%) rename packages/{ai-sdk-provider-2 => ai-sdk-provider}/src/version.ts (100%) diff --git a/packages/ai-sdk-provider-2/src/index.ts b/packages/ai-sdk-provider-2/src/index.ts deleted file mode 100644 index 9fa3755..0000000 --- a/packages/ai-sdk-provider-2/src/index.ts +++ /dev/null @@ -1,3 +0,0 @@ -export * from "./facade"; -export * from "./provider"; -export * from "./types"; diff --git a/packages/ai-sdk-provider-2/src/internal/index.ts b/packages/ai-sdk-provider-2/src/internal/index.ts deleted file mode 100644 index fe316cd..0000000 --- a/packages/ai-sdk-provider-2/src/internal/index.ts +++ /dev/null @@ -1,6 +0,0 @@ -export * from "../chat"; -export * from "../completion"; -export * from "../image"; -export * from "../types"; -export * from "../types/hyperbolic-chat-settings"; -export * from "../types/hyperbolic-completion-settings"; diff --git a/packages/ai-sdk-provider-2/.prettierignore b/packages/ai-sdk-provider-old/.prettierignore similarity index 100% rename from packages/ai-sdk-provider-2/.prettierignore rename to packages/ai-sdk-provider-old/.prettierignore diff --git a/packages/ai-sdk-provider-2/CHANGELOG.md b/packages/ai-sdk-provider-old/CHANGELOG.md similarity index 100% rename from packages/ai-sdk-provider-2/CHANGELOG.md rename to packages/ai-sdk-provider-old/CHANGELOG.md diff --git a/packages/ai-sdk-provider-2/LICENSE b/packages/ai-sdk-provider-old/LICENSE similarity index 100% rename from packages/ai-sdk-provider-2/LICENSE rename to packages/ai-sdk-provider-old/LICENSE diff --git a/packages/ai-sdk-provider-2/README.md b/packages/ai-sdk-provider-old/README.md similarity index 96% rename from packages/ai-sdk-provider-2/README.md rename to packages/ai-sdk-provider-old/README.md index 45b5825..3e99fbc 100644 --- a/packages/ai-sdk-provider-2/README.md +++ b/packages/ai-sdk-provider-old/README.md @@ -33,7 +33,7 @@ import { generateText } from "ai"; import { createHyperbolic } from 
"@hyperbolic/ai-sdk-provider"; const hyperbolic = createHyperbolic({ - apiKey: process.env.HYPERBOLIC_API_KEY, // Found in settings after logging in at https://app.hyperbolic.ai + apiKey: process.env.HYPERBOLIC_API_KEY, // Found in settings after logging in at https://app.hyperbolic.xyz }); const { text } = await generateText({ @@ -45,7 +45,7 @@ const { text } = await generateText({ ## Supported models This list is not a definitive list of models supported by Hyperbolic, as it constantly changes as we add new models (and deprecate old ones) to our system. -You can find the latest list of models supported by Hyperbolic [here](https://app.hyperbolic.ai/models). +You can find the latest list of models supported by Hyperbolic [here](https://openrouter.ai/models). ## Using Models diff --git a/packages/ai-sdk-provider-2/eslint.config.js b/packages/ai-sdk-provider-old/eslint.config.js similarity index 100% rename from packages/ai-sdk-provider-2/eslint.config.js rename to packages/ai-sdk-provider-old/eslint.config.js diff --git a/packages/ai-sdk-provider-2/package.json b/packages/ai-sdk-provider-old/package.json similarity index 86% rename from packages/ai-sdk-provider-2/package.json rename to packages/ai-sdk-provider-old/package.json index 06a5d36..fef3ce5 100644 --- a/packages/ai-sdk-provider-2/package.json +++ b/packages/ai-sdk-provider-old/package.json @@ -1,5 +1,5 @@ { - "name": "@hyperbolic/ai-sdk-provider-2", + "name": "@hyperbolic/ai-sdk-provider-old", "private": false, "version": "0.1.3", "type": "module", @@ -54,26 +54,17 @@ "@hyperbolic/eslint-config": "workspace:*", "@hyperbolic/prettier-config": "workspace:*", "@hyperbolic/tsconfig": "workspace:*", - "@openrouter/sdk": "^0.1.27", - "@types/json-schema": "7.0.15", - "ai": "^6.0.48", "eslint": "catalog:", "handlebars": "^4.7.8", - "msw": "2.12.4", "prettier": "catalog:", "tsup": "8.5.0", "type-fest": "^4.37.0", - "typescript": "catalog:", - "vite-tsconfig-paths": "^5.1.4", - "vitest": "3.2.4", - "zod": "^4.0.0" 
+ "typescript": "catalog:" }, "dependencies": { "@ai-sdk/provider": "^3.0.5", - "@ai-sdk/provider-utils": "^4.0.9" - }, - "peerDependencies": { + "@ai-sdk/provider-utils": "^4.0.9", "ai": "^6.0.48", - "zod": "^3.25.0 || ^4.0.0" + "zod": "^4.0.0" } } diff --git a/packages/ai-sdk-provider-2/src/__generated__/models.gen.ts b/packages/ai-sdk-provider-old/src/__generated__/models.gen.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/__generated__/models.gen.ts rename to packages/ai-sdk-provider-old/src/__generated__/models.gen.ts diff --git a/packages/ai-sdk-provider/src/convert-to-hyperbolic-chat-messages.test.ts b/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.test.ts similarity index 100% rename from packages/ai-sdk-provider/src/convert-to-hyperbolic-chat-messages.test.ts rename to packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.test.ts diff --git a/packages/ai-sdk-provider/src/convert-to-hyperbolic-chat-messages.ts b/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.ts similarity index 100% rename from packages/ai-sdk-provider/src/convert-to-hyperbolic-chat-messages.ts rename to packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.ts diff --git a/packages/ai-sdk-provider/src/convert-to-hyperbolic-completion-prompt.ts b/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-completion-prompt.ts similarity index 100% rename from packages/ai-sdk-provider/src/convert-to-hyperbolic-completion-prompt.ts rename to packages/ai-sdk-provider-old/src/convert-to-hyperbolic-completion-prompt.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-chat-language-model.test.ts b/packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.test.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-chat-language-model.test.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.test.ts diff --git 
a/packages/ai-sdk-provider/src/hyperbolic-chat-language-model.ts b/packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-chat-language-model.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-chat-prompt.ts b/packages/ai-sdk-provider-old/src/hyperbolic-chat-prompt.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-chat-prompt.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-chat-prompt.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider-old/src/hyperbolic-chat-settings.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-chat-settings.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-chat-settings.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-completion-language-model.test.ts b/packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.test.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-completion-language-model.test.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.test.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-completion-language-model.ts b/packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-completion-language-model.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-completion-settings.ts b/packages/ai-sdk-provider-old/src/hyperbolic-completion-settings.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-completion-settings.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-completion-settings.ts diff --git a/packages/ai-sdk-provider-2/src/utils/hyperbolic-error.ts 
b/packages/ai-sdk-provider-old/src/hyperbolic-error.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/utils/hyperbolic-error.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-error.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-image-language-model.ts b/packages/ai-sdk-provider-old/src/hyperbolic-image-language-model.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-image-language-model.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-image-language-model.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-image-settings.ts b/packages/ai-sdk-provider-old/src/hyperbolic-image-settings.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-image-settings.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-image-settings.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-provider-options.test.ts b/packages/ai-sdk-provider-old/src/hyperbolic-provider-options.test.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-provider-options.test.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-provider-options.test.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-provider.ts b/packages/ai-sdk-provider-old/src/hyperbolic-provider.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-provider.ts rename to packages/ai-sdk-provider-old/src/hyperbolic-provider.ts diff --git a/packages/ai-sdk-provider-old/src/index.ts b/packages/ai-sdk-provider-old/src/index.ts new file mode 100644 index 0000000..0180b06 --- /dev/null +++ b/packages/ai-sdk-provider-old/src/index.ts @@ -0,0 +1,3 @@ +export * from "./hyperbolic-provider"; +export * from "./types"; +export * from "./hyperbolic-error"; diff --git a/packages/ai-sdk-provider-old/src/internal/index.ts b/packages/ai-sdk-provider-old/src/internal/index.ts new file mode 100644 index 0000000..c9936d2 --- /dev/null +++ b/packages/ai-sdk-provider-old/src/internal/index.ts @@ -0,0 
+1,7 @@ +export * from "../hyperbolic-chat-language-model"; +export * from "../hyperbolic-chat-settings"; +export * from "../hyperbolic-completion-language-model"; +export * from "../hyperbolic-completion-settings"; +export * from "../hyperbolic-image-language-model"; +export * from "../hyperbolic-image-settings"; +export * from "../types"; diff --git a/packages/ai-sdk-provider/src/map-hyperbolic-chat-logprobs.ts b/packages/ai-sdk-provider-old/src/map-hyperbolic-chat-logprobs.ts similarity index 100% rename from packages/ai-sdk-provider/src/map-hyperbolic-chat-logprobs.ts rename to packages/ai-sdk-provider-old/src/map-hyperbolic-chat-logprobs.ts diff --git a/packages/ai-sdk-provider/src/map-hyperbolic-completion-logprobs.ts b/packages/ai-sdk-provider-old/src/map-hyperbolic-completion-logprobs.ts similarity index 100% rename from packages/ai-sdk-provider/src/map-hyperbolic-completion-logprobs.ts rename to packages/ai-sdk-provider-old/src/map-hyperbolic-completion-logprobs.ts diff --git a/packages/ai-sdk-provider/src/map-hyperbolic-finish-reason.ts b/packages/ai-sdk-provider-old/src/map-hyperbolic-finish-reason.ts similarity index 100% rename from packages/ai-sdk-provider/src/map-hyperbolic-finish-reason.ts rename to packages/ai-sdk-provider-old/src/map-hyperbolic-finish-reason.ts diff --git a/packages/ai-sdk-provider-2/src/scripts/templates/models.ts.hbs b/packages/ai-sdk-provider-old/src/scripts/templates/models.ts.hbs similarity index 100% rename from packages/ai-sdk-provider-2/src/scripts/templates/models.ts.hbs rename to packages/ai-sdk-provider-old/src/scripts/templates/models.ts.hbs diff --git a/packages/ai-sdk-provider-2/src/scripts/update-models-list.ts b/packages/ai-sdk-provider-old/src/scripts/update-models-list.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/scripts/update-models-list.ts rename to packages/ai-sdk-provider-old/src/scripts/update-models-list.ts diff --git a/packages/ai-sdk-provider/src/types.ts 
b/packages/ai-sdk-provider-old/src/types.ts similarity index 100% rename from packages/ai-sdk-provider/src/types.ts rename to packages/ai-sdk-provider-old/src/types.ts diff --git a/packages/ai-sdk-provider-2/tsconfig.json b/packages/ai-sdk-provider-old/tsconfig.json similarity index 100% rename from packages/ai-sdk-provider-2/tsconfig.json rename to packages/ai-sdk-provider-old/tsconfig.json diff --git a/packages/ai-sdk-provider-2/tsup.config.ts b/packages/ai-sdk-provider-old/tsup.config.ts similarity index 51% rename from packages/ai-sdk-provider-2/tsup.config.ts rename to packages/ai-sdk-provider-old/tsup.config.ts index 2d8b30b..cded0e2 100644 --- a/packages/ai-sdk-provider-2/tsup.config.ts +++ b/packages/ai-sdk-provider-old/tsup.config.ts @@ -1,17 +1,11 @@ -import { readFileSync } from "node:fs"; import { defineConfig } from "tsup"; -const package_ = JSON.parse(readFileSync(new URL("./package.json", import.meta.url), "utf8")); - export default defineConfig([ { entry: ["src/index.ts"], format: ["cjs", "esm"], dts: true, sourcemap: true, - define: { - __PACKAGE_VERSION__: JSON.stringify(package_.version), - }, }, { entry: ["src/internal/index.ts"], @@ -19,8 +13,5 @@ export default defineConfig([ format: ["cjs", "esm"], dts: true, sourcemap: true, - define: { - __PACKAGE_VERSION__: JSON.stringify(package_.version), - }, }, ]); diff --git a/packages/ai-sdk-provider-2/turbo.json b/packages/ai-sdk-provider-old/turbo.json similarity index 100% rename from packages/ai-sdk-provider-2/turbo.json rename to packages/ai-sdk-provider-old/turbo.json diff --git a/packages/ai-sdk-provider-2/vitest.config.mts b/packages/ai-sdk-provider-old/vitest.config.mts similarity index 100% rename from packages/ai-sdk-provider-2/vitest.config.mts rename to packages/ai-sdk-provider-old/vitest.config.mts diff --git a/packages/ai-sdk-provider-2/vitest.edge.config.ts b/packages/ai-sdk-provider-old/vitest.edge.config.ts similarity index 100% rename from 
packages/ai-sdk-provider-2/vitest.edge.config.ts rename to packages/ai-sdk-provider-old/vitest.edge.config.ts diff --git a/packages/ai-sdk-provider-2/vitest.node.config.ts b/packages/ai-sdk-provider-old/vitest.node.config.ts similarity index 100% rename from packages/ai-sdk-provider-2/vitest.node.config.ts rename to packages/ai-sdk-provider-old/vitest.node.config.ts diff --git a/packages/ai-sdk-provider/README.md b/packages/ai-sdk-provider/README.md index 3e99fbc..45b5825 100644 --- a/packages/ai-sdk-provider/README.md +++ b/packages/ai-sdk-provider/README.md @@ -33,7 +33,7 @@ import { generateText } from "ai"; import { createHyperbolic } from "@hyperbolic/ai-sdk-provider"; const hyperbolic = createHyperbolic({ - apiKey: process.env.HYPERBOLIC_API_KEY, // Found in settings after logging in at https://app.hyperbolic.xyz + apiKey: process.env.HYPERBOLIC_API_KEY, // Found in settings after logging in at https://app.hyperbolic.ai }); const { text } = await generateText({ @@ -45,7 +45,7 @@ const { text } = await generateText({ ## Supported models This list is not a definitive list of models supported by Hyperbolic, as it constantly changes as we add new models (and deprecate old ones) to our system. -You can find the latest list of models supported by Hyperbolic [here](https://openrouter.ai/models). +You can find the latest list of models supported by Hyperbolic [here](https://app.hyperbolic.ai/models). 
## Using Models diff --git a/packages/ai-sdk-provider/package.json b/packages/ai-sdk-provider/package.json index b543660..213e59b 100644 --- a/packages/ai-sdk-provider/package.json +++ b/packages/ai-sdk-provider/package.json @@ -54,17 +54,26 @@ "@hyperbolic/eslint-config": "workspace:*", "@hyperbolic/prettier-config": "workspace:*", "@hyperbolic/tsconfig": "workspace:*", + "@openrouter/sdk": "^0.1.27", + "@types/json-schema": "7.0.15", + "ai": "^6.0.48", "eslint": "catalog:", "handlebars": "^4.7.8", + "msw": "2.12.4", "prettier": "catalog:", "tsup": "8.5.0", "type-fest": "^4.37.0", - "typescript": "catalog:" + "typescript": "catalog:", + "vite-tsconfig-paths": "^5.1.4", + "vitest": "3.2.4", + "zod": "^4.0.0" }, "dependencies": { "@ai-sdk/provider": "^3.0.5", - "@ai-sdk/provider-utils": "^4.0.9", + "@ai-sdk/provider-utils": "^4.0.9" + }, + "peerDependencies": { "ai": "^6.0.48", - "zod": "^4.0.0" + "zod": "^3.25.0 || ^4.0.0" } } diff --git a/packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.test.ts b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.test.ts rename to packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts diff --git a/packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.ts b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/convert-to-hyperbolic-chat-messages.ts rename to packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts diff --git a/packages/ai-sdk-provider-2/src/chat/errors.test.ts b/packages/ai-sdk-provider/src/chat/errors.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/errors.test.ts rename to packages/ai-sdk-provider/src/chat/errors.test.ts diff --git 
a/packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts b/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/file-parser-schema.test.ts rename to packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts diff --git a/packages/ai-sdk-provider-2/src/chat/file-url-utils.ts b/packages/ai-sdk-provider/src/chat/file-url-utils.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/file-url-utils.ts rename to packages/ai-sdk-provider/src/chat/file-url-utils.ts diff --git a/packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts b/packages/ai-sdk-provider/src/chat/get-tool-choice.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/get-tool-choice.ts rename to packages/ai-sdk-provider/src/chat/get-tool-choice.ts diff --git a/packages/ai-sdk-provider-2/src/chat/index.test.ts b/packages/ai-sdk-provider/src/chat/index.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/index.test.ts rename to packages/ai-sdk-provider/src/chat/index.test.ts diff --git a/packages/ai-sdk-provider-2/src/chat/index.ts b/packages/ai-sdk-provider/src/chat/index.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/index.ts rename to packages/ai-sdk-provider/src/chat/index.ts diff --git a/packages/ai-sdk-provider-2/src/chat/is-url.ts b/packages/ai-sdk-provider/src/chat/is-url.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/is-url.ts rename to packages/ai-sdk-provider/src/chat/is-url.ts diff --git a/packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts b/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/large-pdf-response.test.ts rename to packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts diff --git a/packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts 
b/packages/ai-sdk-provider/src/chat/payload-comparison.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/payload-comparison.test.ts rename to packages/ai-sdk-provider/src/chat/payload-comparison.test.ts diff --git a/packages/ai-sdk-provider-2/src/chat/schemas.ts b/packages/ai-sdk-provider/src/chat/schemas.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/chat/schemas.ts rename to packages/ai-sdk-provider/src/chat/schemas.ts diff --git a/packages/ai-sdk-provider-2/src/completion/convert-to-hyperbolic-completion-prompt.ts b/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/completion/convert-to-hyperbolic-completion-prompt.ts rename to packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts diff --git a/packages/ai-sdk-provider-2/src/completion/index.test.ts b/packages/ai-sdk-provider/src/completion/index.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/completion/index.test.ts rename to packages/ai-sdk-provider/src/completion/index.test.ts diff --git a/packages/ai-sdk-provider-2/src/completion/index.ts b/packages/ai-sdk-provider/src/completion/index.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/completion/index.ts rename to packages/ai-sdk-provider/src/completion/index.ts diff --git a/packages/ai-sdk-provider-2/src/completion/schemas.ts b/packages/ai-sdk-provider/src/completion/schemas.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/completion/schemas.ts rename to packages/ai-sdk-provider/src/completion/schemas.ts diff --git a/packages/ai-sdk-provider-2/src/facade.ts b/packages/ai-sdk-provider/src/facade.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/facade.ts rename to packages/ai-sdk-provider/src/facade.ts diff --git a/packages/ai-sdk-provider-2/src/image/hyperbolic-image-settings.ts 
b/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/image/hyperbolic-image-settings.ts rename to packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts diff --git a/packages/ai-sdk-provider-2/src/image/index.ts b/packages/ai-sdk-provider/src/image/index.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/image/index.ts rename to packages/ai-sdk-provider/src/image/index.ts diff --git a/packages/ai-sdk-provider/src/index.ts b/packages/ai-sdk-provider/src/index.ts index 0180b06..9fa3755 100644 --- a/packages/ai-sdk-provider/src/index.ts +++ b/packages/ai-sdk-provider/src/index.ts @@ -1,3 +1,3 @@ -export * from "./hyperbolic-provider"; +export * from "./facade"; +export * from "./provider"; export * from "./types"; -export * from "./hyperbolic-error"; diff --git a/packages/ai-sdk-provider/src/internal/index.ts b/packages/ai-sdk-provider/src/internal/index.ts index c9936d2..fe316cd 100644 --- a/packages/ai-sdk-provider/src/internal/index.ts +++ b/packages/ai-sdk-provider/src/internal/index.ts @@ -1,7 +1,6 @@ -export * from "../hyperbolic-chat-language-model"; -export * from "../hyperbolic-chat-settings"; -export * from "../hyperbolic-completion-language-model"; -export * from "../hyperbolic-completion-settings"; -export * from "../hyperbolic-image-language-model"; -export * from "../hyperbolic-image-settings"; +export * from "../chat"; +export * from "../completion"; +export * from "../image"; export * from "../types"; +export * from "../types/hyperbolic-chat-settings"; +export * from "../types/hyperbolic-completion-settings"; diff --git a/packages/ai-sdk-provider-2/src/provider.ts b/packages/ai-sdk-provider/src/provider.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/provider.ts rename to packages/ai-sdk-provider/src/provider.ts diff --git a/packages/ai-sdk-provider-2/src/schemas/error-response.test.ts 
b/packages/ai-sdk-provider/src/schemas/error-response.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/schemas/error-response.test.ts rename to packages/ai-sdk-provider/src/schemas/error-response.test.ts diff --git a/packages/ai-sdk-provider-2/src/schemas/error-response.ts b/packages/ai-sdk-provider/src/schemas/error-response.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/schemas/error-response.ts rename to packages/ai-sdk-provider/src/schemas/error-response.ts diff --git a/packages/ai-sdk-provider-2/src/schemas/format.ts b/packages/ai-sdk-provider/src/schemas/format.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/schemas/format.ts rename to packages/ai-sdk-provider/src/schemas/format.ts diff --git a/packages/ai-sdk-provider-2/src/schemas/image.ts b/packages/ai-sdk-provider/src/schemas/image.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/schemas/image.ts rename to packages/ai-sdk-provider/src/schemas/image.ts diff --git a/packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts b/packages/ai-sdk-provider/src/schemas/provider-metadata.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/schemas/provider-metadata.ts rename to packages/ai-sdk-provider/src/schemas/provider-metadata.ts diff --git a/packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts b/packages/ai-sdk-provider/src/schemas/reasoning-details.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/schemas/reasoning-details.ts rename to packages/ai-sdk-provider/src/schemas/reasoning-details.ts diff --git a/packages/ai-sdk-provider-2/src/test-utils/test-server.ts b/packages/ai-sdk-provider/src/test-utils/test-server.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/test-utils/test-server.ts rename to packages/ai-sdk-provider/src/test-utils/test-server.ts diff --git a/packages/ai-sdk-provider-2/src/tests/provider-options.test.ts 
b/packages/ai-sdk-provider/src/tests/provider-options.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/tests/provider-options.test.ts rename to packages/ai-sdk-provider/src/tests/provider-options.test.ts diff --git a/packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts b/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/tests/stream-usage-accounting.test.ts rename to packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts diff --git a/packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts b/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/tests/usage-accounting.test.ts rename to packages/ai-sdk-provider/src/tests/usage-accounting.test.ts diff --git a/packages/ai-sdk-provider-2/src/types/hyperbolic-chat-completions-input.ts b/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/types/hyperbolic-chat-completions-input.ts rename to packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts diff --git a/packages/ai-sdk-provider-2/src/types/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/types/hyperbolic-chat-settings.ts rename to packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts diff --git a/packages/ai-sdk-provider-2/src/types/hyperbolic-completion-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/types/hyperbolic-completion-settings.ts rename to packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts diff --git a/packages/ai-sdk-provider-2/src/types/index.ts b/packages/ai-sdk-provider/src/types/index.ts similarity index 100% 
rename from packages/ai-sdk-provider-2/src/types/index.ts rename to packages/ai-sdk-provider/src/types/index.ts diff --git a/packages/ai-sdk-provider/src/hyperbolic-error.ts b/packages/ai-sdk-provider/src/utils/hyperbolic-error.ts similarity index 100% rename from packages/ai-sdk-provider/src/hyperbolic-error.ts rename to packages/ai-sdk-provider/src/utils/hyperbolic-error.ts diff --git a/packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts b/packages/ai-sdk-provider/src/utils/map-finish-reason.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/utils/map-finish-reason.ts rename to packages/ai-sdk-provider/src/utils/map-finish-reason.ts diff --git a/packages/ai-sdk-provider-2/src/utils/remove-undefined.ts b/packages/ai-sdk-provider/src/utils/remove-undefined.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/utils/remove-undefined.ts rename to packages/ai-sdk-provider/src/utils/remove-undefined.ts diff --git a/packages/ai-sdk-provider-2/src/utils/type-guards.ts b/packages/ai-sdk-provider/src/utils/type-guards.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/utils/type-guards.ts rename to packages/ai-sdk-provider/src/utils/type-guards.ts diff --git a/packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts b/packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/utils/with-user-agent-suffix.ts rename to packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts diff --git a/packages/ai-sdk-provider-2/src/version.ts b/packages/ai-sdk-provider/src/version.ts similarity index 100% rename from packages/ai-sdk-provider-2/src/version.ts rename to packages/ai-sdk-provider/src/version.ts diff --git a/packages/ai-sdk-provider/tsup.config.ts b/packages/ai-sdk-provider/tsup.config.ts index cded0e2..2d8b30b 100644 --- a/packages/ai-sdk-provider/tsup.config.ts +++ b/packages/ai-sdk-provider/tsup.config.ts @@ -1,11 +1,17 @@ +import { readFileSync 
} from "node:fs"; import { defineConfig } from "tsup"; +const package_ = JSON.parse(readFileSync(new URL("./package.json", import.meta.url), "utf8")); + export default defineConfig([ { entry: ["src/index.ts"], format: ["cjs", "esm"], dts: true, sourcemap: true, + define: { + __PACKAGE_VERSION__: JSON.stringify(package_.version), + }, }, { entry: ["src/internal/index.ts"], @@ -13,5 +19,8 @@ export default defineConfig([ format: ["cjs", "esm"], dts: true, sourcemap: true, + define: { + __PACKAGE_VERSION__: JSON.stringify(package_.version), + }, }, ]); From 3967fa811e34bde75871a3e994378bcb102658de Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 13:06:39 -0800 Subject: [PATCH 12/22] rename most things to hyperbolic conventions --- ...onvert-to-hyperbolic-chat-messages.test.ts | 20 ++-- .../convert-to-hyperbolic-chat-messages.ts | 42 +++---- .../ai-sdk-provider/src/chat/errors.test.ts | 18 +-- .../src/chat/file-parser-schema.test.ts | 4 + .../src/chat/file-url-utils.ts | 20 ++-- .../src/chat/get-tool-choice.ts | 4 + .../ai-sdk-provider/src/chat/index.test.ts | 62 +++++----- packages/ai-sdk-provider/src/chat/index.ts | 66 ++++++----- packages/ai-sdk-provider/src/chat/is-url.ts | 4 + .../src/chat/large-pdf-response.test.ts | 16 ++- .../src/chat/payload-comparison.test.ts | 18 +-- packages/ai-sdk-provider/src/chat/schemas.ts | 4 + ...convert-to-hyperbolic-completion-prompt.ts | 4 + .../src/completion/index.test.ts | 26 +++-- .../ai-sdk-provider/src/completion/index.ts | 54 ++++----- .../ai-sdk-provider/src/completion/schemas.ts | 6 +- packages/ai-sdk-provider/src/facade.ts | 40 ++++--- .../src/image/hyperbolic-image-settings.ts | 4 - packages/ai-sdk-provider/src/image/index.ts | 4 - packages/ai-sdk-provider/src/provider.ts | 106 +++++++++--------- .../src/schemas/error-response.test.ts | 4 + .../src/schemas/error-response.ts | 4 + .../ai-sdk-provider/src/schemas/format.ts | 4 + packages/ai-sdk-provider/src/schemas/image.ts | 8 +- 
.../src/schemas/provider-metadata.ts | 14 ++- .../src/schemas/reasoning-details.ts | 4 + .../src/scripts/update-models-list.ts | 4 + .../src/test-utils/test-server.ts | 4 + .../src/tests/provider-options.test.ts | 8 +- .../src/tests/stream-usage-accounting.test.ts | 42 +++---- .../src/tests/usage-accounting.test.ts | 72 ++++++------ .../hyperbolic-chat-completions-input.ts | 26 +++-- .../src/types/hyperbolic-chat-settings.ts | 10 +- .../types/hyperbolic-completion-settings.ts | 8 +- packages/ai-sdk-provider/src/types/index.ts | 8 +- .../src/utils/map-finish-reason.ts | 4 + .../src/utils/remove-undefined.ts | 4 + .../ai-sdk-provider/src/utils/type-guards.ts | 4 + .../src/utils/with-user-agent-suffix.ts | 4 + pnpm-lock.yaml | 66 +++++------ 40 files changed, 482 insertions(+), 342 deletions(-) diff --git a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts index 0b6b653..a8c7b7e 100644 --- a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts +++ b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import { ReasoningDetailType } from "../schemas/reasoning-details"; import { convertToOpenRouterChatMessages } from "./convert-to-hyperbolic-chat-messages"; import { MIME_TO_FORMAT } from "./file-url-utils"; @@ -425,7 +429,7 @@ describe("cache control", () => { data: "ZmlsZSBjb250ZW50", mediaType: "text/plain", providerOptions: { - openrouter: { + hyperbolic: { filename: "file.txt", }, }, @@ -486,7 +490,7 @@ describe("cache control", () => { data: "ZmlsZSBjb250ZW50", mediaType: "text/plain", providerOptions: { - openrouter: { + hyperbolic: { filename: "file.txt", }, }, @@ -739,7 +743,7 @@ describe("reasoning_details accumulation", () => { type: "reasoning", text: "First reasoning chunk", providerOptions: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Text, @@ -753,7 +757,7 @@ describe("reasoning_details accumulation", () => { type: "reasoning", text: "Second reasoning chunk", providerOptions: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Text, @@ -769,7 +773,7 @@ describe("reasoning_details accumulation", () => { }, ], providerOptions: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Text, @@ -820,7 +824,7 @@ describe("reasoning_details accumulation", () => { }, ], providerOptions: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Text, @@ -894,7 +898,7 @@ describe("reasoning_details accumulation", () => { type: "reasoning", text: "First chunk", providerOptions: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Text, @@ -915,7 +919,7 @@ describe("reasoning_details accumulation", () => { }, ], providerOptions: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Text, diff --git a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts 
b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts index 6ff34fd..6fbdbe8 100644 --- a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts +++ b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3FilePart, LanguageModelV3Prompt, @@ -9,32 +13,32 @@ import type { import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; import type { ChatCompletionContentPart, - OpenRouterChatCompletionsInput, + HyperbolicChatCompletionsInput, } from "../types/hyperbolic-chat-completions-input"; -import { OpenRouterProviderOptionsSchema } from "../schemas/provider-metadata"; +import { HyperbolicProviderOptionsSchema } from "../schemas/provider-metadata"; import { getFileUrl, getInputAudioData } from "./file-url-utils"; import { isUrl } from "./is-url"; -// Type for OpenRouter Cache Control following Anthropic's pattern -export type OpenRouterCacheControl = { type: "ephemeral" }; +// Type for Hyperbolic Cache Control following Anthropic's pattern +export type HyperbolicCacheControl = { type: "ephemeral" }; function getCacheControl( providerMetadata: SharedV3ProviderMetadata | undefined, -): OpenRouterCacheControl | undefined { +): HyperbolicCacheControl | undefined { const anthropic = providerMetadata?.anthropic; - const openrouter = providerMetadata?.openrouter; + const hyperbolic = providerMetadata?.hyperbolic; // Allow both cacheControl and cache_control: - return (openrouter?.cacheControl ?? - openrouter?.cache_control ?? + return (hyperbolic?.cacheControl ?? + hyperbolic?.cache_control ?? anthropic?.cacheControl ?? 
- anthropic?.cache_control) as OpenRouterCacheControl | undefined; + anthropic?.cache_control) as HyperbolicCacheControl | undefined; } export function convertToOpenRouterChatMessages( prompt: LanguageModelV3Prompt, -): OpenRouterChatCompletionsInput { - const messages: OpenRouterChatCompletionsInput = []; +): HyperbolicChatCompletionsInput { + const messages: HyperbolicChatCompletionsInput = []; for (const { role, content, providerOptions } of prompt) { switch (role) { case "system": { @@ -106,7 +110,7 @@ export function convertToOpenRouterChatMessages( } const fileName = String( - part.providerOptions?.openrouter?.filename ?? part.filename ?? "", + part.providerOptions?.hyperbolic?.filename ?? part.filename ?? "", ); const fileData = getFileUrl({ @@ -177,7 +181,7 @@ export function convertToOpenRouterChatMessages( } case "tool-call": { const partReasoningDetails = (part.providerOptions as Record) - ?.openrouter as Record | undefined; + ?.hyperbolic as Record | undefined; if ( partReasoningDetails?.reasoning_details && Array.isArray(partReasoningDetails.reasoning_details) @@ -198,15 +202,15 @@ export function convertToOpenRouterChatMessages( } case "reasoning": { reasoning += part.text; - const parsedPartProviderOptions = OpenRouterProviderOptionsSchema.safeParse( + const parsedPartProviderOptions = HyperbolicProviderOptionsSchema.safeParse( part.providerOptions, ); if ( parsedPartProviderOptions.success && - parsedPartProviderOptions.data?.openrouter?.reasoning_details + parsedPartProviderOptions.data?.hyperbolic?.reasoning_details ) { accumulatedReasoningDetails.push( - ...parsedPartProviderOptions.data.openrouter.reasoning_details, + ...parsedPartProviderOptions.data.hyperbolic.reasoning_details, ); } break; @@ -221,12 +225,12 @@ export function convertToOpenRouterChatMessages( } // Check message-level providerOptions for preserved reasoning_details and annotations - const parsedProviderOptions = OpenRouterProviderOptionsSchema.safeParse(providerOptions); + 
const parsedProviderOptions = HyperbolicProviderOptionsSchema.safeParse(providerOptions); const messageReasoningDetails = parsedProviderOptions.success - ? parsedProviderOptions.data?.openrouter?.reasoning_details + ? parsedProviderOptions.data?.hyperbolic?.reasoning_details : undefined; const messageAnnotations = parsedProviderOptions.success - ? parsedProviderOptions.data?.openrouter?.annotations + ? parsedProviderOptions.data?.hyperbolic?.annotations : undefined; // Use message-level reasoning_details if available, otherwise use accumulated from parts diff --git a/packages/ai-sdk-provider/src/chat/errors.test.ts b/packages/ai-sdk-provider/src/chat/errors.test.ts index 1b29729..4bc5f11 100644 --- a/packages/ai-sdk-provider/src/chat/errors.test.ts +++ b/packages/ai-sdk-provider/src/chat/errors.test.ts @@ -1,29 +1,33 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; import { describe, expect, it } from "vitest"; -import { createOpenRouter } from "../provider"; +import { createHyperbolic } from "../provider"; import { createTestServer } from "../test-utils/test-server"; const TEST_PROMPT: LanguageModelV3Prompt = [ { role: "user", content: [{ type: "text", text: "Hello" }] }, ]; -const provider = createOpenRouter({ - baseURL: "https://test.openrouter.ai/api/v1", +const provider = createHyperbolic({ + baseURL: "https://api.hyperbolic.xyz/v1", apiKey: "test-api-key", }); const server = createTestServer({ - "https://test.openrouter.ai/api/v1/chat/completions": {}, + "https://api.hyperbolic.xyz/v1/chat/completions": {}, }); describe("HTTP 200 Error Response Handling", () => { describe("doGenerate", () => { it("should throw APICallError for HTTP 200 responses with error payloads", async () => { - // OpenRouter sometimes returns HTTP 200 with an error object instead of choices + // Hyperbolic sometimes 
returns HTTP 200 with an error object instead of choices // This can occur for various server errors (e.g., internal errors, processing failures) // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "json-value", body: { error: { @@ -46,7 +50,7 @@ describe("HTTP 200 Error Response Handling", () => { it("should parse successful responses normally when no error present", async () => { // Normal successful response without error // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "json-value", body: { id: "gen-123", diff --git a/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts b/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts index 3e83ddd..19da1ec 100644 --- a/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts +++ b/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import { describe, expect, it } from "vitest"; import { OpenRouterNonStreamChatCompletionResponseSchema } from "./schemas"; diff --git a/packages/ai-sdk-provider/src/chat/file-url-utils.ts b/packages/ai-sdk-provider/src/chat/file-url-utils.ts index c4c63c8..101a32c 100644 --- a/packages/ai-sdk-provider/src/chat/file-url-utils.ts +++ b/packages/ai-sdk-provider/src/chat/file-url-utils.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3FilePart } from "@ai-sdk/provider"; import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils"; @@ -75,18 +79,18 @@ export const MIME_TO_FORMAT: Record = { }; /** - * Converts an audio file part to OpenRouter's input_audio data format. + * Converts an audio file part to Hyperbolic's input_audio data format. * * This function extracts base64-encoded audio data from a file part and - * normalizes the format to one of the supported OpenRouter audio formats. + * normalizes the format to one of the supported Hyperbolic audio formats. * * @param part - The file part containing audio data. Must have a mediaType * starting with "audio/" and contain either base64 data or a data URL. * * @returns An object with `data` (base64-encoded audio) and `format` - * suitable for use in OpenRouter's `input_audio` field. + * suitable for use in Hyperbolic's `input_audio` field. * - * @throws {Error} When audio is provided as an HTTP/HTTPS URL. OpenRouter requires + * @throws {Error} When audio is provided as an HTTP/HTTPS URL. Hyperbolic requires * audio to be base64-encoded inline. The error message includes instructions for * downloading and encoding the audio locally. * @@ -107,7 +111,7 @@ export function getInputAudioData(part: LanguageModelV3FilePart): { defaultMediaType: "audio/mpeg", }); - // OpenRouter's input_audio doesn't support URLs directly + // Hyperbolic's input_audio doesn't support URLs directly if ( isUrl({ url: fileData, @@ -116,7 +120,7 @@ export function getInputAudioData(part: LanguageModelV3FilePart): { ) { throw new Error( `Audio files cannot be provided as URLs.\n\n` + - `OpenRouter requires audio to be base64-encoded. Please:\n` + + `Hyperbolic requires audio to be base64-encoded. Please:\n` + `1. Download the audio file locally\n` + `2. Read it as a Buffer or Uint8Array\n` + `3. 
Pass it as the data parameter\n\n` + @@ -132,14 +136,14 @@ export function getInputAudioData(part: LanguageModelV3FilePart): { const mediaType = part.mediaType || "audio/mpeg"; const rawFormat = mediaType.replace("audio/", ""); - // Normalize format names for OpenRouter using MIME type mapping + // Normalize format names for Hyperbolic using MIME type mapping const format = MIME_TO_FORMAT[rawFormat]; if (format === undefined) { const supportedList = OPENROUTER_AUDIO_FORMATS.join(", "); throw new Error( `Unsupported audio format: "${mediaType}"\n\n` + - `OpenRouter supports the following audio formats: ${supportedList}\n\n` + + `Hyperbolic supports the following audio formats: ${supportedList}\n\n` + `Learn more: https://openrouter.ai/docs/features/multimodal/audio`, ); } diff --git a/packages/ai-sdk-provider/src/chat/get-tool-choice.ts b/packages/ai-sdk-provider/src/chat/get-tool-choice.ts index 69e3815..4261f75 100644 --- a/packages/ai-sdk-provider/src/chat/get-tool-choice.ts +++ b/packages/ai-sdk-provider/src/chat/get-tool-choice.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3ToolChoice } from "@ai-sdk/provider"; import { InvalidArgumentError } from "@ai-sdk/provider"; import { z } from "zod/v4"; diff --git a/packages/ai-sdk-provider/src/chat/index.test.ts b/packages/ai-sdk-provider/src/chat/index.test.ts index 27a63a1..63ed42b 100644 --- a/packages/ai-sdk-provider/src/chat/index.test.ts +++ b/packages/ai-sdk-provider/src/chat/index.test.ts @@ -1,10 +1,14 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3Prompt, LanguageModelV3StreamPart } from "@ai-sdk/provider"; import type { JSONSchema7 } from "json-schema"; import { vi } from "vitest"; import type { ImageResponse } from "../schemas/image"; import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; -import { createOpenRouter } from "../provider"; +import { createHyperbolic } from "../provider"; import { ReasoningDetailType } from "../schemas/reasoning-details"; import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; @@ -116,7 +120,7 @@ const TEST_IMAGE_URL = `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACA // eslint-disable-next-line @typescript-eslint/no-non-null-assertion const TEST_IMAGE_BASE64 = TEST_IMAGE_URL.split(",")[1]!; -const provider = createOpenRouter({ +const provider = createHyperbolic({ apiKey: "test-api-key", compatibility: "strict", }); @@ -352,7 +356,7 @@ describe("doGenerate", () => { type: "reasoning", text: "Let me analyze this request...", providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: "reasoning.text", @@ -366,7 +370,7 @@ describe("doGenerate", () => { type: "reasoning", text: "The user wants a greeting response.", providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: "reasoning.summary", @@ -403,7 +407,7 @@ describe("doGenerate", () => { type: "reasoning", text: "[REDACTED]", providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: "reasoning.encrypted", @@ -445,7 +449,7 @@ describe("doGenerate", () => { type: "reasoning", text: "Processing from reasoning_details...", providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: "reasoning.text", @@ -459,7 +463,7 @@ describe("doGenerate", () => { type: "reasoning", text: "Summary from reasoning_details", providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [ { type: 
"reasoning.summary", @@ -639,7 +643,7 @@ describe("doGenerate", () => { it("should pass headers", async () => { prepareJsonResponse({ content: "" }); - const provider = createOpenRouter({ + const provider = createHyperbolic({ apiKey: "test-api-key", headers: { "Custom-Provider-Header": "provider-header-value", @@ -910,7 +914,7 @@ describe("doStream", () => { finishReason: { unified: "stop", raw: "stop" }, providerMetadata: { - openrouter: { + hyperbolic: { usage: { completionTokens: 227, promptTokens: 17, @@ -959,7 +963,7 @@ describe("doStream", () => { chunk.type === "finish", ); const openrouterUsage = ( - finishChunk?.providerMetadata?.openrouter as { + finishChunk?.providerMetadata?.hyperbolic as { usage?: { cost?: number; costDetails?: { upstreamInferenceCost: number }; @@ -995,7 +999,7 @@ describe("doStream", () => { chunk.type === "finish", ); const openrouterUsage = ( - finishChunk?.providerMetadata?.openrouter as { + finishChunk?.providerMetadata?.hyperbolic as { usage?: { cost?: number; costDetails?: { upstreamInferenceCost: number }; @@ -1087,7 +1091,7 @@ describe("doStream", () => { // First delta should have reasoning_details from first chunk expect(reasoningDeltaElements[0]?.providerMetadata).toEqual({ - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Text, @@ -1099,7 +1103,7 @@ describe("doStream", () => { // Second and third deltas should have reasoning_details from second chunk expect(reasoningDeltaElements[1]?.providerMetadata).toEqual({ - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Summary, @@ -1114,7 +1118,7 @@ describe("doStream", () => { }); expect(reasoningDeltaElements[2]?.providerMetadata).toEqual({ - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Summary, @@ -1176,7 +1180,7 @@ describe("doStream", () => { // Verify each delta has the correct reasoning_details in providerMetadata expect(reasoningDeltaElements[0]?.providerMetadata).toEqual({ - 
openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Text, @@ -1187,7 +1191,7 @@ describe("doStream", () => { }); expect(reasoningDeltaElements[1]?.providerMetadata).toEqual({ - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Summary, @@ -1198,7 +1202,7 @@ describe("doStream", () => { }); expect(reasoningDeltaElements[2]?.providerMetadata).toEqual({ - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Encrypted, @@ -1212,7 +1216,7 @@ describe("doStream", () => { const reasoningStart = elements.find(isReasoningStartPart); expect(reasoningStart?.providerMetadata).toEqual({ - openrouter: { + hyperbolic: { reasoning_details: [ { type: ReasoningDetailType.Text, @@ -1469,7 +1473,7 @@ describe("doStream", () => { toolName: "test-tool", input: '{"value":"Sparkle Day"}', providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [], }, }, @@ -1494,7 +1498,7 @@ describe("doStream", () => { type: "finish", finishReason: { unified: "tool-calls", raw: "tool_calls" }, providerMetadata: { - openrouter: { + hyperbolic: { usage: { completionTokens: 17, promptTokens: 53, @@ -1584,7 +1588,7 @@ describe("doStream", () => { toolName: "test-tool", input: '{"value":"Sparkle Day"}', providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [], }, }, @@ -1609,7 +1613,7 @@ describe("doStream", () => { type: "finish", finishReason: { unified: "tool-calls", raw: "tool_calls" }, providerMetadata: { - openrouter: { + hyperbolic: { usage: { completionTokens: 17, promptTokens: 53, @@ -1742,7 +1746,7 @@ describe("doStream", () => { type: "finish", finishReason: { unified: "stop", raw: "stop" }, providerMetadata: { - openrouter: { + hyperbolic: { usage: { completionTokens: 17, promptTokens: 53, @@ -1799,7 +1803,7 @@ describe("doStream", () => { { finishReason: { unified: "error", raw: undefined }, providerMetadata: { - openrouter: { + hyperbolic: { usage: {}, }, }, @@ -1841,7 +1845,7 @@ 
describe("doStream", () => { type: "finish", providerMetadata: { - openrouter: { + hyperbolic: { usage: {}, }, }, @@ -1880,7 +1884,7 @@ describe("doStream", () => { it("should pass headers", async () => { prepareStreamResponse({ content: [] }); - const provider = createOpenRouter({ + const provider = createHyperbolic({ apiKey: "test-api-key", headers: { "Custom-Provider-Header": "provider-header-value", @@ -1909,7 +1913,7 @@ describe("doStream", () => { it("should pass extra body", async () => { prepareStreamResponse({ content: [] }); - const provider = createOpenRouter({ + const provider = createHyperbolic({ apiKey: "test-api-key", extraBody: { custom_field: "custom_value", @@ -2118,7 +2122,7 @@ describe("doStream", () => { expect(finishChunk).toBeDefined(); // Verify file annotations are included in providerMetadata - const openrouterMetadata = finishChunk?.providerMetadata?.openrouter as { + const openrouterMetadata = finishChunk?.providerMetadata?.hyperbolic as { annotations?: Array<{ type: "file"; file: { @@ -2185,7 +2189,7 @@ describe("doStream", () => { chunk.type === "finish", ); - const openrouterMetadata = finishChunk?.providerMetadata?.openrouter as { + const openrouterMetadata = finishChunk?.providerMetadata?.hyperbolic as { annotations?: Array<{ type: "file"; file: { diff --git a/packages/ai-sdk-provider/src/chat/index.ts b/packages/ai-sdk-provider/src/chat/index.ts index e024efd..d2117d1 100644 --- a/packages/ai-sdk-provider/src/chat/index.ts +++ b/packages/ai-sdk-provider/src/chat/index.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3, LanguageModelV3CallOptions, @@ -26,12 +30,12 @@ import { import type { FileAnnotation } from "../schemas/provider-metadata"; import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; import type { - OpenRouterChatModelId, - OpenRouterChatSettings, + HyperbolicChatModelId, + HyperbolicChatSettings, } from "../types/hyperbolic-chat-settings"; -import type { OpenRouterUsageAccounting } from "../types/index"; +import type { HyperbolicUsageAccounting } from "../types/index"; import { openrouterFailedResponseHandler } from "../schemas/error-response"; -import { OpenRouterProviderMetadataSchema } from "../schemas/provider-metadata"; +import { HyperbolicProviderMetadataSchema } from "../schemas/provider-metadata"; import { ReasoningDetailType } from "../schemas/reasoning-details"; import { createFinishReason, mapOpenRouterFinishReason } from "../utils/map-finish-reason"; import { convertToOpenRouterChatMessages } from "./convert-to-hyperbolic-chat-messages"; @@ -42,7 +46,7 @@ import { OpenRouterStreamChatCompletionChunkSchema, } from "./schemas"; -type OpenRouterChatConfig = { +type HyperbolicChatConfig = { provider: string; compatibility: "strict" | "compatible"; headers: () => Record; @@ -51,26 +55,26 @@ type OpenRouterChatConfig = { extraBody?: Record; }; -export class OpenRouterChatLanguageModel implements LanguageModelV3 { +export class HyperbolicChatLanguageModel implements LanguageModelV3 { readonly specificationVersion = "v3" as const; - readonly provider = "openrouter"; + readonly provider = "hyperbolic"; readonly defaultObjectGenerationMode = "tool" as const; - readonly modelId: OpenRouterChatModelId; + readonly modelId: HyperbolicChatModelId; readonly supportsImageUrls = true; readonly supportedUrls: Record = { "image/*": [/^data:image\/[a-zA-Z]+;base64,/, /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i], // 'text/*': [/^data:text\//, /^https?:\/\/.+$/], 
"application/*": [/^data:application\//, /^https?:\/\/.+$/], }; - readonly settings: OpenRouterChatSettings; + readonly settings: HyperbolicChatSettings; - private readonly config: OpenRouterChatConfig; + private readonly config: HyperbolicChatConfig; constructor( - modelId: OpenRouterChatModelId, - settings: OpenRouterChatSettings, - config: OpenRouterChatConfig, + modelId: HyperbolicChatModelId, + settings: HyperbolicChatSettings, + config: HyperbolicChatConfig, ) { this.modelId = modelId; this.settings = settings; @@ -143,7 +147,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { // messages: messages: convertToOpenRouterChatMessages(prompt), - // OpenRouter specific settings: + // Hyperbolic specific settings: include_reasoning: this.settings.includeReasoning, reasoning: this.settings.reasoning, usage: this.settings.usage, @@ -190,10 +194,10 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { usage: LanguageModelV3Usage; warnings: Array; providerMetadata?: { - openrouter: { + hyperbolic: { provider: string; reasoning_details?: ReasoningDetailUnion[]; - usage: OpenRouterUsageAccounting; + usage: HyperbolicUsageAccounting; }; }; request?: { body?: unknown }; @@ -297,7 +301,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { type: "reasoning" as const, text: detail.text, providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [detail], }, }, @@ -311,7 +315,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { type: "reasoning" as const, text: detail.summary, providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [detail], }, }, @@ -326,7 +330,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { type: "reasoning" as const, text: "[REDACTED]", providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: [detail], }, }, @@ -370,7 +374,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { toolName: 
toolCall.function.name, input: toolCall.function.arguments, providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: reasoningDetails, }, }, @@ -398,7 +402,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { url: annotation.url_citation.url, title: annotation.url_citation.title, providerMetadata: { - openrouter: { + hyperbolic: { content: annotation.url_citation.content || "", }, }, @@ -441,7 +445,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { usage: usageInfo, warnings: [], providerMetadata: { - openrouter: OpenRouterProviderMetadataSchema.parse({ + hyperbolic: HyperbolicProviderMetadataSchema.parse({ provider: response.provider ?? "", reasoning_details: choice.message.reasoning_details ?? [], annotations: fileAnnotations && fileAnnotations.length > 0 ? fileAnnotations : undefined, @@ -555,7 +559,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { }; // Track provider-specific usage information - const openrouterUsage: Partial = {}; + const openrouterUsage: Partial = {}; // Track reasoning details to preserve for multi-turn conversations const accumulatedReasoningDetails: ReasoningDetailUnion[] = []; @@ -616,7 +620,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { usage.inputTokens.total = value.usage.prompt_tokens; usage.outputTokens.total = value.usage.completion_tokens; - // Collect OpenRouter specific usage information + // Collect Hyperbolic specific usage information openrouterUsage.promptTokens = value.usage.prompt_tokens; if (value.usage.prompt_tokens_details) { @@ -708,7 +712,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { // Emit reasoning_details in providerMetadata for each delta chunk // so users can accumulate them on their end before sending back const reasoningMetadata: SharedV3ProviderMetadata = { - openrouter: { + hyperbolic: { reasoning_details: delta.reasoning_details, }, }; @@ -779,7 +783,7 @@ export class 
OpenRouterChatLanguageModel implements LanguageModelV3 { url: annotation.url_citation.url, title: annotation.url_citation.title, providerMetadata: { - openrouter: { + hyperbolic: { content: annotation.url_citation.content || "", }, }, @@ -799,7 +803,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { for (const toolCallDelta of delta.tool_calls) { const index = toolCallDelta.index ?? toolCalls.length - 1; - // Tool call start. OpenRouter returns all information except the arguments in the first chunk. + // Tool call start. Hyperbolic returns all information except the arguments in the first chunk. if (toolCalls[index] == null) { if (toolCallDelta.type !== "function") { throw new InvalidResponseDataError({ @@ -875,7 +879,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { toolName: toolCall.function.name, input: toolCall.function.arguments, providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: accumulatedReasoningDetails, }, }, @@ -933,7 +937,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { toolName: toolCall.function.name, input: toolCall.function.arguments, providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: accumulatedReasoningDetails, }, }, @@ -980,7 +984,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { ? 
toolCall.function.arguments : "{}", providerMetadata: { - openrouter: { + hyperbolic: { reasoning_details: accumulatedReasoningDetails, }, }, @@ -1005,7 +1009,7 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 { } const openrouterMetadata: { - usage: Partial; + usage: Partial; provider?: string; reasoning_details?: ReasoningDetailUnion[]; annotations?: FileAnnotation[]; diff --git a/packages/ai-sdk-provider/src/chat/is-url.ts b/packages/ai-sdk-provider/src/chat/is-url.ts index 3de7f0f..94a3691 100644 --- a/packages/ai-sdk-provider/src/chat/is-url.ts +++ b/packages/ai-sdk-provider/src/chat/is-url.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + export function isUrl({ url, protocols, diff --git a/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts b/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts index bc92a70..7af4443 100644 --- a/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts +++ b/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts @@ -1,20 +1,24 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; import { describe, expect, it } from "vitest"; -import { createOpenRouter } from "../provider"; +import { createHyperbolic } from "../provider"; import { createTestServer } from "../test-utils/test-server"; const TEST_PROMPT: LanguageModelV3Prompt = [ { role: "user", content: [{ type: "text", text: "Hello" }] }, ]; -const provider = createOpenRouter({ - baseURL: "https://test.openrouter.ai/api/v1", +const provider = createHyperbolic({ + baseURL: "https://api.hyperbolic.xyz/v1", apiKey: "test-api-key", }); const server = createTestServer({ - "https://test.openrouter.ai/api/v1/chat/completions": {}, + "https://api.hyperbolic.xyz/v1/chat/completions": {}, }); describe("Large PDF Response Handling", () => { @@ -23,7 +27,7 @@ describe("Large PDF Response Handling", () => { // This is the actual response OpenRouter returns for large PDF failures // HTTP 200 status but contains error object instead of choices // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "json-value", body: { error: { @@ -46,7 +50,7 @@ describe("Large PDF Response Handling", () => { it("should parse successful large PDF responses with file annotations", async () => { // Successful response with file annotations from FileParserPlugin // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://test.openrouter.ai/api/v1/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "json-value", body: { id: "gen-123", diff --git a/packages/ai-sdk-provider/src/chat/payload-comparison.test.ts b/packages/ai-sdk-provider/src/chat/payload-comparison.test.ts index 14098c0..9016ee4 100644 --- 
a/packages/ai-sdk-provider/src/chat/payload-comparison.test.ts +++ b/packages/ai-sdk-provider/src/chat/payload-comparison.test.ts @@ -1,16 +1,20 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; import { describe, expect, it, vi } from "vitest"; -import type { OpenRouterChatCompletionsInput } from "../types/hyperbolic-chat-completions-input"; -import type { OpenRouterChatSettings } from "../types/hyperbolic-chat-settings"; -import { createOpenRouter } from "../provider"; +import type { HyperbolicChatCompletionsInput } from "../types/hyperbolic-chat-completions-input"; +import type { HyperbolicChatSettings } from "../types/hyperbolic-chat-settings"; +import { createHyperbolic } from "../provider"; describe("Payload Comparison - Large PDF", () => { it("should send payload matching fetch baseline for large PDFs", async () => { interface CapturedRequestBody { model: string; - messages: OpenRouterChatCompletionsInput; - plugins?: OpenRouterChatSettings["plugins"]; + messages: HyperbolicChatCompletionsInput; + plugins?: HyperbolicChatSettings["plugins"]; usage?: { include: boolean }; } @@ -50,7 +54,7 @@ describe("Payload Comparison - Large PDF", () => { ); }) as typeof fetch; - const provider = createOpenRouter({ + const provider = createHyperbolic({ apiKey: "test-key", fetch: mockFetch, }); @@ -117,7 +121,7 @@ describe("Payload Comparison - Large PDF", () => { expect(filePart).toBeDefined(); // CRITICAL ASSERTION: The file part should have a nested 'file' object with 'file_data' - // This is what the fetch example sends and what OpenRouter expects + // This is what the fetch example sends and what Hyperbolic expects expect(filePart).toMatchObject({ type: "file", file: { diff --git a/packages/ai-sdk-provider/src/chat/schemas.ts b/packages/ai-sdk-provider/src/chat/schemas.ts index c8b5e7e..37e1744 100644 
--- a/packages/ai-sdk-provider/src/chat/schemas.ts +++ b/packages/ai-sdk-provider/src/chat/schemas.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import { z } from "zod/v4"; import { OpenRouterErrorResponseSchema } from "../schemas/error-response"; diff --git a/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts b/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts index addc37f..672ec73 100644 --- a/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts +++ b/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3FilePart, LanguageModelV3Prompt, diff --git a/packages/ai-sdk-provider/src/completion/index.test.ts b/packages/ai-sdk-provider/src/completion/index.test.ts index 0c6e663..420ae46 100644 --- a/packages/ai-sdk-provider/src/completion/index.test.ts +++ b/packages/ai-sdk-provider/src/completion/index.test.ts @@ -1,7 +1,11 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3Prompt, LanguageModelV3StreamPart } from "@ai-sdk/provider"; import { vi } from "vitest"; -import { createOpenRouter } from "../provider"; +import { createHyperbolic } from "../provider"; import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; vi.mock("../version", () => ({ @@ -37,7 +41,7 @@ const TEST_LOGPROBS = { ] as Record[], }; -const provider = createOpenRouter({ +const provider = createHyperbolic({ apiKey: "test-api-key", compatibility: "strict", }); @@ -135,7 +139,7 @@ describe("doGenerate", () => { it("should extract logprobs", async () => { prepareJsonResponse({ logprobs: TEST_LOGPROBS }); - const provider = createOpenRouter({ apiKey: "test-api-key" }); + const provider = createHyperbolic({ apiKey: "test-api-key" }); await provider.completion("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ prompt: TEST_PROMPT, @@ -204,7 +208,7 @@ describe("doGenerate", () => { it("should pass headers", async () => { prepareJsonResponse({ content: "" }); - const provider = createOpenRouter({ + const provider = createHyperbolic({ apiKey: "test-api-key", headers: { "Custom-Provider-Header": "provider-header-value", @@ -316,7 +320,7 @@ describe("doStream", () => { type: "finish", finishReason: { unified: "stop", raw: "stop" }, providerMetadata: { - openrouter: { + hyperbolic: { usage: { promptTokens: 10, completionTokens: 362, @@ -365,7 +369,7 @@ describe("doStream", () => { element.type === "finish", ); const openrouterUsage = ( - finishChunk?.providerMetadata?.openrouter as { + finishChunk?.providerMetadata?.hyperbolic as { usage?: { cost?: number; costDetails?: { upstreamInferenceCost: number }; @@ -401,7 +405,7 @@ describe("doStream", () => { element.type === "finish", ); const openrouterUsage = ( - finishChunk?.providerMetadata?.openrouter as { + finishChunk?.providerMetadata?.hyperbolic as { usage?: { cost?: number; costDetails?: { 
upstreamInferenceCost: number }; @@ -445,7 +449,7 @@ describe("doStream", () => { { finishReason: { unified: "error", raw: undefined }, providerMetadata: { - openrouter: { + hyperbolic: { usage: {}, }, }, @@ -485,7 +489,7 @@ describe("doStream", () => { expect(elements[1]).toStrictEqual({ finishReason: { unified: "error", raw: undefined }, providerMetadata: { - openrouter: { + hyperbolic: { usage: {}, }, }, @@ -525,7 +529,7 @@ describe("doStream", () => { it("should pass headers", async () => { prepareStreamResponse({ content: [] }); - const provider = createOpenRouter({ + const provider = createHyperbolic({ apiKey: "test-api-key", headers: { "Custom-Provider-Header": "provider-header-value", @@ -554,7 +558,7 @@ describe("doStream", () => { it("should pass extra body", async () => { prepareStreamResponse({ content: [] }); - const provider = createOpenRouter({ + const provider = createHyperbolic({ apiKey: "test-api-key", extraBody: { custom_field: "custom_value", diff --git a/packages/ai-sdk-provider/src/completion/index.ts b/packages/ai-sdk-provider/src/completion/index.ts index 945fe79..161a855 100644 --- a/packages/ai-sdk-provider/src/completion/index.ts +++ b/packages/ai-sdk-provider/src/completion/index.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3, LanguageModelV3CallOptions, @@ -20,15 +24,15 @@ import { postJsonToApi, } from "@ai-sdk/provider-utils"; -import type { OpenRouterUsageAccounting } from "../types"; +import type { HyperbolicUsageAccounting } from "../types"; import type { - OpenRouterCompletionModelId, - OpenRouterCompletionSettings, + HyperbolicCompletionModelId, + HyperbolicCompletionSettings, } from "../types/hyperbolic-completion-settings"; import { openrouterFailedResponseHandler } from "../schemas/error-response"; import { createFinishReason, mapOpenRouterFinishReason } from "../utils/map-finish-reason"; import { convertToOpenRouterCompletionPrompt } from "./convert-to-hyperbolic-completion-prompt"; -import { OpenRouterCompletionChunkSchema } from "./schemas"; +import { HyperbolicCompletionChunkSchema } from "./schemas"; type OpenRouterCompletionConfig = { provider: string; @@ -39,10 +43,10 @@ type OpenRouterCompletionConfig = { extraBody?: Record; }; -export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { +export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { readonly specificationVersion = "v3" as const; readonly provider = "openrouter"; - readonly modelId: OpenRouterCompletionModelId; + readonly modelId: HyperbolicCompletionModelId; readonly supportsImageUrls = true; readonly supportedUrls: Record = { "image/*": [/^data:image\/[a-zA-Z]+;base64,/, /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i], @@ -50,13 +54,13 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { "application/*": [/^data:application\//, /^https?:\/\/.+$/], }; readonly defaultObjectGenerationMode = undefined; - readonly settings: OpenRouterCompletionSettings; + readonly settings: HyperbolicCompletionSettings; private readonly config: OpenRouterCompletionConfig; constructor( - modelId: OpenRouterCompletionModelId, - settings: OpenRouterCompletionSettings, + modelId: 
HyperbolicCompletionModelId, + settings: HyperbolicCompletionSettings, config: OpenRouterCompletionConfig, ) { this.modelId = modelId; @@ -128,7 +132,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { // prompt: prompt: completionPrompt, - // OpenRouter specific settings: + // Hyperbolic specific settings: include_reasoning: this.settings.includeReasoning, reasoning: this.settings.reasoning, @@ -157,7 +161,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { headers: combineHeaders(this.config.headers(), options.headers), body: args, failedResponseHandler: openrouterFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler(OpenRouterCompletionChunkSchema), + successfulResponseHandler: createJsonResponseHandler(HyperbolicCompletionChunkSchema), abortSignal: options.abortSignal, fetch: this.config.fetch, }); @@ -181,7 +185,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { if (!choice) { throw new NoContentGeneratedError({ - message: "No choice in OpenRouter completion response", + message: "No choice in Hyperbolic completion response", }); } @@ -239,7 +243,7 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { this.config.compatibility === "strict" ? 
{ include_usage: true } : undefined, }, failedResponseHandler: openrouterFailedResponseHandler, - successfulResponseHandler: createEventSourceResponseHandler(OpenRouterCompletionChunkSchema), + successfulResponseHandler: createEventSourceResponseHandler(HyperbolicCompletionChunkSchema), abortSignal: options.abortSignal, fetch: this.config.fetch, }); @@ -259,11 +263,11 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { }, }; - const openrouterUsage: Partial = {}; + const hyperbolicUsage: Partial = {}; return { stream: response.pipeThrough( new TransformStream< - ParseResult>, + ParseResult>, LanguageModelV3StreamPart >({ transform(chunk, controller) { @@ -287,33 +291,33 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { usage.inputTokens.total = value.usage.prompt_tokens; usage.outputTokens.total = value.usage.completion_tokens; - // Collect OpenRouter specific usage information - openrouterUsage.promptTokens = value.usage.prompt_tokens; + // Collect Hyperbolic specific usage information + hyperbolicUsage.promptTokens = value.usage.prompt_tokens; if (value.usage.prompt_tokens_details) { const cachedInputTokens = value.usage.prompt_tokens_details.cached_tokens ?? 0; usage.inputTokens.cacheRead = cachedInputTokens; - openrouterUsage.promptTokensDetails = { + hyperbolicUsage.promptTokensDetails = { cachedTokens: cachedInputTokens, }; } - openrouterUsage.completionTokens = value.usage.completion_tokens; + hyperbolicUsage.completionTokens = value.usage.completion_tokens; if (value.usage.completion_tokens_details) { const reasoningTokens = value.usage.completion_tokens_details.reasoning_tokens ?? 
0; usage.outputTokens.reasoning = reasoningTokens; - openrouterUsage.completionTokensDetails = { + hyperbolicUsage.completionTokensDetails = { reasoningTokens, }; } - openrouterUsage.cost = value.usage.cost; - openrouterUsage.totalTokens = value.usage.total_tokens; + hyperbolicUsage.cost = value.usage.cost; + hyperbolicUsage.totalTokens = value.usage.total_tokens; const upstreamInferenceCost = value.usage.cost_details?.upstream_inference_cost; if (upstreamInferenceCost != null) { - openrouterUsage.costDetails = { + hyperbolicUsage.costDetails = { upstreamInferenceCost, }; } @@ -340,8 +344,8 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 { finishReason, usage, providerMetadata: { - openrouter: { - usage: openrouterUsage, + hyperbolic: { + usage: hyperbolicUsage, }, }, }); diff --git a/packages/ai-sdk-provider/src/completion/schemas.ts b/packages/ai-sdk-provider/src/completion/schemas.ts index 2e48c1f..12ba2da 100644 --- a/packages/ai-sdk-provider/src/completion/schemas.ts +++ b/packages/ai-sdk-provider/src/completion/schemas.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import { z } from "zod/v4"; import { OpenRouterErrorResponseSchema } from "../schemas/error-response"; @@ -5,7 +9,7 @@ import { ReasoningDetailArraySchema } from "../schemas/reasoning-details"; // limited version of the schema, focussed on what is needed for the implementation // this approach limits breakages when the API changes and increases efficiency -export const OpenRouterCompletionChunkSchema = z.union([ +export const HyperbolicCompletionChunkSchema = z.union([ z .object({ id: z.string().optional(), diff --git a/packages/ai-sdk-provider/src/facade.ts b/packages/ai-sdk-provider/src/facade.ts index 8b6c332..69bd412 100644 --- a/packages/ai-sdk-provider/src/facade.ts +++ b/packages/ai-sdk-provider/src/facade.ts @@ -1,21 +1,25 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; -import type { OpenRouterProviderSettings } from "./provider"; +import type { HyperbolicProviderSettings } from "./provider"; import type { - OpenRouterChatModelId, - OpenRouterChatSettings, + HyperbolicChatModelId, + HyperbolicChatSettings, } from "./types/hyperbolic-chat-settings"; import type { - OpenRouterCompletionModelId, - OpenRouterCompletionSettings, + HyperbolicCompletionModelId, + HyperbolicCompletionSettings, } from "./types/hyperbolic-completion-settings"; -import { OpenRouterChatLanguageModel } from "./chat"; -import { OpenRouterCompletionLanguageModel } from "./completion"; +import { HyperbolicChatLanguageModel } from "./chat"; +import { HyperbolicCompletionLanguageModel } from "./completion"; /** -@deprecated Use `createOpenRouter` instead. +@deprecated Use `createHyperbolic` instead. */ -export class OpenRouter { +export class Hyperbolic { /** Use a different URL prefix for API calls, e.g. to use proxy servers. 
The default prefix is `https://api.hyperbolic.xyz/v1`. @@ -39,9 +43,9 @@ Custom headers to include in the requests. readonly api_keys?: Record; /** - * Creates a new OpenRouter provider instance. + * Creates a new Hyperbolic provider instance. */ - constructor(options: OpenRouterProviderSettings = {}) { + constructor(options: HyperbolicProviderSettings = {}) { this.baseURL = withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? "https://api.hyperbolic.xyz/v1"; this.apiKey = options.apiKey; @@ -56,7 +60,7 @@ Custom headers to include in the requests. Authorization: `Bearer ${loadApiKey({ apiKey: this.apiKey, environmentVariableName: "OPENROUTER_API_KEY", - description: "OpenRouter", + description: "Hyperbolic", })}`, ...this.headers, ...(this.api_keys && @@ -67,18 +71,18 @@ Custom headers to include in the requests. }; } - chat(modelId: OpenRouterChatModelId, settings: OpenRouterChatSettings = {}) { - return new OpenRouterChatLanguageModel(modelId, settings, { - provider: "openrouter.chat", + chat(modelId: HyperbolicChatModelId, settings: HyperbolicChatSettings = {}) { + return new HyperbolicChatLanguageModel(modelId, settings, { + provider: "hyperbolic.chat", ...this.baseConfig, compatibility: "strict", url: ({ path }) => `${this.baseURL}${path}`, }); } - completion(modelId: OpenRouterCompletionModelId, settings: OpenRouterCompletionSettings = {}) { - return new OpenRouterCompletionLanguageModel(modelId, settings, { - provider: "openrouter.completion", + completion(modelId: HyperbolicCompletionModelId, settings: HyperbolicCompletionSettings = {}) { + return new HyperbolicCompletionLanguageModel(modelId, settings, { + provider: "hyperbolic.completion", ...this.baseConfig, compatibility: "strict", url: ({ path }) => `${this.baseURL}${path}`, diff --git a/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts b/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts index 739fc61..1f4b3c7 100644 --- 
a/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts +++ b/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts @@ -1,7 +1,3 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - import type { GenerateImageResult } from "ai"; import type { OpenRouterSharedSettings as HyperbolicSharedSettings } from "../types"; diff --git a/packages/ai-sdk-provider/src/image/index.ts b/packages/ai-sdk-provider/src/image/index.ts index f3314e3..17d57ce 100644 --- a/packages/ai-sdk-provider/src/image/index.ts +++ b/packages/ai-sdk-provider/src/image/index.ts @@ -1,7 +1,3 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - import type { ImageModelV3, SharedV3Warning } from "@ai-sdk/provider"; import { combineHeaders, createJsonResponseHandler, postJsonToApi } from "@ai-sdk/provider-utils"; import { z } from "zod"; diff --git a/packages/ai-sdk-provider/src/provider.ts b/packages/ai-sdk-provider/src/provider.ts index eb9bae2..0cd762b 100644 --- a/packages/ai-sdk-provider/src/provider.ts +++ b/packages/ai-sdk-provider/src/provider.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import type { ProviderV3 } from "@ai-sdk/provider"; import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; @@ -6,59 +10,59 @@ import type { HyperbolicImageSettings, } from "./image/hyperbolic-image-settings"; import type { - OpenRouterChatModelId, - OpenRouterChatSettings, + HyperbolicChatModelId, + HyperbolicChatSettings, } from "./types/hyperbolic-chat-settings"; import type { - OpenRouterCompletionModelId, - OpenRouterCompletionSettings, + HyperbolicCompletionModelId, + HyperbolicCompletionSettings, } from "./types/hyperbolic-completion-settings"; -import { OpenRouterChatLanguageModel } from "./chat"; -import { OpenRouterCompletionLanguageModel } from "./completion"; +import { HyperbolicChatLanguageModel } from "./chat"; +import { HyperbolicCompletionLanguageModel } from "./completion"; import { HyperbolicImageModel } from "./image"; import { withUserAgentSuffix } from "./utils/with-user-agent-suffix"; import { VERSION } from "./version"; -export type { OpenRouterChatSettings, OpenRouterCompletionSettings }; +export type { HyperbolicChatSettings, HyperbolicCompletionSettings }; -export interface OpenRouterProvider extends ProviderV3 { +export interface HyperbolicProvider extends ProviderV3 { ( - modelId: OpenRouterChatModelId, - settings?: OpenRouterCompletionSettings, - ): OpenRouterCompletionLanguageModel; - (modelId: OpenRouterChatModelId, settings?: OpenRouterChatSettings): OpenRouterChatLanguageModel; + modelId: HyperbolicChatModelId, + settings?: HyperbolicCompletionSettings, + ): HyperbolicCompletionLanguageModel; + (modelId: HyperbolicChatModelId, settings?: HyperbolicChatSettings): HyperbolicChatLanguageModel; languageModel( - modelId: OpenRouterChatModelId, - settings?: OpenRouterCompletionSettings, - ): OpenRouterCompletionLanguageModel; + modelId: HyperbolicChatModelId, + settings?: HyperbolicCompletionSettings, + ): HyperbolicCompletionLanguageModel; languageModel( - modelId: 
OpenRouterChatModelId, - settings?: OpenRouterChatSettings, - ): OpenRouterChatLanguageModel; + modelId: HyperbolicChatModelId, + settings?: HyperbolicChatSettings, + ): HyperbolicChatLanguageModel; /** -Creates an OpenRouter chat model for text generation. +Creates an Hyperbolic chat model for text generation. */ chat( - modelId: OpenRouterChatModelId, - settings?: OpenRouterChatSettings, - ): OpenRouterChatLanguageModel; + modelId: HyperbolicChatModelId, + settings?: HyperbolicChatSettings, + ): HyperbolicChatLanguageModel; /** -Creates an OpenRouter completion model for text generation. +Creates an Hyperbolic completion model for text generation. */ completion( - modelId: OpenRouterCompletionModelId, - settings?: OpenRouterCompletionSettings, - ): OpenRouterCompletionLanguageModel; + modelId: HyperbolicCompletionModelId, + settings?: HyperbolicCompletionSettings, + ): HyperbolicCompletionLanguageModel; image(modelId: HyperbolicImageModelId, settings?: HyperbolicImageSettings): HyperbolicImageModel; } -export interface OpenRouterProviderSettings { +export interface HyperbolicProviderSettings { /** -Base URL for the OpenRouter API calls. +Base URL for the Hyperbolic API calls. */ baseURL?: string; @@ -78,7 +82,7 @@ Custom headers to include in the requests. headers?: Record; /** -OpenRouter compatibility mode. Should be set to `strict` when using the OpenRouter API, +Hyperbolic compatibility mode. Should be set to `strict` when using the Hyperbolic API, and `compatible` when using 3rd party providers. In `compatible` mode, newer information such as streamOptions are not being sent. Defaults to 'compatible'. */ @@ -91,7 +95,7 @@ or to provide a custom fetch implementation for e.g. testing. fetch?: typeof fetch; /** -A JSON object to send as the request body to access OpenRouter features & upstream provider features. +A JSON object to send as the request body to access Hyperbolic features & upstream provider features. 
*/ extraBody?: Record; @@ -103,9 +107,9 @@ A JSON object to send as the request body to access OpenRouter features & upstre } /** -Create an OpenRouter provider instance. +Create an Hyperbolic provider instance. */ -export function createOpenRouter(options: OpenRouterProviderSettings = {}): OpenRouterProvider { +export function createHyperbolic(options: HyperbolicProviderSettings = {}): HyperbolicProvider { const baseURL = withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? "https://api.hyperbolic.xyz/v1"; @@ -118,7 +122,7 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open Authorization: `Bearer ${loadApiKey({ apiKey: options.apiKey, environmentVariableName: "HYPERBOLIC_API_KEY", - description: "OpenRouter", + description: "Hyperbolic", })}`, ...options.headers, ...(options.api_keys && @@ -126,12 +130,12 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open "X-Provider-API-Keys": JSON.stringify(options.api_keys), }), }, - `ai-sdk/openrouter/${VERSION}`, + `ai-sdk/hyperbolic/${VERSION}`, ); - const createChatModel = (modelId: OpenRouterChatModelId, settings: OpenRouterChatSettings = {}) => - new OpenRouterChatLanguageModel(modelId, settings, { - provider: "openrouter.chat", + const createChatModel = (modelId: HyperbolicChatModelId, settings: HyperbolicChatSettings = {}) => + new HyperbolicChatLanguageModel(modelId, settings, { + provider: "hyperbolic.chat", url: ({ path }) => `${baseURL}${path}`, headers: getHeaders, compatibility, @@ -140,11 +144,11 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open }); const createCompletionModel = ( - modelId: OpenRouterCompletionModelId, - settings: OpenRouterCompletionSettings = {}, + modelId: HyperbolicCompletionModelId, + settings: HyperbolicCompletionSettings = {}, ) => - new OpenRouterCompletionLanguageModel(modelId, settings, { - provider: "openrouter.completion", + new HyperbolicCompletionLanguageModel(modelId, 
settings, { + provider: "hyperbolic.completion", url: ({ path }) => `${baseURL}${path}`, headers: getHeaders, compatibility, @@ -166,19 +170,19 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open }); const createLanguageModel = ( - modelId: OpenRouterChatModelId | OpenRouterCompletionModelId, - settings?: OpenRouterChatSettings | OpenRouterCompletionSettings, + modelId: HyperbolicChatModelId | HyperbolicCompletionModelId, + settings?: HyperbolicChatSettings | HyperbolicCompletionSettings, ) => { if (new.target) { - throw new Error("The OpenRouter model function cannot be called with the new keyword."); + throw new Error("The Hyperbolic model function cannot be called with the new keyword."); } - return createChatModel(modelId, settings as OpenRouterChatSettings); + return createChatModel(modelId, settings as HyperbolicChatSettings); }; const provider = ( - modelId: OpenRouterChatModelId | OpenRouterCompletionModelId, - settings?: OpenRouterChatSettings | OpenRouterCompletionSettings, + modelId: HyperbolicChatModelId | HyperbolicCompletionModelId, + settings?: HyperbolicChatSettings | HyperbolicCompletionSettings, ) => createLanguageModel(modelId, settings); provider.languageModel = createLanguageModel; @@ -186,12 +190,12 @@ export function createOpenRouter(options: OpenRouterProviderSettings = {}): Open provider.completion = createCompletionModel; provider.image = createImageModel; - return provider as OpenRouterProvider; + return provider as HyperbolicProvider; } /** -Default OpenRouter provider instance. It uses 'strict' compatibility mode. +Default Hyperbolic provider instance. It uses 'strict' compatibility mode. 
*/ -export const openrouter = createOpenRouter({ - compatibility: "strict", // strict for OpenRouter API +export const hyperbolic = createHyperbolic({ + compatibility: "strict", // strict for Hyperbolic API }); diff --git a/packages/ai-sdk-provider/src/schemas/error-response.test.ts b/packages/ai-sdk-provider/src/schemas/error-response.test.ts index 8f3f759..7da8009 100644 --- a/packages/ai-sdk-provider/src/schemas/error-response.test.ts +++ b/packages/ai-sdk-provider/src/schemas/error-response.test.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import { OpenRouterErrorResponseSchema } from "./error-response"; describe("OpenRouterErrorResponseSchema", () => { diff --git a/packages/ai-sdk-provider/src/schemas/error-response.ts b/packages/ai-sdk-provider/src/schemas/error-response.ts index 84c4fe2..61b5df8 100644 --- a/packages/ai-sdk-provider/src/schemas/error-response.ts +++ b/packages/ai-sdk-provider/src/schemas/error-response.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type { ChatErrorError } from "@openrouter/sdk/models"; import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"; import { z } from "zod/v4"; diff --git a/packages/ai-sdk-provider/src/schemas/format.ts b/packages/ai-sdk-provider/src/schemas/format.ts index 63f4b89..89856e3 100644 --- a/packages/ai-sdk-provider/src/schemas/format.ts +++ b/packages/ai-sdk-provider/src/schemas/format.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + export enum ReasoningFormat { Unknown = "unknown", OpenAIResponsesV1 = "openai-responses-v1", diff --git a/packages/ai-sdk-provider/src/schemas/image.ts b/packages/ai-sdk-provider/src/schemas/image.ts index 7b2fbf2..8f57ab0 100644 --- a/packages/ai-sdk-provider/src/schemas/image.ts +++ b/packages/ai-sdk-provider/src/schemas/image.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import { z } from "zod/v4"; const ImageResponseSchema = z @@ -7,9 +11,9 @@ const ImageResponseSchema = z .object({ url: z.string(), }) - .passthrough(), + .loose(), }) - .passthrough(); + .loose(); export type ImageResponse = z.infer; diff --git a/packages/ai-sdk-provider/src/schemas/provider-metadata.ts b/packages/ai-sdk-provider/src/schemas/provider-metadata.ts index 00ae983..73372db 100644 --- a/packages/ai-sdk-provider/src/schemas/provider-metadata.ts +++ b/packages/ai-sdk-provider/src/schemas/provider-metadata.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import { z } from "zod/v4"; import { ReasoningDetailUnionSchema } from "./reasoning-details"; @@ -30,9 +34,9 @@ export const FileAnnotationSchema = z export type FileAnnotation = z.infer; /** - * Schema for OpenRouter provider metadata attached to responses + * Schema for Hyperbolic provider metadata attached to responses */ -export const OpenRouterProviderMetadataSchema = z +export const HyperbolicProviderMetadataSchema = z .object({ provider: z.string(), reasoning_details: z.array(ReasoningDetailUnionSchema).optional(), @@ -66,14 +70,14 @@ export const OpenRouterProviderMetadataSchema = z }) .catchall(z.any()); -export type OpenRouterProviderMetadata = z.infer; +export type OpenRouterProviderMetadata = z.infer; /** * Schema for parsing provider options that may contain reasoning_details and annotations */ -export const OpenRouterProviderOptionsSchema = z +export const HyperbolicProviderOptionsSchema = z .object({ - openrouter: z + hyperbolic: z .object({ reasoning_details: z.array(ReasoningDetailUnionSchema).optional(), annotations: z.array(FileAnnotationSchema).optional(), diff --git a/packages/ai-sdk-provider/src/schemas/reasoning-details.ts b/packages/ai-sdk-provider/src/schemas/reasoning-details.ts index 0396624..79f7a5e 100644 --- a/packages/ai-sdk-provider/src/schemas/reasoning-details.ts +++ b/packages/ai-sdk-provider/src/schemas/reasoning-details.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import { z } from "zod/v4"; import { isDefinedOrNotNull } from "../utils/type-guards"; diff --git a/packages/ai-sdk-provider/src/scripts/update-models-list.ts b/packages/ai-sdk-provider/src/scripts/update-models-list.ts index 52aea76..b57ebd4 100644 --- a/packages/ai-sdk-provider/src/scripts/update-models-list.ts +++ b/packages/ai-sdk-provider/src/scripts/update-models-list.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import "@hyperbolic/api"; import { readFileSync, writeFileSync } from "fs"; diff --git a/packages/ai-sdk-provider/src/test-utils/test-server.ts b/packages/ai-sdk-provider/src/test-utils/test-server.ts index 1037d90..329693a 100644 --- a/packages/ai-sdk-provider/src/test-utils/test-server.ts +++ b/packages/ai-sdk-provider/src/test-utils/test-server.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + /** * Simple test server utility to replace the removed @ai-sdk/provider-utils/test createTestServer * This provides HTTP request interception for testing purposes. diff --git a/packages/ai-sdk-provider/src/tests/provider-options.test.ts b/packages/ai-sdk-provider/src/tests/provider-options.test.ts index cbe6464..c2a7f9a 100644 --- a/packages/ai-sdk-provider/src/tests/provider-options.test.ts +++ b/packages/ai-sdk-provider/src/tests/provider-options.test.ts @@ -1,8 +1,12 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import type { ModelMessage } from "ai"; import { streamText } from "ai"; import { describe, expect, it, vi } from "vitest"; -import { createOpenRouter } from "../provider"; +import { createHyperbolic } from "../provider"; import { createTestServer } from "../test-utils/test-server"; // Add type assertions for the mocked classes @@ -25,7 +29,7 @@ describe("providerOptions", () => { }); it("should set providerOptions openrouter to extra body", async () => { - const openrouter = createOpenRouter({ + const openrouter = createHyperbolic({ apiKey: "test", }); const model = openrouter("anthropic/claude-3.7-sonnet"); diff --git a/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts b/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts index a998c52..2156c13 100644 --- a/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts +++ b/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts @@ -1,12 +1,16 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import { describe, expect, it } from "vitest"; -import type { OpenRouterChatSettings } from "../types/hyperbolic-chat-settings"; -import { OpenRouterChatLanguageModel } from "../chat"; +import type { HyperbolicChatSettings } from "../types/hyperbolic-chat-settings"; +import { HyperbolicChatLanguageModel } from "../chat"; import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; -describe("OpenRouter Streaming Usage Accounting", () => { +describe("Hyperbolic Streaming Usage Accounting", () => { const server = createTestServer({ - "https://api.openrouter.ai/chat/completions": { + "https://api.hyperbolic.xyz/v1/chat/completions": { response: { type: "stream-chunks", chunks: [] }, }, }); @@ -37,7 +41,7 @@ describe("OpenRouter Streaming Usage Accounting", () => { chunks.push("data: [DONE]\n\n"); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.openrouter.ai/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "stream-chunks", chunks, }; @@ -47,13 +51,13 @@ describe("OpenRouter Streaming Usage Accounting", () => { prepareStreamResponse(); // Create model with usage accounting enabled - const settings: OpenRouterChatSettings = { + const settings: HyperbolicChatSettings = { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel("test-model", settings, { - provider: "openrouter.chat", - url: () => "https://api.openrouter.ai/chat/completions", + const model = new HyperbolicChatLanguageModel("test-model", settings, { + provider: "hyperbolic.chat", + url: () => "https://api.hyperbolic.xyz/v1/chat/completions", headers: () => ({}), compatibility: "strict", fetch: global.fetch, @@ -84,13 +88,13 @@ describe("OpenRouter Streaming Usage Accounting", () => { prepareStreamResponse(true); // Create model with usage accounting enabled - const settings: 
OpenRouterChatSettings = { + const settings: HyperbolicChatSettings = { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel("test-model", settings, { - provider: "openrouter.chat", - url: () => "https://api.openrouter.ai/chat/completions", + const model = new HyperbolicChatLanguageModel("test-model", settings, { + provider: "hyperbolic.chat", + url: () => "https://api.hyperbolic.xyz/v1/chat/completions", headers: () => ({}), compatibility: "strict", fetch: global.fetch, @@ -116,7 +120,7 @@ describe("OpenRouter Streaming Usage Accounting", () => { // Verify metadata is included expect(finishChunk?.providerMetadata).toBeDefined(); - const openrouterData = finishChunk?.providerMetadata?.openrouter; + const openrouterData = finishChunk?.providerMetadata?.hyperbolic; expect(openrouterData).toBeDefined(); const usage = openrouterData?.usage; @@ -135,13 +139,13 @@ describe("OpenRouter Streaming Usage Accounting", () => { prepareStreamResponse(false); // Create model with usage accounting disabled - const settings: OpenRouterChatSettings = { + const settings: HyperbolicChatSettings = { // No usage property }; - const model = new OpenRouterChatLanguageModel("test-model", settings, { - provider: "openrouter.chat", - url: () => "https://api.openrouter.ai/chat/completions", + const model = new HyperbolicChatLanguageModel("test-model", settings, { + provider: "hyperbolic.chat", + url: () => "https://api.hyperbolic.xyz/v1/chat/completions", headers: () => ({}), compatibility: "strict", fetch: global.fetch, @@ -166,7 +170,7 @@ describe("OpenRouter Streaming Usage Accounting", () => { expect(finishChunk).toBeDefined(); // Verify that provider metadata is not included - expect(finishChunk?.providerMetadata?.openrouter).toStrictEqual({ + expect(finishChunk?.providerMetadata?.hyperbolic).toStrictEqual({ usage: {}, }); }); diff --git a/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts b/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts 
index e044e54..b0eb8ef 100644 --- a/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts +++ b/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts @@ -1,12 +1,16 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import { describe, expect, it } from "vitest"; -import type { OpenRouterChatSettings } from "../types/hyperbolic-chat-settings"; -import { OpenRouterChatLanguageModel } from "../chat"; +import type { HyperbolicChatSettings } from "../types/hyperbolic-chat-settings"; +import { HyperbolicChatLanguageModel } from "../chat"; import { createTestServer } from "../test-utils/test-server"; -describe("OpenRouter Usage Accounting", () => { +describe("Hyperbolic Usage Accounting", () => { const server = createTestServer({ - "https://api.openrouter.ai/chat/completions": { + "https://api.hyperbolic.xyz/v1/chat/completions": { response: { type: "json-value", body: {} }, }, }); @@ -45,7 +49,7 @@ describe("OpenRouter Usage Accounting", () => { }; // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.openrouter.ai/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "json-value", body: response, }; @@ -55,13 +59,13 @@ describe("OpenRouter Usage Accounting", () => { prepareJsonResponse(); // Create model with usage accounting enabled - const settings: OpenRouterChatSettings = { + const settings: HyperbolicChatSettings = { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel("test-model", settings, { - provider: "openrouter.chat", - url: () => "https://api.openrouter.ai/chat/completions", + const model = new HyperbolicChatLanguageModel("test-model", settings, { + provider: "hyperbolic.chat", + url: () => "https://api.hyperbolic.xyz/v1/chat/completions", headers: () => ({}), compatibility: "strict", fetch: global.fetch, @@ -90,13 
+94,13 @@ describe("OpenRouter Usage Accounting", () => { prepareJsonResponse(); // Create model with usage accounting enabled - const settings: OpenRouterChatSettings = { + const settings: HyperbolicChatSettings = { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel("test-model", settings, { - provider: "openrouter.chat", - url: () => "https://api.openrouter.ai/chat/completions", + const model = new HyperbolicChatLanguageModel("test-model", settings, { + provider: "hyperbolic.chat", + url: () => "https://api.hyperbolic.xyz/v1/chat/completions", headers: () => ({}), compatibility: "strict", fetch: global.fetch, @@ -117,9 +121,9 @@ describe("OpenRouter Usage Accounting", () => { expect(result.providerMetadata).toBeDefined(); const providerData = result.providerMetadata; - // Check for OpenRouter usage data - expect(providerData?.openrouter).toBeDefined(); - const openrouterData = providerData?.openrouter as Record; + // Check for Hyperbolic usage data + expect(providerData?.hyperbolic).toBeDefined(); + const openrouterData = providerData?.hyperbolic as Record; expect(openrouterData.usage).toBeDefined(); const usage = openrouterData.usage; @@ -144,13 +148,13 @@ describe("OpenRouter Usage Accounting", () => { prepareJsonResponse(); // Create model with usage accounting disabled - const settings: OpenRouterChatSettings = { + const settings: HyperbolicChatSettings = { // No usage property }; - const model = new OpenRouterChatLanguageModel("test-model", settings, { - provider: "openrouter.chat", - url: () => "https://api.openrouter.ai/chat/completions", + const model = new HyperbolicChatLanguageModel("test-model", settings, { + provider: "hyperbolic.chat", + url: () => "https://api.hyperbolic.xyz/v1/chat/completions", headers: () => ({}), compatibility: "strict", fetch: global.fetch, @@ -167,8 +171,8 @@ describe("OpenRouter Usage Accounting", () => { maxOutputTokens: 100, }); - // Verify that OpenRouter metadata is not included - 
expect(result.providerMetadata?.openrouter?.usage).toStrictEqual({ + // Verify that Hyperbolic metadata is not included + expect(result.providerMetadata?.hyperbolic?.usage).toStrictEqual({ promptTokens: 10, completionTokens: 20, totalTokens: 30, @@ -210,18 +214,18 @@ describe("OpenRouter Usage Accounting", () => { }; // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.openrouter.ai/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "json-value", body: response, }; - const settings: OpenRouterChatSettings = { + const settings: HyperbolicChatSettings = { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel("test-model", settings, { - provider: "openrouter.chat", - url: () => "https://api.openrouter.ai/chat/completions", + const model = new HyperbolicChatLanguageModel("test-model", settings, { + provider: "hyperbolic.chat", + url: () => "https://api.hyperbolic.xyz/v1/chat/completions", headers: () => ({}), compatibility: "strict", fetch: global.fetch, @@ -237,7 +241,7 @@ describe("OpenRouter Usage Accounting", () => { maxOutputTokens: 100, }); - const usage = (result.providerMetadata?.openrouter as Record)?.usage; + const usage = (result.providerMetadata?.hyperbolic as Record)?.usage; // Should include basic token counts expect(usage).toMatchObject({ @@ -281,18 +285,18 @@ describe("OpenRouter Usage Accounting", () => { }; // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.openrouter.ai/chat/completions"]!.response = { + server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { type: "json-value", body: response, }; - const settings: OpenRouterChatSettings = { + const settings: HyperbolicChatSettings = { usage: { include: true }, }; - const model = new OpenRouterChatLanguageModel("test-model", settings, { - provider: "openrouter.chat", - url: () => 
"https://api.openrouter.ai/chat/completions", + const model = new HyperbolicChatLanguageModel("test-model", settings, { + provider: "hyperbolic.chat", + url: () => "https://api.hyperbolic.xyz/v1/chat/completions", headers: () => ({}), compatibility: "strict", fetch: global.fetch, @@ -308,7 +312,7 @@ describe("OpenRouter Usage Accounting", () => { maxOutputTokens: 100, }); - const usage = (result.providerMetadata?.openrouter as Record)?.usage; + const usage = (result.providerMetadata?.hyperbolic as Record)?.usage; // Should include promptTokensDetails since cached_tokens is present expect(usage).toHaveProperty("promptTokensDetails"); diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts b/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts index 5ea2dbc..f2690cc 100644 --- a/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts +++ b/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts @@ -1,10 +1,14 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + import type { FileAnnotation } from "../schemas/provider-metadata"; import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; -// Type for OpenRouter Cache Control following Anthropic's pattern -export type OpenRouterCacheControl = { type: "ephemeral" }; +// Type for Hyperbolic Cache Control following Anthropic's pattern +export type HyperbolicCacheControl = { type: "ephemeral" }; -export type OpenRouterChatCompletionsInput = Array; +export type HyperbolicChatCompletionsInput = Array; export type ChatCompletionMessageParam = | ChatCompletionSystemMessageParam @@ -15,13 +19,13 @@ export type ChatCompletionMessageParam = export interface ChatCompletionSystemMessageParam { role: "system"; content: string; - cache_control?: OpenRouterCacheControl; + cache_control?: HyperbolicCacheControl; } export interface ChatCompletionUserMessageParam { role: "user"; content: string | Array; - cache_control?: OpenRouterCacheControl; + cache_control?: HyperbolicCacheControl; } export type ChatCompletionContentPart = @@ -37,7 +41,7 @@ export interface ChatCompletionContentPartFile { file_data?: string; file_id?: string; }; - cache_control?: OpenRouterCacheControl; + cache_control?: HyperbolicCacheControl; } export interface ChatCompletionContentPartImage { @@ -45,14 +49,14 @@ export interface ChatCompletionContentPartImage { image_url: { url: string; }; - cache_control?: OpenRouterCacheControl; + cache_control?: HyperbolicCacheControl; } export interface ChatCompletionContentPartText { type: "text"; text: string; reasoning?: string | null; - cache_control?: OpenRouterCacheControl; + cache_control?: HyperbolicCacheControl; } /** https://openrouter.ai/docs/guides/overview/multimodal/audio */ @@ -76,7 +80,7 @@ export interface ChatCompletionContentPartInputAudio { data: string; format: OpenRouterAudioFormat; }; - cache_control?: OpenRouterCacheControl; + cache_control?: HyperbolicCacheControl; } export interface 
ChatCompletionAssistantMessageParam { @@ -86,7 +90,7 @@ export interface ChatCompletionAssistantMessageParam { reasoning_details?: ReasoningDetailUnion[]; annotations?: FileAnnotation[]; tool_calls?: Array; - cache_control?: OpenRouterCacheControl; + cache_control?: HyperbolicCacheControl; } export interface ChatCompletionMessageToolCall { @@ -102,5 +106,5 @@ export interface ChatCompletionToolMessageParam { role: "tool"; content: string; tool_call_id: string; - cache_control?: OpenRouterCacheControl; + cache_control?: HyperbolicCacheControl; } diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts index 9aff314..073ead4 100644 --- a/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts +++ b/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts @@ -1,11 +1,15 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type * as models from "@openrouter/sdk/models"; import type { OpenRouterSharedSettings } from ".."; // https://api.hyperbolic.xyz/v1/models -export type OpenRouterChatModelId = string; +export type HyperbolicChatModelId = string; -export type OpenRouterChatSettings = { +export type HyperbolicChatSettings = { /** Modify the likelihood of specified tokens appearing in the completion. @@ -41,7 +45,7 @@ Whether to enable parallel function calling during tool use. Default to true. parallelToolCalls?: boolean; /** -A unique identifier representing your end-user, which can help OpenRouter to +A unique identifier representing your end-user, which can help Hyperbolic to monitor and detect abuse. Learn more. 
*/ user?: string; diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts index d08978d..e7c61b6 100644 --- a/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts +++ b/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts @@ -1,8 +1,12 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type { OpenRouterSharedSettings } from "."; -export type OpenRouterCompletionModelId = string; +export type HyperbolicCompletionModelId = string; -export type OpenRouterCompletionSettings = { +export type HyperbolicCompletionSettings = { /** Modify the likelihood of specified tokens appearing in the completion. diff --git a/packages/ai-sdk-provider/src/types/index.ts b/packages/ai-sdk-provider/src/types/index.ts index 088fc0d..86e20fa 100644 --- a/packages/ai-sdk-provider/src/types/index.ts +++ b/packages/ai-sdk-provider/src/types/index.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3, LanguageModelV3Prompt } from "@ai-sdk/provider"; export type { LanguageModelV3, LanguageModelV3Prompt }; @@ -24,7 +28,7 @@ export type OpenRouterProviderOptions = { /** * A unique identifier representing your end-user, which can - * help OpenRouter to monitor and detect abuse. + * help Hyperbolic to monitor and detect abuse. 
*/ user?: string; }; @@ -53,7 +57,7 @@ export type OpenRouterSharedSettings = OpenRouterProviderOptions & { * Usage accounting response * @see https://openrouter.ai/docs/use-cases/usage-accounting */ -export type OpenRouterUsageAccounting = { +export type HyperbolicUsageAccounting = { promptTokens: number; promptTokensDetails?: { cachedTokens: number; diff --git a/packages/ai-sdk-provider/src/utils/map-finish-reason.ts b/packages/ai-sdk-provider/src/utils/map-finish-reason.ts index 3d18d79..e836b07 100644 --- a/packages/ai-sdk-provider/src/utils/map-finish-reason.ts +++ b/packages/ai-sdk-provider/src/utils/map-finish-reason.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"; type UnifiedFinishReason = "stop" | "length" | "content-filter" | "tool-calls" | "error" | "other"; diff --git a/packages/ai-sdk-provider/src/utils/remove-undefined.ts b/packages/ai-sdk-provider/src/utils/remove-undefined.ts index 4c0b391..9a54448 100644 --- a/packages/ai-sdk-provider/src/utils/remove-undefined.ts +++ b/packages/ai-sdk-provider/src/utils/remove-undefined.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + /** * Removes entries from a record where the value is null or undefined. * @param record - The input object whose entries may be null or undefined. diff --git a/packages/ai-sdk-provider/src/utils/type-guards.ts b/packages/ai-sdk-provider/src/utils/type-guards.ts index 4a0f6e8..b3c52c6 100644 --- a/packages/ai-sdk-provider/src/utils/type-guards.ts +++ b/packages/ai-sdk-provider/src/utils/type-guards.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. 
+// Licensed under the Apache License, Version 2.0 + /** * Type guard to check if a value is defined and not null */ diff --git a/packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts b/packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts index 0a7df9d..962fa7d 100644 --- a/packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts +++ b/packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts @@ -1,3 +1,7 @@ +// Modified by Hyperbolic Labs, Inc. on 2026-01-23 +// Original work Copyright 2025 OpenRouter Inc. +// Licensed under the Apache License, Version 2.0 + import { removeUndefinedEntries } from "../utils/remove-undefined"; /** diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 0a5faa8..aa2c01d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -71,12 +71,6 @@ importers: '@ai-sdk/provider-utils': specifier: ^4.0.9 version: 4.0.9(zod@4.3.6) - ai: - specifier: ^6.0.48 - version: 6.0.48(zod@4.3.6) - zod: - specifier: ^4.0.0 - version: 4.3.6 devDependencies: '@edge-runtime/vm': specifier: ^5.0.0 @@ -93,12 +87,24 @@ importers: '@hyperbolic/tsconfig': specifier: workspace:* version: link:../../tooling/typescript + '@openrouter/sdk': + specifier: ^0.1.27 + version: 0.1.27 + '@types/json-schema': + specifier: 7.0.15 + version: 7.0.15 + ai: + specifier: ^6.0.48 + version: 6.0.48(zod@4.3.6) eslint: specifier: 'catalog:' version: 9.19.0(jiti@2.4.2) handlebars: specifier: ^4.7.8 version: 4.7.8 + msw: + specifier: 2.12.4 + version: 2.12.4(@types/node@22.13.10)(typescript@5.9.3) prettier: specifier: 'catalog:' version: 3.4.2 @@ -111,8 +117,17 @@ importers: typescript: specifier: 'catalog:' version: 5.9.3 + vite-tsconfig-paths: + specifier: ^5.1.4 + version: 5.1.4(typescript@5.9.3)(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1)) + vitest: + specifier: 3.2.4 + version: 3.2.4(@edge-runtime/vm@5.0.0)(@types/node@22.13.10)(lightningcss@1.29.1)(msw@2.12.4(@types/node@22.13.10)(typescript@5.9.3)) + zod: + specifier: ^4.0.0 + version: 4.3.6 - 
packages/ai-sdk-provider-2: + packages/ai-sdk-provider-old: dependencies: '@ai-sdk/provider': specifier: ^3.0.5 @@ -120,6 +135,12 @@ importers: '@ai-sdk/provider-utils': specifier: ^4.0.9 version: 4.0.9(zod@4.3.6) + ai: + specifier: ^6.0.48 + version: 6.0.48(zod@4.3.6) + zod: + specifier: ^4.0.0 + version: 4.3.6 devDependencies: '@edge-runtime/vm': specifier: ^5.0.0 @@ -136,24 +157,12 @@ importers: '@hyperbolic/tsconfig': specifier: workspace:* version: link:../../tooling/typescript - '@openrouter/sdk': - specifier: ^0.1.27 - version: 0.1.27 - '@types/json-schema': - specifier: 7.0.15 - version: 7.0.15 - ai: - specifier: ^6.0.48 - version: 6.0.48(zod@4.3.6) eslint: specifier: 'catalog:' version: 9.19.0(jiti@2.4.2) handlebars: specifier: ^4.7.8 version: 4.7.8 - msw: - specifier: 2.12.4 - version: 2.12.4(@types/node@22.13.10)(typescript@5.9.3) prettier: specifier: 'catalog:' version: 3.4.2 @@ -166,15 +175,6 @@ importers: typescript: specifier: 'catalog:' version: 5.9.3 - vite-tsconfig-paths: - specifier: ^5.1.4 - version: 5.1.4(typescript@5.9.3)(vite@5.4.14(@types/node@22.13.10)(lightningcss@1.29.1)) - vitest: - specifier: 3.2.4 - version: 3.2.4(@edge-runtime/vm@5.0.0)(@types/node@22.13.10)(lightningcss@1.29.1)(msw@2.12.4(@types/node@22.13.10)(typescript@5.9.3)) - zod: - specifier: ^4.0.0 - version: 4.3.6 packages/api: devDependencies: @@ -4045,7 +4045,7 @@ snapshots: '@types/node': 20.5.1 chalk: 4.1.2 cosmiconfig: 8.3.6(typescript@5.9.3) - cosmiconfig-typescript-loader: 4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@22.13.10)(typescript@5.9.3))(typescript@5.9.3) + cosmiconfig-typescript-loader: 4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@20.5.1)(typescript@5.9.3))(typescript@5.9.3) lodash.isplainobject: 4.0.6 lodash.merge: 4.6.2 lodash.uniq: 4.5.0 @@ -4267,7 +4267,7 @@ snapshots: '@eslint/config-array@0.19.1': dependencies: '@eslint/object-schema': 2.1.5 - debug: 4.4.0 + debug: 
4.4.3 minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -4279,7 +4279,7 @@ snapshots: '@eslint/eslintrc@3.2.0': dependencies: ajv: 6.12.6 - debug: 4.4.0 + debug: 4.4.3 espree: 10.3.0 globals: 14.0.0 ignore: 5.3.1 @@ -5053,7 +5053,7 @@ snapshots: cookie@1.1.1: {} - cosmiconfig-typescript-loader@4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@22.13.10)(typescript@5.9.3))(typescript@5.9.3): + cosmiconfig-typescript-loader@4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@20.5.1)(typescript@5.9.3))(typescript@5.9.3): dependencies: '@types/node': 20.5.1 cosmiconfig: 8.3.6(typescript@5.9.3) @@ -5525,7 +5525,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.6 - debug: 4.4.0 + debug: 4.4.3 escape-string-regexp: 4.0.0 eslint-scope: 8.2.0 eslint-visitor-keys: 4.2.0 From 81b2889372f8ba761810770ef6eda67455bd6270 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 13:17:18 -0800 Subject: [PATCH 13/22] rename remaining things to hyperbolic conventions --- ...onvert-to-hyperbolic-chat-messages.test.ts | 56 +++++++-------- .../convert-to-hyperbolic-chat-messages.ts | 2 +- .../src/chat/file-parser-schema.test.ts | 6 +- .../src/chat/file-url-utils.ts | 16 ++--- .../ai-sdk-provider/src/chat/index.test.ts | 31 ++++---- packages/ai-sdk-provider/src/chat/index.ts | 70 +++++++++---------- .../src/chat/large-pdf-response.test.ts | 2 +- packages/ai-sdk-provider/src/chat/schemas.ts | 16 ++--- ...convert-to-hyperbolic-completion-prompt.ts | 2 +- .../src/completion/index.test.ts | 20 +++--- .../ai-sdk-provider/src/completion/index.ts | 32 ++++----- .../ai-sdk-provider/src/completion/schemas.ts | 4 +- packages/ai-sdk-provider/src/facade.ts | 4 +- .../src/image/hyperbolic-image-settings.ts | 2 +- .../src/schemas/error-response.test.ts | 8 +-- .../src/schemas/error-response.ts | 10 +-- .../src/schemas/provider-metadata.ts | 2 +- .../src/tests/provider-options.test.ts | 8 +-- 
.../src/tests/stream-usage-accounting.test.ts | 6 +- .../src/tests/usage-accounting.test.ts | 6 +- .../hyperbolic-chat-completions-input.ts | 7 +- .../src/types/hyperbolic-chat-settings.ts | 6 +- .../types/hyperbolic-completion-settings.ts | 4 +- packages/ai-sdk-provider/src/types/index.ts | 7 +- .../src/utils/map-finish-reason.ts | 2 +- 25 files changed, 159 insertions(+), 170 deletions(-) diff --git a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts index a8c7b7e..1e8040d 100644 --- a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts +++ b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts @@ -3,12 +3,12 @@ // Licensed under the Apache License, Version 2.0 import { ReasoningDetailType } from "../schemas/reasoning-details"; -import { convertToOpenRouterChatMessages } from "./convert-to-hyperbolic-chat-messages"; +import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; import { MIME_TO_FORMAT } from "./file-url-utils"; describe("user messages", () => { it("should convert image Uint8Array", async () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -37,7 +37,7 @@ describe("user messages", () => { }); it("should convert image urls", async () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -66,7 +66,7 @@ describe("user messages", () => { }); it("should convert messages with image base64", async () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -95,7 +95,7 @@ describe("user messages", () => { }); it("should convert messages with only a text part to a string content", async () => { - const result = 
convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [{ type: "text", text: "Hello" }], @@ -108,7 +108,7 @@ describe("user messages", () => { it.each( Object.entries(MIME_TO_FORMAT).map(([mimeSubtype, format]) => [`audio/${mimeSubtype}`, format]), )("should convert %s to input_audio with %s format", (mediaType, expectedFormat) => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -138,7 +138,7 @@ describe("user messages", () => { }); it("should convert audio base64 data URL to input_audio", async () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -168,7 +168,7 @@ describe("user messages", () => { }); it("should convert raw audio base64 string to input_audio", async () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -199,7 +199,7 @@ describe("user messages", () => { it("should throw error for audio URLs", async () => { expect(() => - convertToOpenRouterChatMessages([ + convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -216,7 +216,7 @@ describe("user messages", () => { it("should throw error for unsupported audio formats", async () => { expect(() => - convertToOpenRouterChatMessages([ + convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -234,7 +234,7 @@ describe("user messages", () => { describe("cache control", () => { it("should pass cache control from system message provider metadata", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "system", content: "System prompt", @@ -256,7 +256,7 @@ describe("cache control", () => { }); it("should pass cache control from user message provider metadata (single text part)", () => { - const result = 
convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [{ type: "text", text: "Hello" }], @@ -283,7 +283,7 @@ describe("cache control", () => { }); it("should pass cache control from content part provider metadata (single text part)", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -315,7 +315,7 @@ describe("cache control", () => { }); it("should pass cache control from user message provider metadata (multiple parts)", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -354,7 +354,7 @@ describe("cache control", () => { }); it("should pass cache control from user message provider metadata without cache control (single text part)", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [{ type: "text", text: "Hello" }], @@ -370,7 +370,7 @@ describe("cache control", () => { }); it("should pass cache control to multiple image parts from user message provider metadata", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -419,7 +419,7 @@ describe("cache control", () => { }); it("should pass cache control to file parts from user message provider metadata", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -466,7 +466,7 @@ describe("cache control", () => { }); it("should handle mixed part-specific and message-level cache control for multiple parts", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -533,7 +533,7 @@ describe("cache control", () => { }); it("should pass cache control from individual 
content part provider metadata", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -574,7 +574,7 @@ describe("cache control", () => { }); it("should pass cache control from assistant message provider metadata", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "assistant", content: [{ type: "text", text: "Assistant response" }], @@ -596,7 +596,7 @@ describe("cache control", () => { }); it("should pass cache control from tool message provider metadata", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "tool", content: [ @@ -629,7 +629,7 @@ describe("cache control", () => { }); it("should support the alias cache_control field", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "system", content: "System prompt", @@ -651,7 +651,7 @@ describe("cache control", () => { }); it("should support cache control on last message in content array", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "system", content: "System prompt", @@ -691,7 +691,7 @@ describe("cache control", () => { }); it("should pass cache control to audio input parts from user message provider metadata", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "user", content: [ @@ -735,7 +735,7 @@ describe("cache control", () => { describe("reasoning_details accumulation", () => { it("should accumulate reasoning_details from reasoning part providerOptions", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "assistant", content: [ @@ -809,7 +809,7 @@ describe("reasoning_details accumulation", () => { }); it("should use 
preserved reasoning_details from message-level providerOptions when available", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "assistant", content: [ @@ -860,7 +860,7 @@ describe("reasoning_details accumulation", () => { }); it("should not include reasoning_details when not present in providerOptions", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "assistant", content: [ @@ -890,7 +890,7 @@ describe("reasoning_details accumulation", () => { }); it("should handle mixed reasoning parts with and without providerOptions", () => { - const result = convertToOpenRouterChatMessages([ + const result = convertToHyperbolicChatMessages([ { role: "assistant", content: [ diff --git a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts index 6fbdbe8..c5367cd 100644 --- a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts +++ b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts @@ -35,7 +35,7 @@ function getCacheControl( anthropic?.cache_control) as HyperbolicCacheControl | undefined; } -export function convertToOpenRouterChatMessages( +export function convertToHyperbolicChatMessages( prompt: LanguageModelV3Prompt, ): HyperbolicChatCompletionsInput { const messages: HyperbolicChatCompletionsInput = []; diff --git a/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts b/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts index 19da1ec..ebbfa78 100644 --- a/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts +++ b/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts @@ -4,7 +4,7 @@ import { describe, expect, it } from "vitest"; -import { OpenRouterNonStreamChatCompletionResponseSchema } from "./schemas"; +import { HyperbolicNonStreamChatCompletionResponseSchema } from 
"./schemas"; describe("FileParser annotation schema", () => { it("should parse response with all real API fields", () => { @@ -51,7 +51,7 @@ describe("FileParser annotation schema", () => { }, }; - const result = OpenRouterNonStreamChatCompletionResponseSchema.parse(response); + const result = HyperbolicNonStreamChatCompletionResponseSchema.parse(response); expect(result).toBeDefined(); }); @@ -102,7 +102,7 @@ describe("FileParser annotation schema", () => { }, }; - const result = OpenRouterNonStreamChatCompletionResponseSchema.parse(response); + const result = HyperbolicNonStreamChatCompletionResponseSchema.parse(response); // Check that parsing succeeded expect(result).toBeDefined(); diff --git a/packages/ai-sdk-provider/src/chat/file-url-utils.ts b/packages/ai-sdk-provider/src/chat/file-url-utils.ts index 101a32c..714b3f5 100644 --- a/packages/ai-sdk-provider/src/chat/file-url-utils.ts +++ b/packages/ai-sdk-provider/src/chat/file-url-utils.ts @@ -5,8 +5,8 @@ import type { LanguageModelV3FilePart } from "@ai-sdk/provider"; import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils"; -import type { OpenRouterAudioFormat } from "../types/hyperbolic-chat-completions-input"; -import { OPENROUTER_AUDIO_FORMATS } from "../types/hyperbolic-chat-completions-input"; +import type { HyperbolicAudioFormat } from "../types/hyperbolic-chat-completions-input"; +import { HYPERBOLIC_AUDIO_FORMATS } from "../types/hyperbolic-chat-completions-input"; import { isUrl } from "./is-url"; export function getFileUrl({ @@ -49,7 +49,7 @@ export function getBase64FromDataUrl(dataUrl: string): string { } /** MIME type to format mapping for normalization */ -export const MIME_TO_FORMAT: Record = { +export const MIME_TO_FORMAT: Record = { // MP3 variants mpeg: "mp3", mp3: "mp3", @@ -104,7 +104,7 @@ export const MIME_TO_FORMAT: Record = { */ export function getInputAudioData(part: LanguageModelV3FilePart): { data: string; - format: OpenRouterAudioFormat; + format: HyperbolicAudioFormat; 
} { const fileData = getFileUrl({ part, @@ -124,8 +124,7 @@ export function getInputAudioData(part: LanguageModelV3FilePart): { `1. Download the audio file locally\n` + `2. Read it as a Buffer or Uint8Array\n` + `3. Pass it as the data parameter\n\n` + - `The AI SDK will automatically handle base64 encoding.\n\n` + - `Learn more: https://openrouter.ai/docs/features/multimodal/audio`, + `The AI SDK will automatically handle base64 encoding.`, ); } @@ -140,11 +139,10 @@ export function getInputAudioData(part: LanguageModelV3FilePart): { const format = MIME_TO_FORMAT[rawFormat]; if (format === undefined) { - const supportedList = OPENROUTER_AUDIO_FORMATS.join(", "); + const supportedList = HYPERBOLIC_AUDIO_FORMATS.join(", "); throw new Error( `Unsupported audio format: "${mediaType}"\n\n` + - `Hyperbolic supports the following audio formats: ${supportedList}\n\n` + - `Learn more: https://openrouter.ai/docs/features/multimodal/audio`, + `Hyperbolic supports the following audio formats: ${supportedList}`, ); } diff --git a/packages/ai-sdk-provider/src/chat/index.test.ts b/packages/ai-sdk-provider/src/chat/index.test.ts index 63ed42b..8be3f19 100644 --- a/packages/ai-sdk-provider/src/chat/index.test.ts +++ b/packages/ai-sdk-provider/src/chat/index.test.ts @@ -666,7 +666,7 @@ describe("doGenerate", () => { "custom-provider-header": "provider-header-value", "custom-request-header": "request-header-value", }); - expect(requestHeaders["user-agent"]).toContain("ai-sdk/openrouter/0.0.0-test"); + expect(requestHeaders["user-agent"]).toContain("ai-sdk/hyperbolic/0.0.0-test"); }); it("should pass responseFormat for JSON schema structured outputs", async () => { @@ -962,7 +962,7 @@ describe("doStream", () => { (chunk): chunk is Extract => chunk.type === "finish", ); - const openrouterUsage = ( + const hyperbolicUsage = ( finishChunk?.providerMetadata?.hyperbolic as { usage?: { cost?: number; @@ -970,7 +970,7 @@ describe("doStream", () => { }; } )?.usage; - 
expect(openrouterUsage?.costDetails).toStrictEqual({ + expect(hyperbolicUsage?.costDetails).toStrictEqual({ upstreamInferenceCost: 0.0036, }); }); @@ -998,7 +998,7 @@ describe("doStream", () => { (chunk): chunk is Extract => chunk.type === "finish", ); - const openrouterUsage = ( + const hyperbolicUsage = ( finishChunk?.providerMetadata?.hyperbolic as { usage?: { cost?: number; @@ -1006,10 +1006,10 @@ describe("doStream", () => { }; } )?.usage; - expect(openrouterUsage?.costDetails).toStrictEqual({ + expect(hyperbolicUsage?.costDetails).toStrictEqual({ upstreamInferenceCost: 0.0036, }); - expect(openrouterUsage?.cost).toBe(0.0042); + expect(hyperbolicUsage?.cost).toBe(0.0042); }); it("should prioritize reasoning_details over reasoning when both are present in streaming", async () => { @@ -1778,7 +1778,7 @@ describe("doStream", () => { type: "stream-chunks", chunks: [ `data: {"error":{"message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + - `help center at help.openrouter.com if you keep seeing this error.","type":"server_error","param":null,"code":null}}\n\n`, + `help center.","type":"server_error","param":null,"code":null}}\n\n`, "data: [DONE]\n\n", ], }; @@ -1793,8 +1793,7 @@ describe("doStream", () => { error: { message: "The server had an error processing your request. Sorry about that! 
" + - "You can retry your request, or contact us through our help center at " + - "help.openrouter.com if you keep seeing this error.", + "You can retry your request, or contact us through our help center", type: "server_error", code: null, param: null, @@ -1907,7 +1906,7 @@ describe("doStream", () => { "custom-provider-header": "provider-header-value", "custom-request-header": "request-header-value", }); - expect(requestHeaders["user-agent"]).toContain("ai-sdk/openrouter/0.0.0-test"); + expect(requestHeaders["user-agent"]).toContain("ai-sdk/hyperbolic/0.0.0-test"); }); it("should pass extra body", async () => { @@ -2122,7 +2121,7 @@ describe("doStream", () => { expect(finishChunk).toBeDefined(); // Verify file annotations are included in providerMetadata - const openrouterMetadata = finishChunk?.providerMetadata?.hyperbolic as { + const hyperbolicMetadata = finishChunk?.providerMetadata?.hyperbolic as { annotations?: Array<{ type: "file"; file: { @@ -2133,7 +2132,7 @@ describe("doStream", () => { }>; }; - expect(openrouterMetadata?.annotations).toStrictEqual([ + expect(hyperbolicMetadata?.annotations).toStrictEqual([ { type: "file", file: { @@ -2189,7 +2188,7 @@ describe("doStream", () => { chunk.type === "finish", ); - const openrouterMetadata = finishChunk?.providerMetadata?.hyperbolic as { + const hyperbolicMetadata = finishChunk?.providerMetadata?.hyperbolic as { annotations?: Array<{ type: "file"; file: { @@ -2201,9 +2200,9 @@ describe("doStream", () => { }; // Both file annotations should be accumulated - expect(openrouterMetadata?.annotations).toHaveLength(2); - expect(openrouterMetadata?.annotations?.[0]?.file.hash).toBe("hash1"); - expect(openrouterMetadata?.annotations?.[1]?.file.hash).toBe("hash2"); + expect(hyperbolicMetadata?.annotations).toHaveLength(2); + expect(hyperbolicMetadata?.annotations?.[0]?.file.hash).toBe("hash1"); + expect(hyperbolicMetadata?.annotations?.[1]?.file.hash).toBe("hash2"); }); }); diff --git 
a/packages/ai-sdk-provider/src/chat/index.ts b/packages/ai-sdk-provider/src/chat/index.ts index d2117d1..dd3cb19 100644 --- a/packages/ai-sdk-provider/src/chat/index.ts +++ b/packages/ai-sdk-provider/src/chat/index.ts @@ -34,16 +34,16 @@ import type { HyperbolicChatSettings, } from "../types/hyperbolic-chat-settings"; import type { HyperbolicUsageAccounting } from "../types/index"; -import { openrouterFailedResponseHandler } from "../schemas/error-response"; +import { hyperbolicFailedResponseHandler } from "../schemas/error-response"; import { HyperbolicProviderMetadataSchema } from "../schemas/provider-metadata"; import { ReasoningDetailType } from "../schemas/reasoning-details"; -import { createFinishReason, mapOpenRouterFinishReason } from "../utils/map-finish-reason"; -import { convertToOpenRouterChatMessages } from "./convert-to-hyperbolic-chat-messages"; +import { createFinishReason, mapHyperbolicFinishReason } from "../utils/map-finish-reason"; +import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; import { getBase64FromDataUrl, getMediaType } from "./file-url-utils"; import { getChatCompletionToolChoice } from "./get-tool-choice"; import { - OpenRouterNonStreamChatCompletionResponseSchema, - OpenRouterStreamChatCompletionChunkSchema, + HyperbolicNonStreamChatCompletionResponseSchema, + HyperbolicStreamChatCompletionChunkSchema, } from "./schemas"; type HyperbolicChatConfig = { @@ -145,7 +145,7 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { top_k: topK, // messages: - messages: convertToOpenRouterChatMessages(prompt), + messages: convertToHyperbolicChatMessages(prompt), // Hyperbolic specific settings: include_reasoning: this.settings.includeReasoning, @@ -207,11 +207,11 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { }; }> { const providerOptions = options.providerOptions || {}; - const openrouterOptions = providerOptions.openrouter || {}; + const hyperbolicOptions = 
providerOptions.hyperbolic || {}; const args = { ...this.getArgs(options), - ...openrouterOptions, + ...hyperbolicOptions, }; const { value: responseValue, responseHeaders } = await postJsonToApi({ @@ -221,9 +221,9 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { }), headers: combineHeaders(this.config.headers(), options.headers), body: args, - failedResponseHandler: openrouterFailedResponseHandler, + failedResponseHandler: hyperbolicFailedResponseHandler, successfulResponseHandler: createJsonResponseHandler( - OpenRouterNonStreamChatCompletionResponseSchema, + HyperbolicNonStreamChatCompletionResponseSchema, ), abortSignal: options.abortSignal, fetch: this.config.fetch, @@ -437,7 +437,7 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { const effectiveFinishReason = shouldOverrideFinishReason ? createFinishReason("tool-calls", choice.finish_reason ?? undefined) - : mapOpenRouterFinishReason(choice.finish_reason); + : mapHyperbolicFinishReason(choice.finish_reason); return { content, @@ -497,11 +497,11 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { }; }> { const providerOptions = options.providerOptions || {}; - const openrouterOptions = providerOptions.openrouter || {}; + const hyperbolicOptions = providerOptions.hyperbolic || {}; const args = { ...this.getArgs(options), - ...openrouterOptions, + ...hyperbolicOptions, }; const { value: response, responseHeaders } = await postJsonToApi({ @@ -524,9 +524,9 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { } : undefined, }, - failedResponseHandler: openrouterFailedResponseHandler, + failedResponseHandler: hyperbolicFailedResponseHandler, successfulResponseHandler: createEventSourceResponseHandler( - OpenRouterStreamChatCompletionChunkSchema, + HyperbolicStreamChatCompletionChunkSchema, ), abortSignal: options.abortSignal, fetch: this.config.fetch, @@ -559,7 +559,7 @@ export class HyperbolicChatLanguageModel implements 
LanguageModelV3 { }; // Track provider-specific usage information - const openrouterUsage: Partial = {}; + const hyperbolicUsage: Partial = {}; // Track reasoning details to preserve for multi-turn conversations const accumulatedReasoningDetails: ReasoningDetailUnion[] = []; @@ -571,13 +571,13 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { let reasoningStarted = false; let textId: string | undefined; let reasoningId: string | undefined; - let openrouterResponseId: string | undefined; + let hyperbolicResponseId: string | undefined; let provider: string | undefined; return { stream: response.pipeThrough( new TransformStream< - ParseResult>, + ParseResult>, LanguageModelV3StreamPart >({ transform(chunk, controller) { @@ -602,7 +602,7 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { } if (value.id) { - openrouterResponseId = value.id; + hyperbolicResponseId = value.id; controller.enqueue({ type: "response-metadata", id: value.id, @@ -621,32 +621,32 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { usage.outputTokens.total = value.usage.completion_tokens; // Collect Hyperbolic specific usage information - openrouterUsage.promptTokens = value.usage.prompt_tokens; + hyperbolicUsage.promptTokens = value.usage.prompt_tokens; if (value.usage.prompt_tokens_details) { const cachedInputTokens = value.usage.prompt_tokens_details.cached_tokens ?? 0; usage.inputTokens.cacheRead = cachedInputTokens; - openrouterUsage.promptTokensDetails = { + hyperbolicUsage.promptTokensDetails = { cachedTokens: cachedInputTokens, }; } - openrouterUsage.completionTokens = value.usage.completion_tokens; + hyperbolicUsage.completionTokens = value.usage.completion_tokens; if (value.usage.completion_tokens_details) { const reasoningTokens = value.usage.completion_tokens_details.reasoning_tokens ?? 
0; usage.outputTokens.reasoning = reasoningTokens; - openrouterUsage.completionTokensDetails = { + hyperbolicUsage.completionTokensDetails = { reasoningTokens, }; } - openrouterUsage.cost = value.usage.cost; - openrouterUsage.totalTokens = value.usage.total_tokens; + hyperbolicUsage.cost = value.usage.cost; + hyperbolicUsage.totalTokens = value.usage.total_tokens; const upstreamInferenceCost = value.usage.cost_details?.upstream_inference_cost; if (upstreamInferenceCost != null) { - openrouterUsage.costDetails = { + hyperbolicUsage.costDetails = { upstreamInferenceCost, }; } @@ -655,7 +655,7 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { const choice = value.choices[0]; if (choice?.finish_reason != null) { - finishReason = mapOpenRouterFinishReason(choice.finish_reason); + finishReason = mapHyperbolicFinishReason(choice.finish_reason); } if (choice?.delta == null) { @@ -669,7 +669,7 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { providerMetadata?: SharedV3ProviderMetadata, ) => { if (!reasoningStarted) { - reasoningId = openrouterResponseId || generateId(); + reasoningId = hyperbolicResponseId || generateId(); controller.enqueue({ providerMetadata, type: "reasoning-start", @@ -759,7 +759,7 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { } if (!textStarted) { - textId = openrouterResponseId || generateId(); + textId = hyperbolicResponseId || generateId(); controller.enqueue({ type: "text-start", id: textId, @@ -1008,28 +1008,28 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { }); } - const openrouterMetadata: { + const hyperbolicMetadata: { usage: Partial; provider?: string; reasoning_details?: ReasoningDetailUnion[]; annotations?: FileAnnotation[]; } = { - usage: openrouterUsage, + usage: hyperbolicUsage, }; // Only include provider if it's actually set if (provider !== undefined) { - openrouterMetadata.provider = provider; + hyperbolicMetadata.provider = provider; } 
// Include accumulated reasoning_details if any were received if (accumulatedReasoningDetails.length > 0) { - openrouterMetadata.reasoning_details = accumulatedReasoningDetails; + hyperbolicMetadata.reasoning_details = accumulatedReasoningDetails; } // Include accumulated file annotations if any were received if (accumulatedFileAnnotations.length > 0) { - openrouterMetadata.annotations = accumulatedFileAnnotations; + hyperbolicMetadata.annotations = accumulatedFileAnnotations; } controller.enqueue({ @@ -1037,7 +1037,7 @@ export class HyperbolicChatLanguageModel implements LanguageModelV3 { finishReason, usage, providerMetadata: { - openrouter: openrouterMetadata, + hyperbolic: hyperbolicMetadata, }, }); }, diff --git a/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts b/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts index 7af4443..7cbbdda 100644 --- a/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts +++ b/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts @@ -24,7 +24,7 @@ const server = createTestServer({ describe("Large PDF Response Handling", () => { describe("doGenerate", () => { it("should handle HTTP 200 responses with error payloads (500 internal errors)", async () => { - // This is the actual response OpenRouter returns for large PDF failures + // This is the actual response Hyperbolic returns for large PDF failures // HTTP 200 status but contains error object instead of choices // eslint-disable-next-line @typescript-eslint/no-non-null-assertion server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { diff --git a/packages/ai-sdk-provider/src/chat/schemas.ts b/packages/ai-sdk-provider/src/chat/schemas.ts index 37e1744..9a032d1 100644 --- a/packages/ai-sdk-provider/src/chat/schemas.ts +++ b/packages/ai-sdk-provider/src/chat/schemas.ts @@ -4,11 +4,11 @@ import { z } from "zod/v4"; -import { OpenRouterErrorResponseSchema } from "../schemas/error-response"; +import { 
HyperbolicErrorResponseSchema } from "../schemas/error-response"; import { ImageResponseArraySchema } from "../schemas/image"; import { ReasoningDetailArraySchema } from "../schemas/reasoning-details"; -const OpenRouterChatCompletionBaseResponseSchema = z +const HyperbolicChatCompletionBaseResponseSchema = z .object({ id: z.string().optional(), model: z.string().optional(), @@ -44,9 +44,9 @@ const OpenRouterChatCompletionBaseResponseSchema = z .passthrough(); // limited version of the schema, focussed on what is needed for the implementation // this approach limits breakages when the API changes and increases efficiency -export const OpenRouterNonStreamChatCompletionResponseSchema = z.union([ +export const HyperbolicNonStreamChatCompletionResponseSchema = z.union([ // Success response with choices - OpenRouterChatCompletionBaseResponseSchema.extend({ + HyperbolicChatCompletionBaseResponseSchema.extend({ choices: z.array( z .object({ @@ -163,14 +163,14 @@ export const OpenRouterNonStreamChatCompletionResponseSchema = z.union([ ), }), // Error response (HTTP 200 with error payload) - OpenRouterErrorResponseSchema.extend({ + HyperbolicErrorResponseSchema.extend({ user_id: z.string().optional(), }), ]); // limited version of the schema, focussed on what is needed for the implementation // this approach limits breakages when the API changes and increases efficiency -export const OpenRouterStreamChatCompletionChunkSchema = z.union([ - OpenRouterChatCompletionBaseResponseSchema.extend({ +export const HyperbolicStreamChatCompletionChunkSchema = z.union([ + HyperbolicChatCompletionBaseResponseSchema.extend({ choices: z.array( z .object({ @@ -286,5 +286,5 @@ export const OpenRouterStreamChatCompletionChunkSchema = z.union([ .passthrough(), ), }), - OpenRouterErrorResponseSchema, + HyperbolicErrorResponseSchema, ]); diff --git a/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts 
b/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts index 672ec73..5c0d69e 100644 --- a/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts +++ b/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts @@ -12,7 +12,7 @@ import type { } from "@ai-sdk/provider"; import { InvalidPromptError, UnsupportedFunctionalityError } from "@ai-sdk/provider"; -export function convertToOpenRouterCompletionPrompt({ +export function convertToHyperbolicCompletionPrompt({ prompt, inputFormat, user = "user", diff --git a/packages/ai-sdk-provider/src/completion/index.test.ts b/packages/ai-sdk-provider/src/completion/index.test.ts index 420ae46..679b9c4 100644 --- a/packages/ai-sdk-provider/src/completion/index.test.ts +++ b/packages/ai-sdk-provider/src/completion/index.test.ts @@ -231,7 +231,7 @@ describe("doGenerate", () => { "custom-provider-header": "provider-header-value", "custom-request-header": "request-header-value", }); - expect(requestHeaders["user-agent"]).toContain("ai-sdk/openrouter/0.0.0-test"); + expect(requestHeaders["user-agent"]).toContain("ai-sdk/hyperbolic/0.0.0-test"); }); }); @@ -368,7 +368,7 @@ describe("doStream", () => { (element): element is Extract => element.type === "finish", ); - const openrouterUsage = ( + const hyperbolicUsage = ( finishChunk?.providerMetadata?.hyperbolic as { usage?: { cost?: number; @@ -376,7 +376,7 @@ describe("doStream", () => { }; } )?.usage; - expect(openrouterUsage?.costDetails).toStrictEqual({ + expect(hyperbolicUsage?.costDetails).toStrictEqual({ upstreamInferenceCost: 0.0036, }); }); @@ -404,7 +404,7 @@ describe("doStream", () => { (element): element is Extract => element.type === "finish", ); - const openrouterUsage = ( + const hyperbolicUsage = ( finishChunk?.providerMetadata?.hyperbolic as { usage?: { cost?: number; @@ -412,10 +412,10 @@ describe("doStream", () => { }; } )?.usage; - expect(openrouterUsage?.costDetails).toStrictEqual({ + 
expect(hyperbolicUsage?.costDetails).toStrictEqual({ upstreamInferenceCost: 0.0036, }); - expect(openrouterUsage?.cost).toBe(0.0025); + expect(hyperbolicUsage?.cost).toBe(0.0025); }); it("should handle error stream parts", async () => { @@ -423,8 +423,7 @@ describe("doStream", () => { server.urls["https://api.hyperbolic.xyz/v1/completions"]!.response = { type: "stream-chunks", chunks: [ - `data: {"error":{"message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + - `help center at help.openrouter.com if you keep seeing this error.","type":"server_error","param":null,"code":null}}\n\n`, + `data: {"error":{"message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our help center`, "data: [DONE]\n\n", ], }; @@ -439,8 +438,7 @@ describe("doStream", () => { error: { message: "The server had an error processing your request. Sorry about that! " + - "You can retry your request, or contact us through our help center at " + - "help.openrouter.com if you keep seeing this error.", + "You can retry your request, or contact us through our help center.", type: "server_error", code: null, param: null, @@ -552,7 +550,7 @@ describe("doStream", () => { "custom-provider-header": "provider-header-value", "custom-request-header": "request-header-value", }); - expect(requestHeaders["user-agent"]).toContain("ai-sdk/openrouter/0.0.0-test"); + expect(requestHeaders["user-agent"]).toContain("ai-sdk/hyperbolic/0.0.0-test"); }); it("should pass extra body", async () => { diff --git a/packages/ai-sdk-provider/src/completion/index.ts b/packages/ai-sdk-provider/src/completion/index.ts index 161a855..f24223a 100644 --- a/packages/ai-sdk-provider/src/completion/index.ts +++ b/packages/ai-sdk-provider/src/completion/index.ts @@ -29,12 +29,12 @@ import type { HyperbolicCompletionModelId, HyperbolicCompletionSettings, } from 
"../types/hyperbolic-completion-settings"; -import { openrouterFailedResponseHandler } from "../schemas/error-response"; -import { createFinishReason, mapOpenRouterFinishReason } from "../utils/map-finish-reason"; -import { convertToOpenRouterCompletionPrompt } from "./convert-to-hyperbolic-completion-prompt"; +import { hyperbolicFailedResponseHandler } from "../schemas/error-response"; +import { createFinishReason, mapHyperbolicFinishReason } from "../utils/map-finish-reason"; +import { convertToHyperbolicCompletionPrompt } from "./convert-to-hyperbolic-completion-prompt"; import { HyperbolicCompletionChunkSchema } from "./schemas"; -type OpenRouterCompletionConfig = { +type HyperbolicCompletionConfig = { provider: string; compatibility: "strict" | "compatible"; headers: () => Record; @@ -45,7 +45,7 @@ type OpenRouterCompletionConfig = { export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { readonly specificationVersion = "v3" as const; - readonly provider = "openrouter"; + readonly provider = "hyperbolic"; readonly modelId: HyperbolicCompletionModelId; readonly supportsImageUrls = true; readonly supportedUrls: Record = { @@ -56,12 +56,12 @@ export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { readonly defaultObjectGenerationMode = undefined; readonly settings: HyperbolicCompletionSettings; - private readonly config: OpenRouterCompletionConfig; + private readonly config: HyperbolicCompletionConfig; constructor( modelId: HyperbolicCompletionModelId, settings: HyperbolicCompletionSettings, - config: OpenRouterCompletionConfig, + config: HyperbolicCompletionConfig, ) { this.modelId = modelId; this.settings = settings; @@ -82,7 +82,7 @@ export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { tools, toolChoice, }: LanguageModelV3CallOptions) { - const { prompt: completionPrompt } = convertToOpenRouterCompletionPrompt({ + const { prompt: completionPrompt } = convertToHyperbolicCompletionPrompt({ prompt, 
inputFormat: "prompt", }); @@ -146,11 +146,11 @@ export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { options: LanguageModelV3CallOptions, ): Promise>> { const providerOptions = options.providerOptions || {}; - const openrouterOptions = providerOptions.openrouter || {}; + const hyperbolicOptions = providerOptions.hyperbolic || {}; const args = { ...this.getArgs(options), - ...openrouterOptions, + ...hyperbolicOptions, }; const { value: response, responseHeaders } = await postJsonToApi({ @@ -160,7 +160,7 @@ export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { }), headers: combineHeaders(this.config.headers(), options.headers), body: args, - failedResponseHandler: openrouterFailedResponseHandler, + failedResponseHandler: hyperbolicFailedResponseHandler, successfulResponseHandler: createJsonResponseHandler(HyperbolicCompletionChunkSchema), abortSignal: options.abortSignal, fetch: this.config.fetch, @@ -196,7 +196,7 @@ export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { text: choice.text ?? "", }, ], - finishReason: mapOpenRouterFinishReason(choice.finish_reason), + finishReason: mapHyperbolicFinishReason(choice.finish_reason), usage: { inputTokens: { total: response.usage?.prompt_tokens ?? 0, @@ -221,11 +221,11 @@ export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { options: LanguageModelV3CallOptions, ): Promise>> { const providerOptions = options.providerOptions || {}; - const openrouterOptions = providerOptions.openrouter || {}; + const hyperbolicOptions = providerOptions.hyperbolic || {}; const args = { ...this.getArgs(options), - ...openrouterOptions, + ...hyperbolicOptions, }; const { value: response, responseHeaders } = await postJsonToApi({ @@ -242,7 +242,7 @@ export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { stream_options: this.config.compatibility === "strict" ? 
{ include_usage: true } : undefined, }, - failedResponseHandler: openrouterFailedResponseHandler, + failedResponseHandler: hyperbolicFailedResponseHandler, successfulResponseHandler: createEventSourceResponseHandler(HyperbolicCompletionChunkSchema), abortSignal: options.abortSignal, fetch: this.config.fetch, @@ -326,7 +326,7 @@ export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { const choice = value.choices[0]; if (choice?.finish_reason != null) { - finishReason = mapOpenRouterFinishReason(choice.finish_reason); + finishReason = mapHyperbolicFinishReason(choice.finish_reason); } if (choice?.text != null) { diff --git a/packages/ai-sdk-provider/src/completion/schemas.ts b/packages/ai-sdk-provider/src/completion/schemas.ts index 12ba2da..01d8e66 100644 --- a/packages/ai-sdk-provider/src/completion/schemas.ts +++ b/packages/ai-sdk-provider/src/completion/schemas.ts @@ -4,7 +4,7 @@ import { z } from "zod/v4"; -import { OpenRouterErrorResponseSchema } from "../schemas/error-response"; +import { HyperbolicErrorResponseSchema } from "../schemas/error-response"; import { ReasoningDetailArraySchema } from "../schemas/reasoning-details"; // limited version of the schema, focussed on what is needed for the implementation @@ -64,5 +64,5 @@ export const HyperbolicCompletionChunkSchema = z.union([ .nullish(), }) .passthrough(), - OpenRouterErrorResponseSchema, + HyperbolicErrorResponseSchema, ]); diff --git a/packages/ai-sdk-provider/src/facade.ts b/packages/ai-sdk-provider/src/facade.ts index 69bd412..bfa4f4b 100644 --- a/packages/ai-sdk-provider/src/facade.ts +++ b/packages/ai-sdk-provider/src/facade.ts @@ -28,7 +28,7 @@ The default prefix is `https://api.hyperbolic.xyz/v1`. /** API key that is being sent using the `Authorization` header. -It defaults to the `OPENROUTER_API_KEY` environment variable. +It defaults to the `HYPERBOLIC_API_KEY` environment variable. */ readonly apiKey?: string; @@ -59,7 +59,7 @@ Custom headers to include in the requests. 
headers: () => ({ Authorization: `Bearer ${loadApiKey({ apiKey: this.apiKey, - environmentVariableName: "OPENROUTER_API_KEY", + environmentVariableName: "HYPERBOLIC_API_KEY", description: "Hyperbolic", })}`, ...this.headers, diff --git a/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts b/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts index 1f4b3c7..830aee9 100644 --- a/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts +++ b/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts @@ -1,6 +1,6 @@ import type { GenerateImageResult } from "ai"; -import type { OpenRouterSharedSettings as HyperbolicSharedSettings } from "../types"; +import type { HyperbolicSharedSettings } from "../types"; export type HyperbolicImageModelId = string; diff --git a/packages/ai-sdk-provider/src/schemas/error-response.test.ts b/packages/ai-sdk-provider/src/schemas/error-response.test.ts index 7da8009..fd8e27b 100644 --- a/packages/ai-sdk-provider/src/schemas/error-response.test.ts +++ b/packages/ai-sdk-provider/src/schemas/error-response.test.ts @@ -2,9 +2,9 @@ // Original work Copyright 2025 OpenRouter Inc. 
// Licensed under the Apache License, Version 2.0 -import { OpenRouterErrorResponseSchema } from "./error-response"; +import { HyperbolicErrorResponseSchema } from "./error-response"; -describe("OpenRouterErrorResponseSchema", () => { +describe("HyperbolicErrorResponseSchema", () => { it("should be valid without a type, code, and param", () => { const errorWithoutTypeCodeAndParam = { error: { @@ -14,7 +14,7 @@ describe("OpenRouterErrorResponseSchema", () => { user_id: "example_1", }; - const result = OpenRouterErrorResponseSchema.parse(errorWithoutTypeCodeAndParam); + const result = HyperbolicErrorResponseSchema.parse(errorWithoutTypeCodeAndParam); expect(result).toEqual({ error: { @@ -39,7 +39,7 @@ describe("OpenRouterErrorResponseSchema", () => { }, }; - const result = OpenRouterErrorResponseSchema.parse(errorWithType); + const result = HyperbolicErrorResponseSchema.parse(errorWithType); expect(result).toEqual({ error: { diff --git a/packages/ai-sdk-provider/src/schemas/error-response.ts b/packages/ai-sdk-provider/src/schemas/error-response.ts index 61b5df8..d4f5cb2 100644 --- a/packages/ai-sdk-provider/src/schemas/error-response.ts +++ b/packages/ai-sdk-provider/src/schemas/error-response.ts @@ -8,7 +8,7 @@ import { z } from "zod/v4"; // Use SDK's ChatErrorError type but wrap in response schema // SDK type: { code: string | number | null; message: string; param?: string | null; type?: string | null } -export const OpenRouterErrorResponseSchema = z +export const HyperbolicErrorResponseSchema = z .object({ error: z .object({ @@ -23,9 +23,9 @@ export const OpenRouterErrorResponseSchema = z }) .passthrough(); -export type OpenRouterErrorData = z.infer; +export type HyperbolicErrorData = z.infer; -export const openrouterFailedResponseHandler = createJsonErrorResponseHandler({ - errorSchema: OpenRouterErrorResponseSchema, - errorToMessage: (data: OpenRouterErrorData) => data.error.message, +export const hyperbolicFailedResponseHandler = 
createJsonErrorResponseHandler({ + errorSchema: HyperbolicErrorResponseSchema, + errorToMessage: (data: HyperbolicErrorData) => data.error.message, }); diff --git a/packages/ai-sdk-provider/src/schemas/provider-metadata.ts b/packages/ai-sdk-provider/src/schemas/provider-metadata.ts index 73372db..84bb789 100644 --- a/packages/ai-sdk-provider/src/schemas/provider-metadata.ts +++ b/packages/ai-sdk-provider/src/schemas/provider-metadata.ts @@ -70,7 +70,7 @@ export const HyperbolicProviderMetadataSchema = z }) .catchall(z.any()); -export type OpenRouterProviderMetadata = z.infer; +export type HyperbolicProviderMetadata = z.infer; /** * Schema for parsing provider options that may contain reasoning_details and annotations diff --git a/packages/ai-sdk-provider/src/tests/provider-options.test.ts b/packages/ai-sdk-provider/src/tests/provider-options.test.ts index c2a7f9a..9ca9ce2 100644 --- a/packages/ai-sdk-provider/src/tests/provider-options.test.ts +++ b/packages/ai-sdk-provider/src/tests/provider-options.test.ts @@ -28,17 +28,17 @@ describe("providerOptions", () => { vi.clearAllMocks(); }); - it("should set providerOptions openrouter to extra body", async () => { - const openrouter = createHyperbolic({ + it("should set providerOptions hyperbolic to extra body", async () => { + const hyperbolic = createHyperbolic({ apiKey: "test", }); - const model = openrouter("anthropic/claude-3.7-sonnet"); + const model = hyperbolic("anthropic/claude-3.7-sonnet"); await streamText({ model: model, messages: TEST_MESSAGES, providerOptions: { - openrouter: { + hyperbolic: { reasoning: { max_tokens: 1000, }, diff --git a/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts b/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts index 2156c13..c3b9ffc 100644 --- a/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts +++ b/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts @@ -120,10 +120,10 @@ describe("Hyperbolic Streaming Usage 
Accounting", () => { // Verify metadata is included expect(finishChunk?.providerMetadata).toBeDefined(); - const openrouterData = finishChunk?.providerMetadata?.hyperbolic; - expect(openrouterData).toBeDefined(); + const hyperbolicData = finishChunk?.providerMetadata?.hyperbolic; + expect(hyperbolicData).toBeDefined(); - const usage = openrouterData?.usage; + const usage = hyperbolicData?.usage; expect(usage).toMatchObject({ promptTokens: 10, completionTokens: 20, diff --git a/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts b/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts index b0eb8ef..793e548 100644 --- a/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts +++ b/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts @@ -123,10 +123,10 @@ describe("Hyperbolic Usage Accounting", () => { // Check for Hyperbolic usage data expect(providerData?.hyperbolic).toBeDefined(); - const openrouterData = providerData?.hyperbolic as Record; - expect(openrouterData.usage).toBeDefined(); + const hyperbolicData = providerData?.hyperbolic as Record; + expect(hyperbolicData.usage).toBeDefined(); - const usage = openrouterData.usage; + const usage = hyperbolicData.usage; expect(usage).toMatchObject({ promptTokens: 10, completionTokens: 20, diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts b/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts index f2690cc..693d895 100644 --- a/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts +++ b/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts @@ -59,8 +59,7 @@ export interface ChatCompletionContentPartText { cache_control?: HyperbolicCacheControl; } -/** https://openrouter.ai/docs/guides/overview/multimodal/audio */ -export const OPENROUTER_AUDIO_FORMATS = [ +export const HYPERBOLIC_AUDIO_FORMATS = [ "wav", "mp3", "aiff", @@ -72,13 +71,13 @@ export const OPENROUTER_AUDIO_FORMATS = [ "pcm24", ] as const; -export 
type OpenRouterAudioFormat = (typeof OPENROUTER_AUDIO_FORMATS)[number]; +export type HyperbolicAudioFormat = (typeof HYPERBOLIC_AUDIO_FORMATS)[number]; export interface ChatCompletionContentPartInputAudio { type: "input_audio"; input_audio: { data: string; - format: OpenRouterAudioFormat; + format: HyperbolicAudioFormat; }; cache_control?: HyperbolicCacheControl; } diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts index 073ead4..46aae13 100644 --- a/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts +++ b/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts @@ -4,7 +4,7 @@ import type * as models from "@openrouter/sdk/models"; -import type { OpenRouterSharedSettings } from ".."; +import type { HyperbolicSharedSettings } from ".."; // https://api.hyperbolic.xyz/v1/models export type HyperbolicChatModelId = string; @@ -89,7 +89,6 @@ monitor and detect abuse. Learn more. * - "native": Use provider's built-in web search * - "exa": Use Exa's search API * - undefined: Native if supported, otherwise Exa - * @see https://openrouter.ai/docs/features/web-search */ engine?: models.Engine; }; @@ -97,7 +96,6 @@ monitor and detect abuse. Learn more. /** * Debug options for troubleshooting API requests. * Only works with streaming requests. - * @see https://openrouter.ai/docs/api-reference/debugging */ debug?: { /** @@ -160,4 +158,4 @@ monitor and detect abuse. Learn more. */ zdr?: boolean; }; -} & OpenRouterSharedSettings; +} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts index e7c61b6..e8c1193 100644 --- a/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts +++ b/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts @@ -2,7 +2,7 @@ // Original work Copyright 2025 OpenRouter Inc. 
// Licensed under the Apache License, Version 2.0 -import type { OpenRouterSharedSettings } from "."; +import type { HyperbolicSharedSettings } from "."; export type HyperbolicCompletionModelId = string; @@ -40,4 +40,4 @@ tokens that were generated. The suffix that comes after a completion of inserted text. */ suffix?: string; -} & OpenRouterSharedSettings; +} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider/src/types/index.ts b/packages/ai-sdk-provider/src/types/index.ts index 86e20fa..8344498 100644 --- a/packages/ai-sdk-provider/src/types/index.ts +++ b/packages/ai-sdk-provider/src/types/index.ts @@ -6,11 +6,10 @@ import type { LanguageModelV3, LanguageModelV3Prompt } from "@ai-sdk/provider"; export type { LanguageModelV3, LanguageModelV3Prompt }; -export type OpenRouterProviderOptions = { +export type HyperbolicProviderOptions = { models?: string[]; /** - * https://openrouter.ai/docs/use-cases/reasoning-tokens * One of `max_tokens` or `effort` is required. * If `exclude` is true, reasoning will be removed from the response. Default is false. */ @@ -33,7 +32,7 @@ export type OpenRouterProviderOptions = { user?: string; }; -export type OpenRouterSharedSettings = OpenRouterProviderOptions & { +export type HyperbolicSharedSettings = HyperbolicProviderOptions & { /** * @deprecated use `reasoning` instead */ @@ -43,7 +42,6 @@ export type OpenRouterSharedSettings = OpenRouterProviderOptions & { /** * Enable usage accounting to get detailed token usage information. 
- * https://openrouter.ai/docs/use-cases/usage-accounting */ usage?: { /** @@ -55,7 +53,6 @@ export type OpenRouterSharedSettings = OpenRouterProviderOptions & { /** * Usage accounting response - * @see https://openrouter.ai/docs/use-cases/usage-accounting */ export type HyperbolicUsageAccounting = { promptTokens: number; diff --git a/packages/ai-sdk-provider/src/utils/map-finish-reason.ts b/packages/ai-sdk-provider/src/utils/map-finish-reason.ts index e836b07..1600de4 100644 --- a/packages/ai-sdk-provider/src/utils/map-finish-reason.ts +++ b/packages/ai-sdk-provider/src/utils/map-finish-reason.ts @@ -22,7 +22,7 @@ function mapToUnified(finishReason: string | null | undefined): UnifiedFinishRea } } -export function mapOpenRouterFinishReason( +export function mapHyperbolicFinishReason( finishReason: string | null | undefined, ): LanguageModelV3FinishReason { return { From 911dd3500c818f7ea4dafe4dcf1e6aae949ea1d4 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 13:55:48 -0800 Subject: [PATCH 14/22] update model list --- .../src/__generated__/models.gen.ts | 50 +++++++++++-------- .../src/scripts/templates/models.ts.hbs | 6 +-- 2 files changed, 31 insertions(+), 25 deletions(-) diff --git a/packages/ai-sdk-provider/src/__generated__/models.gen.ts b/packages/ai-sdk-provider/src/__generated__/models.gen.ts index 08a1ae7..06103ec 100644 --- a/packages/ai-sdk-provider/src/__generated__/models.gen.ts +++ b/packages/ai-sdk-provider/src/__generated__/models.gen.ts @@ -4,38 +4,44 @@ const _models = [ "Qwen/Qwen2.5-72B-Instruct", - "Qwen/Qwen2.5-VL-72B-Instruct", - "meta-llama/Meta-Llama-3-70B-Instruct", - "meta-llama/Meta-Llama-3.1-70B-Instruct", - "meta-llama/Meta-Llama-3.1-405B-FP8", - "Qwen/Qwen2.5-VL-7B-Instruct", - "meta-llama/Meta-Llama-3.1-405B-Instruct", - "Qwen/QwQ-32B", "deepseek-ai/DeepSeek-V3", - "Qwen/QwQ-32B-Preview", + "deepseek-ai/DeepSeek-V3-0324", "meta-llama/Llama-3.3-70B-Instruct", - "NousResearch/Hermes-3-Llama-3.1-70B", - 
"meta-llama/Meta-Llama-3.1-405B", + "Qwen/Qwen2.5-Coder-32B-Instruct", "meta-llama/Llama-3.2-3B-Instruct", - "FLUX.1-dev", - "mistralai/Pixtral-12B-2409", - "StableDiffusion", + "meta-llama/Meta-Llama-3.1-405B-Instruct", + "meta-llama/Meta-Llama-3.1-70B-Instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct", - "Qwen/Qwen2.5-Coder-32B-Instruct", - "TTS", + "meta-llama/Meta-Llama-3.1-405B", "deepseek-ai/DeepSeek-R1", + "deepseek-ai/DeepSeek-R1-0528", + "Qwen/QwQ-32B", + "Qwen/Qwen3-235B-A22B", + "Qwen/Qwen3-235B-A22B-Instruct-2507", + "Qwen/Qwen3-Coder-480B-A35B-Instruct", + "openai/gpt-oss-120b-turbo", + "openai/gpt-oss-120b", + "openai/gpt-oss-20b", + "Qwen/Qwen3-Next-80B-A3B-Instruct", + "Qwen/Qwen3-Next-80B-A3B-Thinking", + "StableDiffusion", + "FLUX.1-dev", + "TTS", + "Qwen/Qwen2.5-VL-72B-Instruct", + "Qwen/Qwen2.5-VL-7B-Instruct", + "mistralai/Pixtral-12B-2409", ] as const; -const _imageModels = [] as const; +const _imageModels = [ +] as const; -const _chatModels = [] as const; +const _chatModels = [ +] as const; -const _completionModels = [] as const; +const _completionModels = [ +] as const; export type HyperbolicImageModelId = (typeof _imageModels)[number] | string; export type HyperbolicChatModelId = (typeof _chatModels)[number] | string; export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | string; -export type HyperbolicModelId = - | HyperbolicImageModelId - | HyperbolicChatModelId - | HyperbolicCompletionModelId; +export type HyperbolicModelId = HyperbolicImageModelId | HyperbolicChatModelId | HyperbolicCompletionModelId; diff --git a/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs b/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs index 4eefd94..47daf9c 100644 --- a/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs +++ b/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs @@ -26,7 +26,7 @@ const _completionModels = [ {{/each}} ] as const; -export type HyperbolicImageModelId = (typeof 
_imageModels)[number] | (string & {}); -export type HyperbolicChatModelId = (typeof _chatModels)[number] | (string & {}); -export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | (string & {}); +export type HyperbolicImageModelId = (typeof _imageModels)[number] | string; +export type HyperbolicChatModelId = (typeof _chatModels)[number] | string; +export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | string; export type HyperbolicModelId = HyperbolicImageModelId | HyperbolicChatModelId | HyperbolicCompletionModelId; From 8f246875108a3b1033ddbe8ae145c15d1f4a3be5 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 14:02:27 -0800 Subject: [PATCH 15/22] cleanup models script --- .../src/__generated__/models.gen.ts | 44 +++++++++++++++---- .../src/scripts/templates/models.ts.hbs | 6 +-- .../src/scripts/update-models-list.ts | 15 +++++-- 3 files changed, 50 insertions(+), 15 deletions(-) diff --git a/packages/ai-sdk-provider/src/__generated__/models.gen.ts b/packages/ai-sdk-provider/src/__generated__/models.gen.ts index 06103ec..56c5081 100644 --- a/packages/ai-sdk-provider/src/__generated__/models.gen.ts +++ b/packages/ai-sdk-provider/src/__generated__/models.gen.ts @@ -3,6 +3,17 @@ // This file is auto-generated by the pnpm codegen:update-models script. Do not edit manually. 
const _models = [ +] as const; + +const _imageModels = [ + "StableDiffusion", + "FLUX.1-dev", + "Qwen/Qwen2.5-VL-72B-Instruct", + "Qwen/Qwen2.5-VL-7B-Instruct", + "mistralai/Pixtral-12B-2409", +] as const; + +const _chatModels = [ "Qwen/Qwen2.5-72B-Instruct", "deepseek-ai/DeepSeek-V3", "deepseek-ai/DeepSeek-V3-0324", @@ -24,21 +35,36 @@ const _models = [ "openai/gpt-oss-20b", "Qwen/Qwen3-Next-80B-A3B-Instruct", "Qwen/Qwen3-Next-80B-A3B-Thinking", - "StableDiffusion", - "FLUX.1-dev", - "TTS", "Qwen/Qwen2.5-VL-72B-Instruct", "Qwen/Qwen2.5-VL-7B-Instruct", "mistralai/Pixtral-12B-2409", ] as const; -const _imageModels = [ -] as const; - -const _chatModels = [ -] as const; - const _completionModels = [ + "Qwen/Qwen2.5-72B-Instruct", + "deepseek-ai/DeepSeek-V3", + "deepseek-ai/DeepSeek-V3-0324", + "meta-llama/Llama-3.3-70B-Instruct", + "Qwen/Qwen2.5-Coder-32B-Instruct", + "meta-llama/Llama-3.2-3B-Instruct", + "meta-llama/Meta-Llama-3.1-405B-Instruct", + "meta-llama/Meta-Llama-3.1-70B-Instruct", + "meta-llama/Meta-Llama-3.1-8B-Instruct", + "meta-llama/Meta-Llama-3.1-405B", + "deepseek-ai/DeepSeek-R1", + "deepseek-ai/DeepSeek-R1-0528", + "Qwen/QwQ-32B", + "Qwen/Qwen3-235B-A22B", + "Qwen/Qwen3-235B-A22B-Instruct-2507", + "Qwen/Qwen3-Coder-480B-A35B-Instruct", + "openai/gpt-oss-120b-turbo", + "openai/gpt-oss-120b", + "openai/gpt-oss-20b", + "Qwen/Qwen3-Next-80B-A3B-Instruct", + "Qwen/Qwen3-Next-80B-A3B-Thinking", + "Qwen/Qwen2.5-VL-72B-Instruct", + "Qwen/Qwen2.5-VL-7B-Instruct", + "mistralai/Pixtral-12B-2409", ] as const; export type HyperbolicImageModelId = (typeof _imageModels)[number] | string; diff --git a/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs b/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs index 47daf9c..3f61c98 100644 --- a/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs +++ b/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs @@ -9,19 +9,19 @@ const _models = [ ] as const; const _imageModels = [ -{{#each 
imageModelId}} +{{#each imageModelIds}} "{{this}}", {{/each}} ] as const; const _chatModels = [ -{{#each chatModelId}} +{{#each chatModelIds}} "{{this}}", {{/each}} ] as const; const _completionModels = [ -{{#each completionModelId}} +{{#each completionModelIds}} "{{this}}", {{/each}} ] as const; diff --git a/packages/ai-sdk-provider/src/scripts/update-models-list.ts b/packages/ai-sdk-provider/src/scripts/update-models-list.ts index b57ebd4..af5998e 100644 --- a/packages/ai-sdk-provider/src/scripts/update-models-list.ts +++ b/packages/ai-sdk-provider/src/scripts/update-models-list.ts @@ -19,15 +19,24 @@ const main = async () => { data: { data }, } = await showModelsV1ModelsGet({ client: hyperbolicClient, throwOnError: true }); - const models = data as { id: string; [key: string]: unknown }[]; - const modelIds = models.map((model) => model.id); + const models = data as { + id: string; + supports_chat: boolean; + supports_image_input: boolean; + [key: string]: unknown; + }[]; + const imageModelIds = models + .filter((model) => model.supports_image_input) + .map((model) => model.id); + const chatModelIds = models.filter((model) => model.supports_chat).map((model) => model.id); + const completionModelIds = chatModelIds; const __dirname = path.dirname(fileURLToPath(import.meta.url)); const templatePath = path.join(__dirname, "templates", "models.ts.hbs"); const templateContent = readFileSync(templatePath, "utf-8"); const template = Handlebars.compile(templateContent); - const output = template({ modelId: modelIds }); + const output = template({ imageModelIds, chatModelIds, completionModelIds }); writeFileSync(new URL("../__generated__/models.gen.ts", import.meta.url), output); }; From d6559c8ebb2378a09a3ac9894347870a26062355 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 14:07:33 -0800 Subject: [PATCH 16/22] more cleanup --- packages/ai-sdk-provider/src/__generated__/models.gen.ts | 6 +++--- .../ai-sdk-provider/src/image/hyperbolic-image-settings.ts 
| 2 +- .../ai-sdk-provider/src/scripts/templates/models.ts.hbs | 6 +++--- .../ai-sdk-provider/src/types/hyperbolic-chat-settings.ts | 2 +- .../src/types/hyperbolic-completion-settings.ts | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/ai-sdk-provider/src/__generated__/models.gen.ts b/packages/ai-sdk-provider/src/__generated__/models.gen.ts index 56c5081..ad31532 100644 --- a/packages/ai-sdk-provider/src/__generated__/models.gen.ts +++ b/packages/ai-sdk-provider/src/__generated__/models.gen.ts @@ -67,7 +67,7 @@ const _completionModels = [ "mistralai/Pixtral-12B-2409", ] as const; -export type HyperbolicImageModelId = (typeof _imageModels)[number] | string; -export type HyperbolicChatModelId = (typeof _chatModels)[number] | string; -export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | string; +export type HyperbolicImageModelId = (typeof _imageModels)[number] | (string & {}); +export type HyperbolicChatModelId = (typeof _chatModels)[number] | (string & {}); +export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | (string & {}); export type HyperbolicModelId = HyperbolicImageModelId | HyperbolicChatModelId | HyperbolicCompletionModelId; diff --git a/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts b/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts index 830aee9..1692e45 100644 --- a/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts +++ b/packages/ai-sdk-provider/src/image/hyperbolic-image-settings.ts @@ -2,7 +2,7 @@ import type { GenerateImageResult } from "ai"; import type { HyperbolicSharedSettings } from "../types"; -export type HyperbolicImageModelId = string; +export type { HyperbolicImageModelId } from "../__generated__/models.gen"; export type HyperbolicImageSettings = { /** diff --git a/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs b/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs index 3f61c98..26ff915 100644 
--- a/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs +++ b/packages/ai-sdk-provider/src/scripts/templates/models.ts.hbs @@ -26,7 +26,7 @@ const _completionModels = [ {{/each}} ] as const; -export type HyperbolicImageModelId = (typeof _imageModels)[number] | string; -export type HyperbolicChatModelId = (typeof _chatModels)[number] | string; -export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | string; +export type HyperbolicImageModelId = (typeof _imageModels)[number] | (string & {}); +export type HyperbolicChatModelId = (typeof _chatModels)[number] | (string & {}); +export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | (string & {}); export type HyperbolicModelId = HyperbolicImageModelId | HyperbolicChatModelId | HyperbolicCompletionModelId; diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts index 46aae13..aa1676e 100644 --- a/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts +++ b/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts @@ -7,7 +7,7 @@ import type * as models from "@openrouter/sdk/models"; import type { HyperbolicSharedSettings } from ".."; // https://api.hyperbolic.xyz/v1/models -export type HyperbolicChatModelId = string; +export type { HyperbolicChatModelId } from "../__generated__/models.gen"; export type HyperbolicChatSettings = { /** diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts index e8c1193..f2c75b1 100644 --- a/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts +++ b/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts @@ -4,7 +4,7 @@ import type { HyperbolicSharedSettings } from "."; -export type HyperbolicCompletionModelId = string; +export type { HyperbolicCompletionModelId } from "../__generated__/models.gen"; export type 
HyperbolicCompletionSettings = { /** From dfd4945293797de827fb71f1bc59280d4d679cc2 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 15:41:34 -0800 Subject: [PATCH 17/22] consolidate error logic --- packages/ai-sdk-provider/src/image/index.ts | 2 +- packages/ai-sdk-provider/src/index.ts | 1 + .../src/utils/hyperbolic-error.ts | 49 ------------------- 3 files changed, 2 insertions(+), 50 deletions(-) delete mode 100644 packages/ai-sdk-provider/src/utils/hyperbolic-error.ts diff --git a/packages/ai-sdk-provider/src/image/index.ts b/packages/ai-sdk-provider/src/image/index.ts index 17d57ce..a04cdc8 100644 --- a/packages/ai-sdk-provider/src/image/index.ts +++ b/packages/ai-sdk-provider/src/image/index.ts @@ -8,7 +8,7 @@ import type { HyperbolicImageProviderResponseMetadata, HyperbolicImageSettings, } from "./hyperbolic-image-settings"; -import { hyperbolicFailedResponseHandler } from "../utils/hyperbolic-error"; +import { hyperbolicFailedResponseHandler } from "../schemas/error-response"; type HyperbolicImageModelConfig = { provider: string; diff --git a/packages/ai-sdk-provider/src/index.ts b/packages/ai-sdk-provider/src/index.ts index 9fa3755..505c903 100644 --- a/packages/ai-sdk-provider/src/index.ts +++ b/packages/ai-sdk-provider/src/index.ts @@ -1,3 +1,4 @@ export * from "./facade"; export * from "./provider"; export * from "./types"; +export * from "./schemas/error-response"; diff --git a/packages/ai-sdk-provider/src/utils/hyperbolic-error.ts b/packages/ai-sdk-provider/src/utils/hyperbolic-error.ts deleted file mode 100644 index 60eed7e..0000000 --- a/packages/ai-sdk-provider/src/utils/hyperbolic-error.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { TypeValidationError } from "ai"; -import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"; -import { JSONParseError } from "ai"; -import { z } from "zod"; - -export const HyperbolicErrorResponseSchema = z.object({ - object: z.literal("error"), - message: z.string(), - type: z.string(), - param: z.any().nullable(), - code: z.coerce.number().nullable(), -}); - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -export const isHyperbolicError = (data: any): data is HyperbolicErrorData => { - return "object" in data && data.object === "error"; -}; - -export type HyperbolicErrorData = z.infer; - -export const hyperbolicFailedResponseHandler = createJsonErrorResponseHandler({ - errorSchema: HyperbolicErrorResponseSchema, - errorToMessage: (data) => data.message, -}); - -/** - * Error messages from the API are sometimes an ugly combo of text and JSON in a single chunk. Extract data from error message if it contains JSON - */ -export const tryParsingHyperbolicError = (error: JSONParseError | TypeValidationError) => { - if (!JSONParseError.isInstance(error)) { - return undefined; - } - - const jsonMatch = error.text.match(/\{.*\}/); // Match between brackets - if (jsonMatch) { - try { - const parsedErrorJson = JSON.parse(jsonMatch[0]); - if (parsedErrorJson.message) { - return HyperbolicErrorResponseSchema.parse(parsedErrorJson); - } - } catch { - return undefined; - } - } -}; From e1192c1ef182bf875a7b7be9b6b9a6ac9102cfda Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 16:34:11 -0800 Subject: [PATCH 18/22] remove all chat and completion logic - we now prefer openrouter package directly --- packages/ai-sdk-provider/LICENSE | 1 - packages/ai-sdk-provider/README.md | 52 +- packages/ai-sdk-provider/package.json | 1 - ...onvert-to-hyperbolic-chat-messages.test.ts | 948 ------- .../convert-to-hyperbolic-chat-messages.ts | 299 --- 
.../ai-sdk-provider/src/chat/errors.test.ts | 94 - .../src/chat/file-parser-schema.test.ts | 115 - .../src/chat/file-url-utils.ts | 150 -- .../src/chat/get-tool-choice.ts | 46 - .../ai-sdk-provider/src/chat/index.test.ts | 2278 ----------------- packages/ai-sdk-provider/src/chat/index.ts | 1051 -------- packages/ai-sdk-provider/src/chat/is-url.ts | 19 - .../src/chat/large-pdf-response.test.ts | 105 - .../src/chat/payload-comparison.test.ts | 160 -- packages/ai-sdk-provider/src/chat/schemas.ts | 290 --- ...convert-to-hyperbolic-completion-prompt.ts | 155 -- .../src/completion/index.test.ts | 581 ----- .../ai-sdk-provider/src/completion/index.ts | 360 --- .../ai-sdk-provider/src/completion/schemas.ts | 68 - packages/ai-sdk-provider/src/facade.ts | 91 - packages/ai-sdk-provider/src/image/index.ts | 1 - packages/ai-sdk-provider/src/index.ts | 2 - .../ai-sdk-provider/src/internal/index.ts | 5 - packages/ai-sdk-provider/src/provider.ts | 113 +- .../src/schemas/error-response.test.ts | 4 - .../src/schemas/error-response.ts | 11 +- .../ai-sdk-provider/src/schemas/format.ts | 15 - packages/ai-sdk-provider/src/schemas/image.ts | 4 - .../src/schemas/provider-metadata.ts | 87 - .../src/schemas/reasoning-details.ts | 89 - .../src/scripts/update-models-list.ts | 4 - .../src/test-utils/test-server.ts | 150 -- .../src/tests/provider-options.test.ts | 63 - .../src/tests/stream-usage-accounting.test.ts | 177 -- .../src/tests/usage-accounting.test.ts | 327 --- .../hyperbolic-chat-completions-input.ts | 109 - .../src/types/hyperbolic-chat-settings.ts | 161 -- .../types/hyperbolic-completion-settings.ts | 43 - packages/ai-sdk-provider/src/types/index.ts | 19 - .../src/utils/map-finish-reason.ts | 39 - .../src/utils/remove-undefined.ts | 4 - .../ai-sdk-provider/src/utils/type-guards.ts | 10 - .../src/utils/with-user-agent-suffix.ts | 4 - 43 files changed, 21 insertions(+), 8284 deletions(-) delete mode 100644 packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts 
delete mode 100644 packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts delete mode 100644 packages/ai-sdk-provider/src/chat/errors.test.ts delete mode 100644 packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts delete mode 100644 packages/ai-sdk-provider/src/chat/file-url-utils.ts delete mode 100644 packages/ai-sdk-provider/src/chat/get-tool-choice.ts delete mode 100644 packages/ai-sdk-provider/src/chat/index.test.ts delete mode 100644 packages/ai-sdk-provider/src/chat/index.ts delete mode 100644 packages/ai-sdk-provider/src/chat/is-url.ts delete mode 100644 packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts delete mode 100644 packages/ai-sdk-provider/src/chat/payload-comparison.test.ts delete mode 100644 packages/ai-sdk-provider/src/chat/schemas.ts delete mode 100644 packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts delete mode 100644 packages/ai-sdk-provider/src/completion/index.test.ts delete mode 100644 packages/ai-sdk-provider/src/completion/index.ts delete mode 100644 packages/ai-sdk-provider/src/completion/schemas.ts delete mode 100644 packages/ai-sdk-provider/src/facade.ts delete mode 100644 packages/ai-sdk-provider/src/schemas/format.ts delete mode 100644 packages/ai-sdk-provider/src/schemas/provider-metadata.ts delete mode 100644 packages/ai-sdk-provider/src/schemas/reasoning-details.ts delete mode 100644 packages/ai-sdk-provider/src/test-utils/test-server.ts delete mode 100644 packages/ai-sdk-provider/src/tests/provider-options.test.ts delete mode 100644 packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts delete mode 100644 packages/ai-sdk-provider/src/tests/usage-accounting.test.ts delete mode 100644 packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts delete mode 100644 packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts delete mode 100644 packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts delete mode 100644 
packages/ai-sdk-provider/src/utils/map-finish-reason.ts delete mode 100644 packages/ai-sdk-provider/src/utils/type-guards.ts diff --git a/packages/ai-sdk-provider/LICENSE b/packages/ai-sdk-provider/LICENSE index 1bceb99..5bca256 100644 --- a/packages/ai-sdk-provider/LICENSE +++ b/packages/ai-sdk-provider/LICENSE @@ -186,7 +186,6 @@ APPENDIX: How to apply the Apache License to your work. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2025 OpenRouter Inc, Copyright 2025 Hyperbolic Labs Inc, Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/packages/ai-sdk-provider/README.md b/packages/ai-sdk-provider/README.md index 45b5825..5c26533 100644 --- a/packages/ai-sdk-provider/README.md +++ b/packages/ai-sdk-provider/README.md @@ -1,8 +1,6 @@ # Hyperbolic Provider for Vercel AI SDK -The [Hyperbolic](https://hyperbolic.xyz/) provider for the [Vercel AI SDK](https://sdk.vercel.ai/docs) gives access to any model found at . - -This is based on the [OpenRouter](https://openrouter.ai/) provider for the Vercel AI SDK, with a number of changes to support the Hyperbolic API and add image generation support. +The [Hyperbolic](https://hyperbolic.xyz/) provider for the [Vercel AI SDK](https://sdk.vercel.ai/docs) gives access to image models found at . For chat and completion models, use the [@openrouter/ai-sdk-provider](https://www.npmjs.com/package/@openrouter/ai-sdk-provider) package instead. 
## Setup @@ -36,50 +34,20 @@ const hyperbolic = createHyperbolic({ apiKey: process.env.HYPERBOLIC_API_KEY, // Found in settings after logging in at https://app.hyperbolic.ai }); -const { text } = await generateText({ - model: hyperbolic.chat("deepseek-ai/DeepSeek-R1"), - prompt: "Write a vegetarian lasagna recipe for 4 people.", -}); -``` - -## Supported models - -This list is not a definitive list of models supported by Hyperbolic, as it constantly changes as we add new models (and deprecate old ones) to our system. -You can find the latest list of models supported by Hyperbolic [here](https://app.hyperbolic.ai/models). - -## Using Models - -### Language Models - -```ts -const { text } = await generateText({ - model: hyperbolic.chat("deepseek-ai/DeepSeek-R1"), - prompt: "Write a vegetarian lasagna recipe for 4 people.", -}); - -const { text } = await generateText({ - model: hyperbolic.completion("deepseek-ai/DeepSeek-R1"), - prompt: "The capital of France is", -}); -``` - -### Image Generation Models - -```ts -import { experimental_generateImage as generateImage } from "ai"; - -// Text to Image -const { images } = await generateImage({ - model: hyperbolic.image("SDXL1.0-base"), - prompt: "A beautiful sunset over a calm ocean", - size: "1024x1024", +const result = await generateImage({ + model: hyperbolic.image("FLUX.1-dev"), + prompt: "An image of a man riding a horse in SF.", + size: `1020x1020`, providerOptions: { hyperbolic: { cfgScale: 5, steps: 30, - negativePrompt: "low quality, blurry, distorted", - enableRefiner: false, } satisfies HyperbolicImageProviderOptions, }, }); ``` + +## Supported models + +This list is not a definitive list of models supported by Hyperbolic, as it constantly changes as we add new models (and deprecate old ones) to our system. +You can find the latest list of models supported by Hyperbolic [here](https://app.hyperbolic.ai/models). 
diff --git a/packages/ai-sdk-provider/package.json b/packages/ai-sdk-provider/package.json index 213e59b..f9a31a9 100644 --- a/packages/ai-sdk-provider/package.json +++ b/packages/ai-sdk-provider/package.json @@ -54,7 +54,6 @@ "@hyperbolic/eslint-config": "workspace:*", "@hyperbolic/prettier-config": "workspace:*", "@hyperbolic/tsconfig": "workspace:*", - "@openrouter/sdk": "^0.1.27", "@types/json-schema": "7.0.15", "ai": "^6.0.48", "eslint": "catalog:", diff --git a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts deleted file mode 100644 index 1e8040d..0000000 --- a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.test.ts +++ /dev/null @@ -1,948 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import { ReasoningDetailType } from "../schemas/reasoning-details"; -import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; -import { MIME_TO_FORMAT } from "./file-url-utils"; - -describe("user messages", () => { - it("should convert image Uint8Array", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "file", - data: new Uint8Array([0, 1, 2, 3]), - mediaType: "image/png", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - }, - ], - }, - ]); - }); - - it("should convert image urls", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "file", - data: "https://example.com/image.png", - mediaType: "image/png", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: 
"user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image_url", - image_url: { url: "https://example.com/image.png" }, - }, - ], - }, - ]); - }); - - it("should convert messages with image base64", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "file", - data: "data:image/png;base64,AAECAw==", - mediaType: "image/png", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - }, - ], - }, - ]); - }); - - it("should convert messages with only a text part to a string content", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ]); - - expect(result).toEqual([{ role: "user", content: "Hello" }]); - }); - - it.each( - Object.entries(MIME_TO_FORMAT).map(([mimeSubtype, format]) => [`audio/${mimeSubtype}`, format]), - )("should convert %s to input_audio with %s format", (mediaType, expectedFormat) => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "file", - data: new Uint8Array([0, 1, 2, 3]), - mediaType, - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "input_audio", - input_audio: { - data: "AAECAw==", - format: expectedFormat, - }, - }, - ], - }, - ]); - }); - - it("should convert audio base64 data URL to input_audio", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "file", - data: "data:audio/mpeg;base64,AAECAw==", - mediaType: "audio/mpeg", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "input_audio", - input_audio: { - data: "AAECAw==", - format: "mp3", - }, - }, - ], - }, - ]); - }); - - it("should convert raw audio base64 
string to input_audio", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "file", - data: "AAECAw==", - mediaType: "audio/mpeg", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "input_audio", - input_audio: { - data: "AAECAw==", - format: "mp3", - }, - }, - ], - }, - ]); - }); - - it("should throw error for audio URLs", async () => { - expect(() => - convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "file", - data: "https://example.com/audio.mp3", - mediaType: "audio/mpeg", - }, - ], - }, - ]), - ).toThrow(/Audio files cannot be provided as URLs/); - }); - - it("should throw error for unsupported audio formats", async () => { - expect(() => - convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "file", - data: new Uint8Array([0, 1, 2, 3]), - mediaType: "audio/webm", - }, - ], - }, - ]), - ).toThrow(/Unsupported audio format: "audio\/webm"/); - }); -}); - -describe("cache control", () => { - it("should pass cache control from system message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "system", - content: "System prompt", - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "system", - content: "System prompt", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should pass cache control from user message provider metadata (single text part)", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control from 
content part provider metadata (single text part)", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control from user message provider metadata (multiple parts)", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "file", - data: new Uint8Array([0, 1, 2, 3]), - mediaType: "image/png", - }, - ], - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: { type: "ephemeral" }, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control from user message provider metadata without cache control (single text part)", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: "Hello", - }, - ]); - }); - - it("should pass cache control to multiple image parts from user message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "file", - data: new Uint8Array([0, 1, 2, 3]), - mediaType: "image/png", - }, - { - type: "file", - data: new Uint8Array([4, 5, 6, 7]), - mediaType: "image/jpeg", - }, - ], - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - 
expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: { type: "ephemeral" }, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - cache_control: { type: "ephemeral" }, - }, - { - type: "image_url", - image_url: { url: "data:image/jpeg;base64,BAUGBw==" }, - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control to file parts from user message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "file", - data: "ZmlsZSBjb250ZW50", - mediaType: "text/plain", - providerOptions: { - hyperbolic: { - filename: "file.txt", - }, - }, - }, - ], - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: { type: "ephemeral" }, - }, - { - type: "file", - file: { - filename: "file.txt", - file_data: "data:text/plain;base64,ZmlsZSBjb250ZW50", - }, - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should handle mixed part-specific and message-level cache control for multiple parts", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - // No part-specific provider metadata - }, - { - type: "file", - data: new Uint8Array([0, 1, 2, 3]), - mediaType: "image/png", - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - { - type: "file", - data: "ZmlsZSBjb250ZW50", - mediaType: "text/plain", - providerOptions: { - hyperbolic: { - filename: "file.txt", - }, - }, - // No part-specific provider metadata - }, - ], - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - 
content: [ - { - type: "text", - text: "Hello", - cache_control: { type: "ephemeral" }, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - cache_control: { type: "ephemeral" }, - }, - { - type: "file", - file: { - filename: "file.txt", - file_data: "data:text/plain;base64,ZmlsZSBjb250ZW50", - }, - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control from individual content part provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - { - type: "file", - data: new Uint8Array([0, 1, 2, 3]), - mediaType: "image/png", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: { type: "ephemeral" }, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - }, - ], - }, - ]); - }); - - it("should pass cache control from assistant message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "assistant", - content: [{ type: "text", text: "Assistant response" }], - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "assistant", - content: "Assistant response", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should pass cache control from tool message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "tool", - content: [ - { - type: "tool-result", - toolCallId: "call-123", - toolName: "calculator", - output: { - type: "json", - value: { answer: 42 }, - }, - }, - ], - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "tool", - tool_call_id: 
"call-123", - content: JSON.stringify({ answer: 42 }), - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should support the alias cache_control field", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "system", - content: "System prompt", - providerOptions: { - anthropic: { - cache_control: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "system", - content: "System prompt", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should support cache control on last message in content array", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "system", - content: "System prompt", - }, - { - role: "user", - content: [ - { type: "text", text: "User prompt" }, - { - type: "text", - text: "User prompt 2", - providerOptions: { - anthropic: { cacheControl: { type: "ephemeral" } }, - }, - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "system", - content: "System prompt", - }, - { - role: "user", - content: [ - { type: "text", text: "User prompt" }, - { - type: "text", - text: "User prompt 2", - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control to audio input parts from user message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Listen to this" }, - { - type: "file", - data: new Uint8Array([0, 1, 2, 3]), - mediaType: "audio/mpeg", - }, - ], - providerOptions: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Listen to this", - cache_control: { type: "ephemeral" }, - }, - { - type: "input_audio", - input_audio: { - data: "AAECAw==", - format: "mp3", - }, - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); -}); - -describe("reasoning_details accumulation", () => { - it("should accumulate 
reasoning_details from reasoning part providerOptions", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "assistant", - content: [ - { - type: "reasoning", - text: "First reasoning chunk", - providerOptions: { - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "First reasoning chunk", - }, - ], - }, - }, - }, - { - type: "reasoning", - text: "Second reasoning chunk", - providerOptions: { - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "Second reasoning chunk", - }, - ], - }, - }, - }, - { - type: "text", - text: "Final response", - }, - ], - providerOptions: { - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "First reasoning chunk", - }, - { - type: ReasoningDetailType.Text, - text: "Second reasoning chunk", - }, - ], - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "assistant", - content: "Final response", - reasoning: "First reasoning chunkSecond reasoning chunk", - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "First reasoning chunk", - }, - { - type: ReasoningDetailType.Text, - text: "Second reasoning chunk", - }, - ], - }, - ]); - }); - - it("should use preserved reasoning_details from message-level providerOptions when available", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "assistant", - content: [ - { - type: "reasoning", - text: "Reasoning text", - // No providerOptions on part - }, - { - type: "text", - text: "Response", - }, - ], - providerOptions: { - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "Preserved reasoning detail", - }, - { - type: ReasoningDetailType.Summary, - summary: "Preserved summary", - }, - ], - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "assistant", - content: "Response", - reasoning: "Reasoning text", - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "Preserved reasoning 
detail", - }, - { - type: ReasoningDetailType.Summary, - summary: "Preserved summary", - }, - ], - }, - ]); - }); - - it("should not include reasoning_details when not present in providerOptions", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "assistant", - content: [ - { - type: "reasoning", - text: "Reasoning text", - // No providerOptions - }, - { - type: "text", - text: "Response", - }, - ], - // No providerOptions - }, - ]); - - expect(result).toEqual([ - { - role: "assistant", - content: "Response", - reasoning: "Reasoning text", - // reasoning_details should be undefined when not preserved - reasoning_details: undefined, - }, - ]); - }); - - it("should handle mixed reasoning parts with and without providerOptions", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "assistant", - content: [ - { - type: "reasoning", - text: "First chunk", - providerOptions: { - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "First chunk", - }, - ], - }, - }, - }, - { - type: "reasoning", - text: "Second chunk", - // No providerOptions - }, - { - type: "text", - text: "Response", - }, - ], - providerOptions: { - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "First chunk", - }, - ], - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "assistant", - content: "Response", - reasoning: "First chunkSecond chunk", - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "First chunk", - }, - ], - }, - ]); - }); -}); diff --git a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts b/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts deleted file mode 100644 index c5367cd..0000000 --- a/packages/ai-sdk-provider/src/chat/convert-to-hyperbolic-chat-messages.ts +++ /dev/null @@ -1,299 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { - LanguageModelV3FilePart, - LanguageModelV3Prompt, - LanguageModelV3TextPart, - LanguageModelV3ToolResultPart, - SharedV3ProviderMetadata, -} from "@ai-sdk/provider"; - -import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; -import type { - ChatCompletionContentPart, - HyperbolicChatCompletionsInput, -} from "../types/hyperbolic-chat-completions-input"; -import { HyperbolicProviderOptionsSchema } from "../schemas/provider-metadata"; -import { getFileUrl, getInputAudioData } from "./file-url-utils"; -import { isUrl } from "./is-url"; - -// Type for Hyperbolic Cache Control following Anthropic's pattern -export type HyperbolicCacheControl = { type: "ephemeral" }; - -function getCacheControl( - providerMetadata: SharedV3ProviderMetadata | undefined, -): HyperbolicCacheControl | undefined { - const anthropic = providerMetadata?.anthropic; - const hyperbolic = providerMetadata?.hyperbolic; - - // Allow both cacheControl and cache_control: - return (hyperbolic?.cacheControl ?? - hyperbolic?.cache_control ?? - anthropic?.cacheControl ?? - anthropic?.cache_control) as HyperbolicCacheControl | undefined; -} - -export function convertToHyperbolicChatMessages( - prompt: LanguageModelV3Prompt, -): HyperbolicChatCompletionsInput { - const messages: HyperbolicChatCompletionsInput = []; - for (const { role, content, providerOptions } of prompt) { - switch (role) { - case "system": { - messages.push({ - role: "system", - content, - cache_control: getCacheControl(providerOptions), - }); - break; - } - - case "user": { - if (content.length === 1 && content[0]?.type === "text") { - const cacheControl = - getCacheControl(providerOptions) ?? getCacheControl(content[0].providerOptions); - const contentWithCacheControl: string | ChatCompletionContentPart[] = cacheControl - ? 
[ - { - type: "text", - text: content[0].text, - cache_control: cacheControl, - }, - ] - : content[0].text; - messages.push({ - role: "user", - content: contentWithCacheControl, - }); - break; - } - - // Get message level cache control - const messageCacheControl = getCacheControl(providerOptions); - const contentParts: ChatCompletionContentPart[] = content.map( - (part: LanguageModelV3TextPart | LanguageModelV3FilePart) => { - const cacheControl = getCacheControl(part.providerOptions) ?? messageCacheControl; - - switch (part.type) { - case "text": - return { - type: "text" as const, - text: part.text, - // For text parts, only use part-specific cache control - cache_control: cacheControl, - }; - case "file": { - if (part.mediaType?.startsWith("image/")) { - const url = getFileUrl({ - part, - defaultMediaType: "image/jpeg", - }); - return { - type: "image_url" as const, - image_url: { - url, - }, - // For image parts, use part-specific or message-level cache control - cache_control: cacheControl, - }; - } - - // Handle audio files for input_audio format - if (part.mediaType?.startsWith("audio/")) { - return { - type: "input_audio" as const, - input_audio: getInputAudioData(part), - cache_control: cacheControl, - }; - } - - const fileName = String( - part.providerOptions?.hyperbolic?.filename ?? part.filename ?? 
"", - ); - - const fileData = getFileUrl({ - part, - defaultMediaType: "application/pdf", - }); - - if ( - isUrl({ - url: fileData, - protocols: new Set(["http:", "https:"] as const), - }) - ) { - return { - type: "file" as const, - file: { - filename: fileName, - file_data: fileData, - }, - } satisfies ChatCompletionContentPart; - } - - return { - type: "file" as const, - file: { - filename: fileName, - file_data: fileData, - }, - cache_control: cacheControl, - } satisfies ChatCompletionContentPart; - } - default: { - return { - type: "text" as const, - text: "", - cache_control: cacheControl, - }; - } - } - }, - ); - - // For multi-part messages, don't add cache_control at the root level - messages.push({ - role: "user", - content: contentParts, - }); - - break; - } - - case "assistant": { - let text = ""; - let reasoning = ""; - const toolCalls: Array<{ - id: string; - type: "function"; - function: { name: string; arguments: string }; - }> = []; - const accumulatedReasoningDetails: ReasoningDetailUnion[] = []; - - for (const part of content) { - switch (part.type) { - case "text": { - text += part.text; - - break; - } - case "tool-call": { - const partReasoningDetails = (part.providerOptions as Record) - ?.hyperbolic as Record | undefined; - if ( - partReasoningDetails?.reasoning_details && - Array.isArray(partReasoningDetails.reasoning_details) - ) { - accumulatedReasoningDetails.push( - ...(partReasoningDetails.reasoning_details as ReasoningDetailUnion[]), - ); - } - toolCalls.push({ - id: part.toolCallId, - type: "function", - function: { - name: part.toolName, - arguments: JSON.stringify(part.input), - }, - }); - break; - } - case "reasoning": { - reasoning += part.text; - const parsedPartProviderOptions = HyperbolicProviderOptionsSchema.safeParse( - part.providerOptions, - ); - if ( - parsedPartProviderOptions.success && - parsedPartProviderOptions.data?.hyperbolic?.reasoning_details - ) { - accumulatedReasoningDetails.push( - 
...parsedPartProviderOptions.data.hyperbolic.reasoning_details, - ); - } - break; - } - - case "file": - break; - default: { - break; - } - } - } - - // Check message-level providerOptions for preserved reasoning_details and annotations - const parsedProviderOptions = HyperbolicProviderOptionsSchema.safeParse(providerOptions); - const messageReasoningDetails = parsedProviderOptions.success - ? parsedProviderOptions.data?.hyperbolic?.reasoning_details - : undefined; - const messageAnnotations = parsedProviderOptions.success - ? parsedProviderOptions.data?.hyperbolic?.annotations - : undefined; - - // Use message-level reasoning_details if available, otherwise use accumulated from parts - const finalReasoningDetails = - messageReasoningDetails && - Array.isArray(messageReasoningDetails) && - messageReasoningDetails.length > 0 - ? messageReasoningDetails - : accumulatedReasoningDetails.length > 0 - ? accumulatedReasoningDetails - : undefined; - - messages.push({ - role: "assistant", - content: text, - tool_calls: toolCalls.length > 0 ? toolCalls : undefined, - reasoning: reasoning || undefined, - reasoning_details: finalReasoningDetails, - annotations: messageAnnotations, - cache_control: getCacheControl(providerOptions), - }); - - break; - } - - case "tool": { - for (const toolResponse of content) { - // Skip tool approval responses - only process tool results - if (toolResponse.type === "tool-approval-response") { - continue; - } - const content = getToolResultContent(toolResponse); - - messages.push({ - role: "tool", - tool_call_id: toolResponse.toolCallId, - content, - cache_control: - getCacheControl(providerOptions) ?? 
getCacheControl(toolResponse.providerOptions), - }); - } - break; - } - - default: { - break; - } - } - } - - return messages; -} - -function getToolResultContent(input: LanguageModelV3ToolResultPart): string { - switch (input.output.type) { - case "text": - case "error-text": - return input.output.value; - case "json": - case "error-json": - case "content": - return JSON.stringify(input.output.value); - case "execution-denied": - return input.output.reason ?? "Tool execution denied"; - } -} diff --git a/packages/ai-sdk-provider/src/chat/errors.test.ts b/packages/ai-sdk-provider/src/chat/errors.test.ts deleted file mode 100644 index 4bc5f11..0000000 --- a/packages/ai-sdk-provider/src/chat/errors.test.ts +++ /dev/null @@ -1,94 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; -import { describe, expect, it } from "vitest"; - -import { createHyperbolic } from "../provider"; -import { createTestServer } from "../test-utils/test-server"; - -const TEST_PROMPT: LanguageModelV3Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -const provider = createHyperbolic({ - baseURL: "https://api.hyperbolic.xyz/v1", - apiKey: "test-api-key", -}); - -const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": {}, -}); - -describe("HTTP 200 Error Response Handling", () => { - describe("doGenerate", () => { - it("should throw APICallError for HTTP 200 responses with error payloads", async () => { - // Hyperbolic sometimes returns HTTP 200 with an error object instead of choices - // This can occur for various server errors (e.g., internal errors, processing failures) - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "json-value", - body: { - error: { - 
message: "Internal Server Error", - code: 500, - }, - user_id: "org_abc123", - }, - }; - - const model = provider("anthropic/claude-3.5-sonnet"); - - await expect( - model.doGenerate({ - prompt: TEST_PROMPT, - }), - ).rejects.toThrow("Internal Server Error"); - }); - - it("should parse successful responses normally when no error present", async () => { - // Normal successful response without error - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "json-value", - body: { - id: "gen-123", - model: "anthropic/claude-3.5-sonnet", - provider: "Anthropic", - choices: [ - { - index: 0, - message: { - role: "assistant", - content: "Hello! How can I help you?", - }, - finish_reason: "stop", - }, - ], - usage: { - prompt_tokens: 10, - completion_tokens: 8, - total_tokens: 18, - }, - }, - }; - - const model = provider("anthropic/claude-3.5-sonnet"); - - const result = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(result.content).toMatchObject([ - { - type: "text", - text: "Hello! How can I help you?", - }, - ]); - expect((result.usage.inputTokens?.total ?? 0) + (result.usage.outputTokens?.total ?? 0)).toBe( - 18, - ); - }); - }); -}); diff --git a/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts b/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts deleted file mode 100644 index ebbfa78..0000000 --- a/packages/ai-sdk-provider/src/chat/file-parser-schema.test.ts +++ /dev/null @@ -1,115 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { describe, expect, it } from "vitest"; - -import { HyperbolicNonStreamChatCompletionResponseSchema } from "./schemas"; - -describe("FileParser annotation schema", () => { - it("should parse response with all real API fields", () => { - // This is based on actual API response structure (anonymized) - const response = { - id: "gen-xxx", - provider: "Amazon Bedrock", - model: "anthropic/claude-3.5-sonnet", - object: "chat.completion", - created: 1763157299, - choices: [ - { - logprobs: null, - finish_reason: "stop", - native_finish_reason: "stop", - index: 0, - message: { - role: "assistant" as const, - content: "Test response content", - refusal: null, - reasoning: null, - annotations: [ - { - type: "file" as const, - file: { - hash: "abc123", - name: "", - content: [ - { - type: "text", - text: '', - }, - ], - }, - }, - ], - }, - }, - ], - usage: { - prompt_tokens: 100, - completion_tokens: 50, - total_tokens: 150, - }, - }; - - const result = HyperbolicNonStreamChatCompletionResponseSchema.parse(response); - expect(result).toBeDefined(); - }); - - it("should parse file annotation with content array and extra fields", () => { - const response = { - id: "gen-test", - provider: "Amazon Bedrock", - model: "anthropic/claude-3.5-sonnet", - object: "chat.completion", - created: 1763157061, - choices: [ - { - logprobs: null, - finish_reason: "stop", - native_finish_reason: "stop", // Extra field from API - index: 0, - message: { - role: "assistant" as const, - content: "Test response", - refusal: null, // Extra field from API - reasoning: null, - annotations: [ - { - type: "file" as const, - file: { - hash: "85bd49b97b7ff5be002d9f654776119f253c1cae333b49ba8f4a53da346284ba", - name: "", - content: [ - { - type: "text", - text: '', - }, - { - type: "text", - text: "Some file content", - }, - ], - }, - }, - ], - }, - }, - ], - usage: { - prompt_tokens: 100, - completion_tokens: 50, - total_tokens: 150, - }, - }; - 
- const result = HyperbolicNonStreamChatCompletionResponseSchema.parse(response); - - // Check that parsing succeeded - expect(result).toBeDefined(); - // The schema uses passthrough so we can't strictly type check, but we can verify structure - // @ts-expect-error test intentionally inspects passthrough data - const firstChoice = result.choices?.[0]; - expect(firstChoice?.message.annotations).toBeDefined(); - expect(firstChoice?.message.annotations?.[0]?.type).toBe("file"); - }); -}); diff --git a/packages/ai-sdk-provider/src/chat/file-url-utils.ts b/packages/ai-sdk-provider/src/chat/file-url-utils.ts deleted file mode 100644 index 714b3f5..0000000 --- a/packages/ai-sdk-provider/src/chat/file-url-utils.ts +++ /dev/null @@ -1,150 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV3FilePart } from "@ai-sdk/provider"; -import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils"; - -import type { HyperbolicAudioFormat } from "../types/hyperbolic-chat-completions-input"; -import { HYPERBOLIC_AUDIO_FORMATS } from "../types/hyperbolic-chat-completions-input"; -import { isUrl } from "./is-url"; - -export function getFileUrl({ - part, - defaultMediaType, -}: { - part: LanguageModelV3FilePart; - defaultMediaType: string; -}) { - if (part.data instanceof Uint8Array) { - const base64 = convertUint8ArrayToBase64(part.data); - return `data:${part.mediaType ?? defaultMediaType};base64,${base64}`; - } - - const stringUrl = part.data.toString(); - - if ( - isUrl({ - url: stringUrl, - protocols: new Set(["http:", "https:"] as const), - }) - ) { - return stringUrl; - } - - return stringUrl.startsWith("data:") - ? stringUrl - : `data:${part.mediaType ?? 
defaultMediaType};base64,${stringUrl}`; -} - -export function getMediaType(dataUrl: string, defaultMediaType: string): string { - const match = dataUrl.match(/^data:([^;]+)/); - return match ? (match[1] ?? defaultMediaType) : defaultMediaType; -} - -export function getBase64FromDataUrl(dataUrl: string): string { - const match = dataUrl.match(/^data:[^;]*;base64,(.+)$/); - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - return match ? match[1]! : dataUrl; -} - -/** MIME type to format mapping for normalization */ -export const MIME_TO_FORMAT: Record = { - // MP3 variants - mpeg: "mp3", - mp3: "mp3", - // WAV variants - "x-wav": "wav", - wave: "wav", - wav: "wav", - // OGG variants - ogg: "ogg", - vorbis: "ogg", - // AAC variants - aac: "aac", - "x-aac": "aac", - // M4A variants - m4a: "m4a", - "x-m4a": "m4a", - mp4: "m4a", - // AIFF variants - aiff: "aiff", - "x-aiff": "aiff", - // FLAC - flac: "flac", - "x-flac": "flac", - // PCM variants - pcm16: "pcm16", - pcm24: "pcm24", -}; - -/** - * Converts an audio file part to Hyperbolic's input_audio data format. - * - * This function extracts base64-encoded audio data from a file part and - * normalizes the format to one of the supported Hyperbolic audio formats. - * - * @param part - The file part containing audio data. Must have a mediaType - * starting with "audio/" and contain either base64 data or a data URL. - * - * @returns An object with `data` (base64-encoded audio) and `format` - * suitable for use in Hyperbolic's `input_audio` field. - * - * @throws {Error} When audio is provided as an HTTP/HTTPS URL. Hyperbolic requires - * audio to be base64-encoded inline. The error message includes instructions for - * downloading and encoding the audio locally. - * - * @throws {Error} When the audio format is not supported. 
- * - * @example - * ```ts - * const audioData = getInputAudioData(filePart); - * // Returns: { data: "base64string...", format: "mp3" } - * ``` - */ -export function getInputAudioData(part: LanguageModelV3FilePart): { - data: string; - format: HyperbolicAudioFormat; -} { - const fileData = getFileUrl({ - part, - defaultMediaType: "audio/mpeg", - }); - - // Hyperbolic's input_audio doesn't support URLs directly - if ( - isUrl({ - url: fileData, - protocols: new Set(["http:", "https:"] as const), - }) - ) { - throw new Error( - `Audio files cannot be provided as URLs.\n\n` + - `Hyperbolic requires audio to be base64-encoded. Please:\n` + - `1. Download the audio file locally\n` + - `2. Read it as a Buffer or Uint8Array\n` + - `3. Pass it as the data parameter\n\n` + - `The AI SDK will automatically handle base64 encoding.`, - ); - } - - // Extract base64 data (handles both data URLs and raw base64) - const data = getBase64FromDataUrl(fileData); - - // Map media type to format - const mediaType = part.mediaType || "audio/mpeg"; - const rawFormat = mediaType.replace("audio/", ""); - - // Normalize format names for Hyperbolic using MIME type mapping - const format = MIME_TO_FORMAT[rawFormat]; - - if (format === undefined) { - const supportedList = HYPERBOLIC_AUDIO_FORMATS.join(", "); - throw new Error( - `Unsupported audio format: "${mediaType}"\n\n` + - `Hyperbolic supports the following audio formats: ${supportedList}`, - ); - } - - return { data, format }; -} diff --git a/packages/ai-sdk-provider/src/chat/get-tool-choice.ts b/packages/ai-sdk-provider/src/chat/get-tool-choice.ts deleted file mode 100644 index 4261f75..0000000 --- a/packages/ai-sdk-provider/src/chat/get-tool-choice.ts +++ /dev/null @@ -1,46 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV3ToolChoice } from "@ai-sdk/provider"; -import { InvalidArgumentError } from "@ai-sdk/provider"; -import { z } from "zod/v4"; - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -const ChatCompletionToolChoiceSchema = z.union([ - z.literal("auto"), - z.literal("none"), - z.literal("required"), - z.object({ - type: z.literal("function"), - function: z.object({ - name: z.string(), - }), - }), -]); - -type ChatCompletionToolChoice = z.infer; - -export function getChatCompletionToolChoice( - toolChoice: LanguageModelV3ToolChoice, -): ChatCompletionToolChoice { - switch (toolChoice.type) { - case "auto": - case "none": - case "required": - return toolChoice.type; - case "tool": { - return { - type: "function", - function: { name: toolChoice.toolName }, - }; - } - default: { - toolChoice satisfies never; - throw new InvalidArgumentError({ - argument: "toolChoice", - message: `Invalid tool choice type: ${JSON.stringify(toolChoice)}`, - }); - } - } -} diff --git a/packages/ai-sdk-provider/src/chat/index.test.ts b/packages/ai-sdk-provider/src/chat/index.test.ts deleted file mode 100644 index 8be3f19..0000000 --- a/packages/ai-sdk-provider/src/chat/index.test.ts +++ /dev/null @@ -1,2278 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV3Prompt, LanguageModelV3StreamPart } from "@ai-sdk/provider"; -import type { JSONSchema7 } from "json-schema"; -import { vi } from "vitest"; - -import type { ImageResponse } from "../schemas/image"; -import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; -import { createHyperbolic } from "../provider"; -import { ReasoningDetailType } from "../schemas/reasoning-details"; -import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; - -vi.mock("../version", () => ({ - VERSION: "0.0.0-test", -})); - -const TEST_PROMPT: LanguageModelV3Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -const TEST_LOGPROBS = { - content: [ - { - token: "Hello", - logprob: -0.0009994634, - top_logprobs: [ - { - token: "Hello", - logprob: -0.0009994634, - }, - ], - }, - { - token: "!", - logprob: -0.13410144, - top_logprobs: [ - { - token: "!", - logprob: -0.13410144, - }, - ], - }, - { - token: " How", - logprob: -0.0009250381, - top_logprobs: [ - { - token: " How", - logprob: -0.0009250381, - }, - ], - }, - { - token: " can", - logprob: -0.047709424, - top_logprobs: [ - { - token: " can", - logprob: -0.047709424, - }, - ], - }, - { - token: " I", - logprob: -0.000009014684, - top_logprobs: [ - { - token: " I", - logprob: -0.000009014684, - }, - ], - }, - { - token: " assist", - logprob: -0.009125131, - top_logprobs: [ - { - token: " assist", - logprob: -0.009125131, - }, - ], - }, - { - token: " you", - logprob: -0.0000066306106, - top_logprobs: [ - { - token: " you", - logprob: -0.0000066306106, - }, - ], - }, - { - token: " today", - logprob: -0.00011093382, - top_logprobs: [ - { - token: " today", - logprob: -0.00011093382, - }, - ], - }, - { - token: "?", - logprob: -0.00004596782, - top_logprobs: [ - { - token: "?", - logprob: -0.00004596782, - }, - ], - }, - ], -}; - -const TEST_IMAGE_URL = 
`data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAAiXpUWHRSYXcgcHJvZmlsZSB0eXBlIGlwdGMAAAiZTYwxDgIxDAT7vOKekDjrtV1T0VHwgbtcIiEhgfh/QaDgmGlWW0w6X66n5fl6jNu9p+ULkapDENgzpj+Kl5aFfa6KnYWgSjZjGOiSYRxTY/v8KIijI==`; - -// eslint-disable-next-line @typescript-eslint/no-non-null-assertion -const TEST_IMAGE_BASE64 = TEST_IMAGE_URL.split(",")[1]!; - -const provider = createHyperbolic({ - apiKey: "test-api-key", - compatibility: "strict", -}); - -const model = provider.chat("anthropic/claude-3.5-sonnet"); - -function isReasoningDeltaPart(part: LanguageModelV3StreamPart): part is Extract< - LanguageModelV3StreamPart, - { - type: "reasoning-delta"; - } -> { - return part.type === "reasoning-delta"; -} - -function isReasoningStartPart(part: LanguageModelV3StreamPart): part is Extract< - LanguageModelV3StreamPart, - { - type: "reasoning-start"; - } -> { - return part.type === "reasoning-start"; -} - -function isTextDeltaPart(part: LanguageModelV3StreamPart): part is Extract< - LanguageModelV3StreamPart, - { - type: "text-delta"; - } -> { - return part.type === "text-delta"; -} - -describe("doGenerate", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": { - response: { type: "json-value", body: {} }, - }, - }); - - function prepareJsonResponse({ - content = "", - reasoning, - reasoning_details, - images, - tool_calls, - usage = { - prompt_tokens: 4, - total_tokens: 34, - completion_tokens: 30, - }, - logprobs = null, - finish_reason = "stop", - }: { - content?: string; - reasoning?: string; - reasoning_details?: Array; - images?: Array; - tool_calls?: Array<{ - id: string; - type: "function"; - function: { name: string; arguments: string }; - }>; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - content: - | { - token: string; - logprob: number; - top_logprobs: { token: string; logprob: number }[]; - }[] - | null; - } | null; - finish_reason?: string; - } = 
{}) { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "json-value", - body: { - id: "chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd", - object: "chat.completion", - created: 1711115037, - model: "gpt-3.5-turbo-0125", - choices: [ - { - index: 0, - message: { - role: "assistant", - content, - reasoning, - reasoning_details, - images, - tool_calls, - }, - logprobs, - finish_reason, - }, - ], - usage, - system_fingerprint: "fp_3bc1b5746c", - }, - }; - } - - it("should extract text response", async () => { - prepareJsonResponse({ content: "Hello, World!" }); - - const result = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(result.content[0]).toStrictEqual({ - type: "text", - text: "Hello, World!", - }); - }); - - it("should extract usage", async () => { - prepareJsonResponse({ - content: "", - usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, - }); - - const { usage } = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(usage).toStrictEqual({ - inputTokens: { - total: 20, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: 5, - text: undefined, - reasoning: undefined, - }, - }); - }); - - it("should extract logprobs", async () => { - prepareJsonResponse({ - logprobs: TEST_LOGPROBS, - }); - - await provider.chat("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ - prompt: TEST_PROMPT, - }); - }); - - it("should extract finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "stop", - }); - - const response = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(response.finishReason).toStrictEqual({ - unified: "stop", - raw: "stop", - }); - }); - - it("should support unknown finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "eos", - }); - - const response = await model.doGenerate({ - prompt: 
TEST_PROMPT, - }); - - expect(response.finishReason).toStrictEqual({ - unified: "other", - raw: "eos", - }); - }); - - it("should extract reasoning content from reasoning field", async () => { - prepareJsonResponse({ - content: "Hello!", - reasoning: - "I need to think about this... The user said hello, so I should respond with a greeting.", - }); - - const result = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(result.content).toStrictEqual([ - { - type: "reasoning", - text: "I need to think about this... The user said hello, so I should respond with a greeting.", - }, - { - type: "text", - text: "Hello!", - }, - ]); - }); - - it("should extract reasoning content from reasoning_details", async () => { - prepareJsonResponse({ - content: "Hello!", - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "Let me analyze this request...", - }, - { - type: ReasoningDetailType.Summary, - summary: "The user wants a greeting response.", - }, - ], - }); - - const result = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(result.content).toStrictEqual([ - { - type: "reasoning", - text: "Let me analyze this request...", - providerMetadata: { - hyperbolic: { - reasoning_details: [ - { - type: "reasoning.text", - text: "Let me analyze this request...", - }, - ], - }, - }, - }, - { - type: "reasoning", - text: "The user wants a greeting response.", - providerMetadata: { - hyperbolic: { - reasoning_details: [ - { - type: "reasoning.summary", - summary: "The user wants a greeting response.", - }, - ], - }, - }, - }, - { - type: "text", - text: "Hello!", - }, - ]); - }); - - it("should handle encrypted reasoning details", async () => { - prepareJsonResponse({ - content: "Hello!", - reasoning_details: [ - { - type: ReasoningDetailType.Encrypted, - data: "encrypted_reasoning_data_here", - }, - ], - }); - - const result = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(result.content).toStrictEqual([ - { - type: 
"reasoning", - text: "[REDACTED]", - providerMetadata: { - hyperbolic: { - reasoning_details: [ - { - type: "reasoning.encrypted", - data: "encrypted_reasoning_data_here", - }, - ], - }, - }, - }, - { - type: "text", - text: "Hello!", - }, - ]); - }); - - it("should prioritize reasoning_details over reasoning when both are present", async () => { - prepareJsonResponse({ - content: "Hello!", - reasoning: "This should be ignored when reasoning_details is present", - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "Processing from reasoning_details...", - }, - { - type: ReasoningDetailType.Summary, - summary: "Summary from reasoning_details", - }, - ], - }); - - const result = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(result.content).toStrictEqual([ - { - type: "reasoning", - text: "Processing from reasoning_details...", - providerMetadata: { - hyperbolic: { - reasoning_details: [ - { - type: "reasoning.text", - text: "Processing from reasoning_details...", - }, - ], - }, - }, - }, - { - type: "reasoning", - text: "Summary from reasoning_details", - providerMetadata: { - hyperbolic: { - reasoning_details: [ - { - type: "reasoning.summary", - summary: "Summary from reasoning_details", - }, - ], - }, - }, - }, - { - type: "text", - text: "Hello!", - }, - ]); - - // Verify that the reasoning field content is not included - expect(result.content).not.toContainEqual({ - type: "reasoning", - text: "This should be ignored when reasoning_details is present", - }); - }); - - it("should override finishReason to tool-calls when tool calls and encrypted reasoning are present", async () => { - prepareJsonResponse({ - content: "", - tool_calls: [ - { - id: "call_123", - type: "function", - function: { - name: "get_weather", - arguments: '{"location":"San Francisco"}', - }, - }, - ], - reasoning_details: [ - { - type: ReasoningDetailType.Encrypted, - data: "encrypted_reasoning_data_here", - }, - ], - // Gemini 3 returns 'stop' instead of 
'tool_calls' when using thoughtSignature - finish_reason: "stop", - }); - - const result = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - // Should override to 'tool-calls' when encrypted reasoning + tool calls + stop - expect(result.finishReason).toStrictEqual({ - unified: "tool-calls", - raw: "stop", - }); - - // Should still have the tool call in content - expect(result.content).toContainEqual( - expect.objectContaining({ - type: "tool-call", - toolCallId: "call_123", - toolName: "get_weather", - }), - ); - }); - - it("should pass the model and the messages", async () => { - prepareJsonResponse({ content: "" }); - - await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - }); - }); - - it("should pass the models array when provided", async () => { - prepareJsonResponse({ content: "" }); - - const customModel = provider.chat("anthropic/claude-3.5-sonnet", { - models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], - }); - - await customModel.doGenerate({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], - messages: [{ role: "user", content: "Hello" }], - }); - }); - - it("should pass settings", async () => { - prepareJsonResponse(); - - await provider - .chat("openai/gpt-3.5-turbo", { - logitBias: { 50256: -100 }, - logprobs: 2, - parallelToolCalls: false, - user: "test-user-id", - }) - .doGenerate({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: "openai/gpt-3.5-turbo", - messages: [{ 
role: "user", content: "Hello" }], - logprobs: true, - top_logprobs: 2, - logit_bias: { 50256: -100 }, - parallel_tool_calls: false, - user: "test-user-id", - }); - }); - - it("should pass tools and toolChoice", async () => { - prepareJsonResponse({ content: "" }); - - await model.doGenerate({ - prompt: TEST_PROMPT, - tools: [ - { - type: "function", - name: "test-tool", - description: "Test tool", - inputSchema: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - toolChoice: { - type: "tool", - toolName: "test-tool", - }, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - tools: [ - { - type: "function", - function: { - name: "test-tool", - description: "Test tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - }, - ], - tool_choice: { - type: "function", - function: { name: "test-tool" }, - }, - }); - }); - - it("should pass headers", async () => { - prepareJsonResponse({ content: "" }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.chat("openai/gpt-3.5-turbo").doGenerate({ - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const requestHeaders = server.calls[0]!.requestHeaders; - - expect(requestHeaders).toMatchObject({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - 
"custom-request-header": "request-header-value", - }); - expect(requestHeaders["user-agent"]).toContain("ai-sdk/hyperbolic/0.0.0-test"); - }); - - it("should pass responseFormat for JSON schema structured outputs", async () => { - prepareJsonResponse({ content: '{"name": "John", "age": 30}' }); - - const testSchema: JSONSchema7 = { - type: "object", - properties: { - name: { type: "string" }, - age: { type: "number" }, - }, - required: ["name", "age"], - additionalProperties: false, - }; - - await model.doGenerate({ - prompt: TEST_PROMPT, - responseFormat: { - type: "json", - schema: testSchema, - name: "PersonResponse", - description: "A person object", - }, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - response_format: { - type: "json_schema", - json_schema: { - schema: testSchema, - strict: true, - name: "PersonResponse", - description: "A person object", - }, - }, - }); - }); - - it("should use default name when name is not provided in responseFormat", async () => { - prepareJsonResponse({ content: '{"name": "John", "age": 30}' }); - - const testSchema: JSONSchema7 = { - type: "object", - properties: { - name: { type: "string" }, - age: { type: "number" }, - }, - required: ["name", "age"], - additionalProperties: false, - }; - - await model.doGenerate({ - prompt: TEST_PROMPT, - responseFormat: { - type: "json", - schema: testSchema, - }, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - response_format: { - type: "json_schema", - json_schema: { - schema: testSchema, - strict: true, - name: "response", - }, - }, - }); - }); - - it("should pass images", async () => { - prepareJsonResponse({ - 
content: "", - images: [ - { - type: "image_url", - image_url: { url: TEST_IMAGE_URL }, - }, - ], - usage: { prompt_tokens: 53, total_tokens: 70, completion_tokens: 17 }, - }); - - const result = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(result.content).toStrictEqual([ - { - type: "file", - mediaType: "image/png", - data: TEST_IMAGE_BASE64, - }, - ]); - }); -}); - -describe("doStream", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": { - response: { type: "json-value", body: {} }, - }, - }); - - function prepareStreamResponse({ - content, - usage = { - prompt_tokens: 17, - total_tokens: 244, - completion_tokens: 227, - }, - logprobs = null, - finish_reason = "stop", - }: { - content: string[]; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - prompt_tokens_details?: { - cached_tokens: number; - }; - completion_tokens_details?: { - reasoning_tokens: number; - }; - cost?: number; - cost_details?: { - upstream_inference_cost: number; - }; - }; - logprobs?: { - content: - | { - token: string; - logprob: number; - top_logprobs: { token: string; logprob: number }[]; - }[] - | null; - } | null; - finish_reason?: string; - }) { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + - `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`, - ...content.flatMap((text) => { - return `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"${text}"},"finish_reason":null}]}\n\n`; - }), - 
`data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"${finish_reason}","logprobs":${JSON.stringify( - logprobs, - )}}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":${JSON.stringify( - usage, - )}}\n\n`, - "data: [DONE]\n\n", - ], - }; - } - - it("should stream text deltas", async () => { - prepareStreamResponse({ - content: ["Hello", ", ", "World!"], - finish_reason: "stop", - usage: { - prompt_tokens: 17, - total_tokens: 244, - completion_tokens: 227, - }, - logprobs: TEST_LOGPROBS, - }); - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - // note: space moved to last chunk bc of trimming - const elements = await convertReadableStreamToArray(stream); - expect(elements).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-start", id: expect.any(String) }, - { type: "text-delta", delta: "Hello", id: expect.any(String) }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", delta: ", ", id: expect.any(String) }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", delta: "World!", id: expect.any(String) }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - 
}, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { - type: "text-end", - id: expect.any(String), - }, - { - type: "finish", - finishReason: { unified: "stop", raw: "stop" }, - - providerMetadata: { - hyperbolic: { - usage: { - completionTokens: 227, - promptTokens: 17, - totalTokens: 244, - cost: undefined, - }, - }, - }, - usage: { - inputTokens: { - total: 17, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: 227, - text: undefined, - reasoning: undefined, - }, - }, - }, - ]); - }); - - it("should include upstream inference cost in finish metadata when provided", async () => { - prepareStreamResponse({ - content: ["Hello"], - usage: { - prompt_tokens: 17, - total_tokens: 244, - completion_tokens: 227, - cost_details: { - upstream_inference_cost: 0.0036, - }, - }, - }); - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; - const finishChunk = elements.find( - (chunk): chunk is Extract => - chunk.type === "finish", - ); - const hyperbolicUsage = ( - finishChunk?.providerMetadata?.hyperbolic as { - usage?: { - cost?: number; - costDetails?: { upstreamInferenceCost: number }; - }; - } - )?.usage; - expect(hyperbolicUsage?.costDetails).toStrictEqual({ - upstreamInferenceCost: 0.0036, - }); - }); - - it("should handle both normal cost and upstream inference cost in finish metadata when both are provided", async () => { - prepareStreamResponse({ - content: ["Hello"], - usage: { - prompt_tokens: 17, - total_tokens: 244, - completion_tokens: 227, - cost: 0.0042, - cost_details: { - upstream_inference_cost: 0.0036, - }, - }, - }); - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const 
elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; - const finishChunk = elements.find( - (chunk): chunk is Extract => - chunk.type === "finish", - ); - const hyperbolicUsage = ( - finishChunk?.providerMetadata?.hyperbolic as { - usage?: { - cost?: number; - costDetails?: { upstreamInferenceCost: number }; - }; - } - )?.usage; - expect(hyperbolicUsage?.costDetails).toStrictEqual({ - upstreamInferenceCost: 0.0036, - }); - expect(hyperbolicUsage?.cost).toBe(0.0042); - }); - - it("should prioritize reasoning_details over reasoning when both are present in streaming", async () => { - // This test verifies that when the API returns both 'reasoning' and 'reasoning_details' fields, - // we prioritize reasoning_details and ignore the reasoning field to avoid duplicates. - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - // First chunk: both reasoning and reasoning_details with different content - `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"",` + - `"reasoning":"This should be ignored...",` + - `"reasoning_details":[{"type":"${ReasoningDetailType.Text}","text":"Let me think about this..."}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Second chunk: reasoning_details with multiple types - `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + - `"reasoning":"Also ignored",` + - `"reasoning_details":[{"type":"${ReasoningDetailType.Summary}","summary":"User wants a greeting"},{"type":"${ReasoningDetailType.Encrypted}","data":"secret"}]},` + - 
`"logprobs":null,"finish_reason":null}]}\n\n`, - // Third chunk: only reasoning field (should be processed) - `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + - `"reasoning":"This reasoning is used"},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Content chunk - `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"content":"Hello!"},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Finish chunk - `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + - `"logprobs":null,"finish_reason":"stop"}]}\n\n`, - `data: {"id":"chatcmpl-reasoning","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":30,"total_tokens":47}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - // Filter for reasoning-related elements - const reasoningElements = elements.filter( - (el) => - el.type === "reasoning-start" || - el.type === "reasoning-delta" || - el.type === "reasoning-end", - ); - - // Debug output to see what we're getting - // console.log('Reasoning elements count:', reasoningElements.length); - // console.log('Reasoning element types:', reasoningElements.map(el => el.type)); - - // We should get reasoning content from reasoning_details when present, not reasoning field - // start + 4 deltas (text, summary, encrypted, reasoning-only) + end = 6 - 
expect(reasoningElements).toHaveLength(6); - - // Verify the content comes from reasoning_details, not reasoning field - const reasoningDeltas = reasoningElements.filter(isReasoningDeltaPart).map((el) => el.delta); - - expect(reasoningDeltas).toEqual([ - "Let me think about this...", // from reasoning_details text - "User wants a greeting", // from reasoning_details summary - "[REDACTED]", // from reasoning_details encrypted - "This reasoning is used", // from reasoning field (no reasoning_details) - ]); - - // Verify that "This should be ignored..." and "Also ignored" are NOT in the output - expect(reasoningDeltas).not.toContain("This should be ignored..."); - expect(reasoningDeltas).not.toContain("Also ignored"); - - // Verify that reasoning-delta chunks include providerMetadata with reasoning_details - const reasoningDeltaElements = elements.filter(isReasoningDeltaPart); - - // First delta should have reasoning_details from first chunk - expect(reasoningDeltaElements[0]?.providerMetadata).toEqual({ - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "Let me think about this...", - }, - ], - }, - }); - - // Second and third deltas should have reasoning_details from second chunk - expect(reasoningDeltaElements[1]?.providerMetadata).toEqual({ - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Summary, - summary: "User wants a greeting", - }, - { - type: ReasoningDetailType.Encrypted, - data: "secret", - }, - ], - }, - }); - - expect(reasoningDeltaElements[2]?.providerMetadata).toEqual({ - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Summary, - summary: "User wants a greeting", - }, - { - type: ReasoningDetailType.Encrypted, - data: "secret", - }, - ], - }, - }); - - // Fourth delta (from reasoning field only) should not have providerMetadata - expect(reasoningDeltaElements[3]?.providerMetadata).toBeUndefined(); - }); - - it("should emit reasoning_details in providerMetadata for all 
reasoning delta chunks", async () => { - // This test verifies that reasoning_details are included in providerMetadata - // for all reasoning-delta chunks, enabling users to accumulate them for multi-turn conversations - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - // First chunk: reasoning_details with Text type - `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"",` + - `"reasoning_details":[{"type":"${ReasoningDetailType.Text}","text":"First reasoning chunk"}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Second chunk: reasoning_details with Summary type - `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + - `"reasoning_details":[{"type":"${ReasoningDetailType.Summary}","summary":"Summary reasoning"}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Third chunk: reasoning_details with Encrypted type - `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + - `"reasoning_details":[{"type":"${ReasoningDetailType.Encrypted}","data":"encrypted_data"}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Finish chunk - `data: {"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + - `"logprobs":null,"finish_reason":"stop"}]}\n\n`, - `data: 
{"id":"chatcmpl-metadata-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":30,"total_tokens":47}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - const reasoningDeltaElements = elements.filter(isReasoningDeltaPart); - - expect(reasoningDeltaElements).toHaveLength(3); - - // Verify each delta has the correct reasoning_details in providerMetadata - expect(reasoningDeltaElements[0]?.providerMetadata).toEqual({ - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "First reasoning chunk", - }, - ], - }, - }); - - expect(reasoningDeltaElements[1]?.providerMetadata).toEqual({ - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Summary, - summary: "Summary reasoning", - }, - ], - }, - }); - - expect(reasoningDeltaElements[2]?.providerMetadata).toEqual({ - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Encrypted, - data: "encrypted_data", - }, - ], - }, - }); - - // Verify reasoning-start also has providerMetadata when first delta includes it - const reasoningStart = elements.find(isReasoningStartPart); - - expect(reasoningStart?.providerMetadata).toEqual({ - hyperbolic: { - reasoning_details: [ - { - type: ReasoningDetailType.Text, - text: "First reasoning chunk", - }, - ], - }, - }); - }); - - it("should maintain correct reasoning order when content comes after reasoning (issue #7824)", async () => { - // This test reproduces the issue where reasoning appears first but then gets "pushed down" - // by content that comes later in the stream - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - // 
First chunk: Start with reasoning - `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant",` + - `"reasoning":"I need to think about this step by step..."},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Second chunk: More reasoning - `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + - `"reasoning":" First, I should analyze the request."},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Third chunk: Even more reasoning - `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + - `"reasoning":" Then I should provide a helpful response."},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Fourth chunk: Content starts - `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"content":"Hello! 
"},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Fifth chunk: More content - `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"content":"How can I help you today?"},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Finish chunk - `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + - `"logprobs":null,"finish_reason":"stop"}]}\n\n`, - `data: {"id":"chatcmpl-order-test","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":30,"total_tokens":47}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - // The expected order should be: - // 1. reasoning-start - // 2. reasoning-delta (3 times) - // 3. reasoning-end (when text starts) - // 4. text-start - // 5. text-delta (2 times) - // 6. 
text-end (when stream finishes) - - const streamOrder = elements.map((el) => el.type); - - // Find the positions of key events - const reasoningStartIndex = streamOrder.indexOf("reasoning-start"); - const reasoningEndIndex = streamOrder.indexOf("reasoning-end"); - const textStartIndex = streamOrder.indexOf("text-start"); - - // Reasoning should come before text and end before text starts - expect(reasoningStartIndex).toBeLessThan(textStartIndex); - expect(reasoningEndIndex).toBeLessThan(textStartIndex); - - // Verify reasoning content - const reasoningDeltas = elements.filter(isReasoningDeltaPart).map((el) => el.delta); - - expect(reasoningDeltas).toEqual([ - "I need to think about this step by step...", - " First, I should analyze the request.", - " Then I should provide a helpful response.", - ]); - - // Verify text content - const textDeltas = elements.filter(isTextDeltaPart).map((el) => el.delta); - - expect(textDeltas).toEqual(["Hello! ", "How can I help you today?"]); - }); - - it("should stream tool deltas", async () => { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + - `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":""}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\\""}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - 
`data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"value"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\":\\""}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Spark"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"le"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Day"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\"}"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - tools: [ - { - type: "function", - name: "test-tool", - inputSchema: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolName: "test-tool", - type: "tool-input-start", - }, - { - type: "tool-input-delta", - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - delta: '{"', - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-input-delta", - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - delta: "value", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-input-delta", - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - delta: '":"', - }, - { - type: 
"response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-input-delta", - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - delta: "Spark", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-input-delta", - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - delta: "le", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-input-delta", - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - delta: " Day", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-input-delta", - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - delta: '"}', - }, - { - type: "tool-call", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolName: "test-tool", - input: '{"value":"Sparkle Day"}', - providerMetadata: { - hyperbolic: { - reasoning_details: [], - }, - }, - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "finish", - finishReason: { unified: "tool-calls", raw: "tool_calls" }, - providerMetadata: { - hyperbolic: { - usage: { - completionTokens: 17, - promptTokens: 53, - totalTokens: 70, - cost: undefined, - }, - }, - }, - usage: { - inputTokens: { - total: 53, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: 17, - text: undefined, - reasoning: undefined, - }, - }, - }, - ]); - }); - - it("should stream tool call that is 
sent in one chunk", async () => { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + - `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":"{\\"value\\":\\"Sparkle Day\\"}"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - tools: [ - { - type: "function", - name: "test-tool", - inputSchema: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - expect(elements).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-input-start", - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolName: "test-tool", - }, - { - type: "tool-input-delta", - id: 
"call_O17Uplv4lJvD6DVdIvFFeRMw", - delta: '{"value":"Sparkle Day"}', - }, - { - type: "tool-input-end", - id: "call_O17Uplv4lJvD6DVdIvFFeRMw", - }, - { - type: "tool-call", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolName: "test-tool", - input: '{"value":"Sparkle Day"}', - providerMetadata: { - hyperbolic: { - reasoning_details: [], - }, - }, - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "finish", - finishReason: { unified: "tool-calls", raw: "tool_calls" }, - providerMetadata: { - hyperbolic: { - usage: { - completionTokens: 17, - promptTokens: 53, - totalTokens: 70, - cost: undefined, - }, - }, - }, - usage: { - inputTokens: { - total: 53, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: 17, - text: undefined, - reasoning: undefined, - }, - }, - }, - ]); - }); - - it("should override finishReason to tool-calls in streaming when tool calls and encrypted reasoning are present", async () => { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - // First chunk: reasoning_details with encrypted data - `data: {"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + - `"system_fingerprint":"fp_gemini3","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + - `"reasoning_details":[{"type":"reasoning.encrypted","data":"encrypted_thoughtsig_data"}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Second chunk: tool call - `data: 
{"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + - `"system_fingerprint":"fp_gemini3","choices":[{"index":0,"delta":{` + - `"tool_calls":[{"index":0,"id":"call_gemini3_123","type":"function","function":{"name":"get_weather","arguments":"{\\"location\\":\\"SF\\"}"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Final chunk: finish_reason is "stop" (Gemini 3 bug) - should be overridden to "tool-calls" - `data: {"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + - `"system_fingerprint":"fp_gemini3","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`, - `data: {"id":"chatcmpl-gemini3","object":"chat.completion.chunk","created":1711357598,"model":"google/gemini-3-pro",` + - `"system_fingerprint":"fp_gemini3","choices":[],"usage":{"prompt_tokens":10,"completion_tokens":20,"total_tokens":30}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - tools: [ - { - type: "function", - name: "get_weather", - inputSchema: { - type: "object", - properties: { location: { type: "string" } }, - required: ["location"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - // Find the finish event - const finishEvent = elements.find( - (el): el is LanguageModelV3StreamPart & { type: "finish" } => el.type === "finish", - ); - - // Should override to 'tool-calls' when encrypted reasoning + tool calls + stop - expect(finishEvent?.finishReason).toStrictEqual({ - unified: "tool-calls", - raw: "stop", - }); - - // Should have the tool call - const toolCallEvent = elements.find( - (el): el is LanguageModelV3StreamPart & { type: "tool-call" } => el.type === "tool-call", - ); - expect(toolCallEvent?.toolName).toBe("get_weather"); - 
expect(toolCallEvent?.toolCallId).toBe("call_gemini3_123"); - }); - - it("should stream images", async () => { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"",` + - `"images":[{"type":"image_url","image_url":{"url":"${TEST_IMAGE_URL}"},"index":0}]},` + - `"logprobs":null,"finish_reason":"stop"}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "file", - mediaType: "image/png", - data: TEST_IMAGE_BASE64, - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "finish", - finishReason: { unified: "stop", raw: "stop" }, - providerMetadata: { - hyperbolic: { - usage: { - completionTokens: 17, - promptTokens: 53, - totalTokens: 70, - cost: undefined, - }, - }, - }, - usage: { - inputTokens: { - total: 53, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: 17, - text: undefined, - reasoning: undefined, - }, - }, - }, - ]); - }); - - it("should handle error 
stream parts", async () => { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - `data: {"error":{"message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + - `help center.","type":"server_error","param":null,"code":null}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "error", - error: { - message: - "The server had an error processing your request. Sorry about that! " + - "You can retry your request, or contact us through our help center", - type: "server_error", - code: null, - param: null, - }, - }, - { - finishReason: { unified: "error", raw: undefined }, - providerMetadata: { - hyperbolic: { - usage: {}, - }, - }, - type: "finish", - usage: { - inputTokens: { - total: undefined, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: undefined, - text: undefined, - reasoning: undefined, - }, - }, - }, - ]); - }); - - it("should handle unparsable stream parts", async () => { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: ["data: {unparsable}\n\n", "data: [DONE]\n\n"], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - expect(elements.length).toBe(2); - expect(elements[0]?.type).toBe("error"); - expect(elements[1]).toStrictEqual({ - finishReason: { unified: "error", raw: undefined }, - - type: "finish", - providerMetadata: { - hyperbolic: { - usage: {}, - }, - }, - usage: { - inputTokens: { - total: undefined, - noCache: 
undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: undefined, - text: undefined, - reasoning: undefined, - }, - }, - }); - }); - - it("should pass the messages and the model", async () => { - prepareStreamResponse({ content: [] }); - - await model.doStream({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - stream: true, - stream_options: { include_usage: true }, - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - }); - }); - - it("should pass headers", async () => { - prepareStreamResponse({ content: [] }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.chat("openai/gpt-3.5-turbo").doStream({ - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const requestHeaders = server.calls[0]!.requestHeaders; - - expect(requestHeaders).toMatchObject({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - expect(requestHeaders["user-agent"]).toContain("ai-sdk/hyperbolic/0.0.0-test"); - }); - - it("should pass extra body", async () => { - prepareStreamResponse({ content: [] }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - extraBody: { - custom_field: "custom_value", - providers: { - anthropic: { - custom_field: "custom_value", - }, - }, - }, - }); - - await provider.chat("anthropic/claude-3.5-sonnet").doStream({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const requestBody = await server.calls[0]!.requestBodyJson; - - 
expect(requestBody).toHaveProperty("custom_field", "custom_value"); - expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); - }); - - it("should pass responseFormat for JSON schema structured outputs", async () => { - prepareStreamResponse({ content: ['{"name": "John", "age": 30}'] }); - - const testSchema: JSONSchema7 = { - type: "object", - properties: { - name: { type: "string" }, - age: { type: "number" }, - }, - required: ["name", "age"], - additionalProperties: false, - }; - - await model.doStream({ - prompt: TEST_PROMPT, - responseFormat: { - type: "json", - schema: testSchema, - name: "PersonResponse", - description: "A person object", - }, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - stream: true, - stream_options: { include_usage: true }, - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - response_format: { - type: "json_schema", - json_schema: { - schema: testSchema, - strict: true, - name: "PersonResponse", - description: "A person object", - }, - }, - }); - }); - - it("should pass responseFormat AND tools together", async () => { - prepareStreamResponse({ content: ['{"name": "John", "age": 30}'] }); - - const testSchema: JSONSchema7 = { - type: "object", - properties: { - name: { type: "string" }, - age: { type: "number" }, - }, - required: ["name", "age"], - additionalProperties: false, - }; - - await model.doStream({ - prompt: TEST_PROMPT, - responseFormat: { - type: "json", - schema: testSchema, - name: "PersonResponse", - description: "A person object", - }, - tools: [ - { - type: "function", - name: "test-tool", - description: "Test tool", - inputSchema: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - toolChoice: { - type: "tool", - 
toolName: "test-tool", - }, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - stream: true, - stream_options: { include_usage: true }, - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - response_format: { - type: "json_schema", - json_schema: { - schema: testSchema, - strict: true, - name: "PersonResponse", - description: "A person object", - }, - }, - tools: [ - { - type: "function", - function: { - name: "test-tool", - description: "Test tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - }, - ], - tool_choice: { - type: "function", - function: { name: "test-tool" }, - }, - }); - }); - - it("should pass debug settings", async () => { - prepareStreamResponse({ content: ["Hello"] }); - - const debugModel = provider.chat("anthropic/claude-3.5-sonnet", { - debug: { - echo_upstream_body: true, - }, - }); - - await debugModel.doStream({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - stream: true, - stream_options: { include_usage: true }, - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - debug: { - echo_upstream_body: true, - }, - }); - }); - - it("should include file annotations in finish metadata when streamed", async () => { - // This test verifies that file annotations from FileParserPlugin are accumulated - // during streaming and included in the finish event's providerMetadata - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - // First chunk with role and content - `data: 
{"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"The title is Bitcoin."},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Chunk with file annotation - `data: {"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + - `"annotations":[{"type":"file","file":{"hash":"abc123def456","name":"bitcoin.pdf","content":[{"type":"text","text":"Page 1 content"},{"type":"text","text":"Page 2 content"}]}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Finish chunk - `data: {"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + - `"logprobs":null,"finish_reason":"stop"}]}\n\n`, - `data: {"id":"chatcmpl-file-annotations","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":100,"completion_tokens":20,"total_tokens":120}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; - - // Find the finish chunk - const finishChunk = elements.find( - (chunk): chunk is Extract => - chunk.type === "finish", - ); - - expect(finishChunk).toBeDefined(); - - // Verify file annotations are included in providerMetadata - const hyperbolicMetadata = finishChunk?.providerMetadata?.hyperbolic as { - annotations?: Array<{ - type: "file"; - file: { - hash: string; - name: string; - content?: Array<{ type: string; text?: string }>; - }; - }>; - }; - - 
expect(hyperbolicMetadata?.annotations).toStrictEqual([ - { - type: "file", - file: { - hash: "abc123def456", - name: "bitcoin.pdf", - content: [ - { type: "text", text: "Page 1 content" }, - { type: "text", text: "Page 2 content" }, - ], - }, - }, - ]); - }); - - it("should accumulate multiple file annotations from stream", async () => { - // This test verifies that multiple file annotations are accumulated correctly - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks: [ - // First chunk with content - `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":"Comparing two documents."},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // First file annotation - `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + - `"annotations":[{"type":"file","file":{"hash":"hash1","name":"doc1.pdf","content":[{"type":"text","text":"Doc 1"}]}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Second file annotation - `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{` + - `"annotations":[{"type":"file","file":{"hash":"hash2","name":"doc2.pdf","content":[{"type":"text","text":"Doc 2"}]}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - // Finish chunk - `data: {"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},` + - `"logprobs":null,"finish_reason":"stop"}]}\n\n`, - `data: 
{"id":"chatcmpl-multi-files","object":"chat.completion.chunk","created":1711357598,"model":"gpt-4o-mini",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":100,"completion_tokens":20,"total_tokens":120}}\n\n`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; - - const finishChunk = elements.find( - (chunk): chunk is Extract => - chunk.type === "finish", - ); - - const hyperbolicMetadata = finishChunk?.providerMetadata?.hyperbolic as { - annotations?: Array<{ - type: "file"; - file: { - hash: string; - name: string; - content?: Array<{ type: string; text?: string }>; - }; - }>; - }; - - // Both file annotations should be accumulated - expect(hyperbolicMetadata?.annotations).toHaveLength(2); - expect(hyperbolicMetadata?.annotations?.[0]?.file.hash).toBe("hash1"); - expect(hyperbolicMetadata?.annotations?.[1]?.file.hash).toBe("hash2"); - }); -}); - -describe("debug settings", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": { - response: { type: "json-value", body: {} }, - }, - }); - - function prepareJsonResponse({ content = "" }: { content?: string } = {}) { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "json-value", - body: { - id: "chatcmpl-test", - object: "chat.completion", - created: 1711115037, - model: "anthropic/claude-3.5-sonnet", - choices: [ - { - index: 0, - message: { - role: "assistant", - content, - }, - finish_reason: "stop", - }, - ], - usage: { - prompt_tokens: 4, - total_tokens: 34, - completion_tokens: 30, - }, - }, - }; - } - - it("should pass debug settings in doGenerate", async () => { - prepareJsonResponse({ content: "Hello!" 
}); - - const debugModel = provider.chat("anthropic/claude-3.5-sonnet", { - debug: { - echo_upstream_body: true, - }, - }); - - await debugModel.doGenerate({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - debug: { - echo_upstream_body: true, - }, - }); - }); - - it("should not include debug when not set", async () => { - prepareJsonResponse({ content: "Hello!" }); - - await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const requestBody = await server.calls[0]!.requestBodyJson; - expect(requestBody).not.toHaveProperty("debug"); - }); -}); diff --git a/packages/ai-sdk-provider/src/chat/index.ts b/packages/ai-sdk-provider/src/chat/index.ts deleted file mode 100644 index dd3cb19..0000000 --- a/packages/ai-sdk-provider/src/chat/index.ts +++ /dev/null @@ -1,1051 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { - LanguageModelV3, - LanguageModelV3CallOptions, - LanguageModelV3Content, - LanguageModelV3FinishReason, - LanguageModelV3FunctionTool, - LanguageModelV3ResponseMetadata, - LanguageModelV3StreamPart, - LanguageModelV3Usage, - SharedV3Headers, - SharedV3ProviderMetadata, - SharedV3Warning, -} from "@ai-sdk/provider"; -import type { ParseResult } from "@ai-sdk/provider-utils"; -import type { z } from "zod/v4"; -import { APICallError, InvalidResponseDataError, NoContentGeneratedError } from "@ai-sdk/provider"; -import { - combineHeaders, - createEventSourceResponseHandler, - createJsonResponseHandler, - generateId, - isParsableJson, - postJsonToApi, -} from "@ai-sdk/provider-utils"; - -import type { FileAnnotation } from "../schemas/provider-metadata"; -import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; -import type { - HyperbolicChatModelId, - HyperbolicChatSettings, -} from "../types/hyperbolic-chat-settings"; -import type { HyperbolicUsageAccounting } from "../types/index"; -import { hyperbolicFailedResponseHandler } from "../schemas/error-response"; -import { HyperbolicProviderMetadataSchema } from "../schemas/provider-metadata"; -import { ReasoningDetailType } from "../schemas/reasoning-details"; -import { createFinishReason, mapHyperbolicFinishReason } from "../utils/map-finish-reason"; -import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; -import { getBase64FromDataUrl, getMediaType } from "./file-url-utils"; -import { getChatCompletionToolChoice } from "./get-tool-choice"; -import { - HyperbolicNonStreamChatCompletionResponseSchema, - HyperbolicStreamChatCompletionChunkSchema, -} from "./schemas"; - -type HyperbolicChatConfig = { - provider: string; - compatibility: "strict" | "compatible"; - headers: () => Record; - url: (options: { modelId: string; path: string }) => string; - fetch?: typeof fetch; - extraBody?: Record; -}; - 
-export class HyperbolicChatLanguageModel implements LanguageModelV3 { - readonly specificationVersion = "v3" as const; - readonly provider = "hyperbolic"; - readonly defaultObjectGenerationMode = "tool" as const; - - readonly modelId: HyperbolicChatModelId; - readonly supportsImageUrls = true; - readonly supportedUrls: Record = { - "image/*": [/^data:image\/[a-zA-Z]+;base64,/, /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i], - // 'text/*': [/^data:text\//, /^https?:\/\/.+$/], - "application/*": [/^data:application\//, /^https?:\/\/.+$/], - }; - readonly settings: HyperbolicChatSettings; - - private readonly config: HyperbolicChatConfig; - - constructor( - modelId: HyperbolicChatModelId, - settings: HyperbolicChatSettings, - config: HyperbolicChatConfig, - ) { - this.modelId = modelId; - this.settings = settings; - this.config = config; - } - - private getArgs({ - prompt, - maxOutputTokens, - temperature, - topP, - frequencyPenalty, - presencePenalty, - seed, - stopSequences, - responseFormat, - topK, - tools, - toolChoice, - }: LanguageModelV3CallOptions) { - const baseArgs = { - // model id: - model: this.modelId, - models: this.settings.models, - - // model specific settings: - logit_bias: this.settings.logitBias, - logprobs: - this.settings.logprobs === true || typeof this.settings.logprobs === "number" - ? true - : undefined, - top_logprobs: - typeof this.settings.logprobs === "number" - ? this.settings.logprobs - : typeof this.settings.logprobs === "boolean" - ? this.settings.logprobs - ? 0 - : undefined - : undefined, - user: this.settings.user, - parallel_tool_calls: this.settings.parallelToolCalls, - - // standardized settings: - max_tokens: maxOutputTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - seed, - - stop: stopSequences, - response_format: - responseFormat?.type === "json" - ? responseFormat.schema != null - ? 
{ - type: "json_schema", - json_schema: { - schema: responseFormat.schema, - strict: true, - name: responseFormat.name ?? "response", - ...(responseFormat.description && { - description: responseFormat.description, - }), - }, - } - : { type: "json_object" } - : undefined, - top_k: topK, - - // messages: - messages: convertToHyperbolicChatMessages(prompt), - - // Hyperbolic specific settings: - include_reasoning: this.settings.includeReasoning, - reasoning: this.settings.reasoning, - usage: this.settings.usage, - - // Web search settings: - plugins: this.settings.plugins, - web_search_options: this.settings.web_search_options, - // Provider routing settings: - provider: this.settings.provider, - // Debug settings: - debug: this.settings.debug, - - // extra body: - ...this.config.extraBody, - ...this.settings.extraBody, - }; - - if (tools && tools.length > 0) { - // TODO: support built-in tools - const mappedTools = tools - .filter((tool): tool is LanguageModelV3FunctionTool => tool.type === "function") - .map((tool) => ({ - type: "function" as const, - function: { - name: tool.name, - description: tool.description, - parameters: tool.inputSchema, - }, - })); - - return { - ...baseArgs, - tools: mappedTools, - tool_choice: toolChoice ? 
getChatCompletionToolChoice(toolChoice) : undefined, - }; - } - - return baseArgs; - } - - async doGenerate(options: LanguageModelV3CallOptions): Promise<{ - content: Array; - finishReason: LanguageModelV3FinishReason; - usage: LanguageModelV3Usage; - warnings: Array; - providerMetadata?: { - hyperbolic: { - provider: string; - reasoning_details?: ReasoningDetailUnion[]; - usage: HyperbolicUsageAccounting; - }; - }; - request?: { body?: unknown }; - response?: LanguageModelV3ResponseMetadata & { - headers?: SharedV3Headers; - body?: unknown; - }; - }> { - const providerOptions = options.providerOptions || {}; - const hyperbolicOptions = providerOptions.hyperbolic || {}; - - const args = { - ...this.getArgs(options), - ...hyperbolicOptions, - }; - - const { value: responseValue, responseHeaders } = await postJsonToApi({ - url: this.config.url({ - path: "/chat/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: args, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler( - HyperbolicNonStreamChatCompletionResponseSchema, - ), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - // Check if response is an error (HTTP 200 with error payload) - if ("error" in responseValue) { - const errorData = responseValue.error as { - message: string; - code?: string; - }; - throw new APICallError({ - message: errorData.message, - url: this.config.url({ - path: "/chat/completions", - modelId: this.modelId, - }), - requestBodyValues: args, - statusCode: 200, - responseHeaders, - data: errorData, - }); - } - - // Now TypeScript knows this is the success response - const response = responseValue; - - const choice = response.choices[0]; - - if (!choice) { - throw new NoContentGeneratedError({ - message: "No choice in response", - }); - } - - // Extract detailed usage information - const usageInfo: LanguageModelV3Usage = response.usage - ? 
{ - inputTokens: { - total: response.usage.prompt_tokens ?? 0, - noCache: undefined, - cacheRead: response.usage.prompt_tokens_details?.cached_tokens ?? undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: response.usage.completion_tokens ?? 0, - text: undefined, - reasoning: response.usage.completion_tokens_details?.reasoning_tokens ?? undefined, - }, - } - : { - inputTokens: { - total: 0, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: 0, - text: undefined, - reasoning: undefined, - }, - }; - - const reasoningDetails = choice.message.reasoning_details ?? []; - - const reasoning: Array = - reasoningDetails.length > 0 - ? (reasoningDetails - .map((detail) => { - switch (detail.type) { - case ReasoningDetailType.Text: { - if (detail.text) { - return { - type: "reasoning" as const, - text: detail.text, - providerMetadata: { - hyperbolic: { - reasoning_details: [detail], - }, - }, - }; - } - break; - } - case ReasoningDetailType.Summary: { - if (detail.summary) { - return { - type: "reasoning" as const, - text: detail.summary, - providerMetadata: { - hyperbolic: { - reasoning_details: [detail], - }, - }, - }; - } - break; - } - case ReasoningDetailType.Encrypted: { - // For encrypted reasoning, we include a redacted placeholder - if (detail.data) { - return { - type: "reasoning" as const, - text: "[REDACTED]", - providerMetadata: { - hyperbolic: { - reasoning_details: [detail], - }, - }, - }; - } - break; - } - default: { - detail satisfies never; - } - } - return null; - }) - .filter((p) => p !== null) as Array) - : choice.message.reasoning - ? 
[ - { - type: "reasoning" as const, - text: choice.message.reasoning, - }, - ] - : []; - - const content: Array = []; - - // Add reasoning content first - content.push(...reasoning); - - if (choice.message.content) { - content.push({ - type: "text" as const, - text: choice.message.content, - }); - } - - if (choice.message.tool_calls) { - for (const toolCall of choice.message.tool_calls) { - content.push({ - type: "tool-call" as const, - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - input: toolCall.function.arguments, - providerMetadata: { - hyperbolic: { - reasoning_details: reasoningDetails, - }, - }, - }); - } - } - - if (choice.message.images) { - for (const image of choice.message.images) { - content.push({ - type: "file" as const, - mediaType: getMediaType(image.image_url.url, "image/jpeg"), - data: getBase64FromDataUrl(image.image_url.url), - }); - } - } - - if (choice.message.annotations) { - for (const annotation of choice.message.annotations) { - if (annotation.type === "url_citation") { - content.push({ - type: "source" as const, - sourceType: "url" as const, - id: annotation.url_citation.url, - url: annotation.url_citation.url, - title: annotation.url_citation.title, - providerMetadata: { - hyperbolic: { - content: annotation.url_citation.content || "", - }, - }, - }); - } - } - } - - // Extract file annotations to expose in providerMetadata - const fileAnnotations = choice.message.annotations?.filter( - ( - a, - ): a is { - type: "file"; - file: { - hash: string; - name: string; - content?: Array<{ type: string; text?: string }>; - }; - } => a.type === "file", - ); - - // Fix for Gemini 3 thoughtSignature: when there are tool calls with encrypted - // reasoning (thoughtSignature), the model returns 'stop' but expects continuation. - // Override to 'tool-calls' so the SDK knows to continue the conversation. 
- const hasToolCalls = choice.message.tool_calls && choice.message.tool_calls.length > 0; - const hasEncryptedReasoning = reasoningDetails.some( - (d) => d.type === ReasoningDetailType.Encrypted && d.data, - ); - const shouldOverrideFinishReason = - hasToolCalls && hasEncryptedReasoning && choice.finish_reason === "stop"; - - const effectiveFinishReason = shouldOverrideFinishReason - ? createFinishReason("tool-calls", choice.finish_reason ?? undefined) - : mapHyperbolicFinishReason(choice.finish_reason); - - return { - content, - finishReason: effectiveFinishReason, - usage: usageInfo, - warnings: [], - providerMetadata: { - hyperbolic: HyperbolicProviderMetadataSchema.parse({ - provider: response.provider ?? "", - reasoning_details: choice.message.reasoning_details ?? [], - annotations: fileAnnotations && fileAnnotations.length > 0 ? fileAnnotations : undefined, - usage: { - promptTokens: usageInfo.inputTokens.total ?? 0, - completionTokens: usageInfo.outputTokens.total ?? 0, - totalTokens: (usageInfo.inputTokens.total ?? 0) + (usageInfo.outputTokens.total ?? 0), - cost: response.usage?.cost, - ...(response.usage?.prompt_tokens_details?.cached_tokens != null - ? { - promptTokensDetails: { - cachedTokens: response.usage.prompt_tokens_details.cached_tokens, - }, - } - : {}), - ...(response.usage?.completion_tokens_details?.reasoning_tokens != null - ? { - completionTokensDetails: { - reasoningTokens: response.usage.completion_tokens_details.reasoning_tokens, - }, - } - : {}), - ...(response.usage?.cost_details?.upstream_inference_cost != null - ? 
{ - costDetails: { - upstreamInferenceCost: response.usage.cost_details.upstream_inference_cost, - }, - } - : {}), - }, - }), - }, - request: { body: args }, - response: { - id: response.id, - modelId: response.model, - headers: responseHeaders, - }, - }; - } - - async doStream(options: LanguageModelV3CallOptions): Promise<{ - stream: ReadableStream; - warnings: Array; - request?: { body?: unknown }; - response?: LanguageModelV3ResponseMetadata & { - headers?: SharedV3Headers; - body?: unknown; - }; - }> { - const providerOptions = options.providerOptions || {}; - const hyperbolicOptions = providerOptions.hyperbolic || {}; - - const args = { - ...this.getArgs(options), - ...hyperbolicOptions, - }; - - const { value: response, responseHeaders } = await postJsonToApi({ - url: this.config.url({ - path: "/chat/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: { - ...args, - stream: true, - - // only include stream_options when in strict compatibility mode: - stream_options: - this.config.compatibility === "strict" - ? { - include_usage: true, - // If user has requested usage accounting, make sure we get it in the stream - ...(this.settings.usage?.include ? 
{ include_usage: true } : {}), - } - : undefined, - }, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createEventSourceResponseHandler( - HyperbolicStreamChatCompletionChunkSchema, - ), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - const toolCalls: Array<{ - id: string; - type: "function"; - function: { - name: string; - arguments: string; - }; - inputStarted: boolean; - sent: boolean; - }> = []; - - let finishReason: LanguageModelV3FinishReason = createFinishReason("other"); - const usage: LanguageModelV3Usage = { - inputTokens: { - total: undefined, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: undefined, - text: undefined, - reasoning: undefined, - }, - }; - - // Track provider-specific usage information - const hyperbolicUsage: Partial = {}; - - // Track reasoning details to preserve for multi-turn conversations - const accumulatedReasoningDetails: ReasoningDetailUnion[] = []; - - // Track file annotations to expose in providerMetadata - const accumulatedFileAnnotations: FileAnnotation[] = []; - - let textStarted = false; - let reasoningStarted = false; - let textId: string | undefined; - let reasoningId: string | undefined; - let hyperbolicResponseId: string | undefined; - let provider: string | undefined; - - return { - stream: response.pipeThrough( - new TransformStream< - ParseResult>, - LanguageModelV3StreamPart - >({ - transform(chunk, controller) { - // handle failed chunk parsing / validation: - if (!chunk.success) { - finishReason = createFinishReason("error"); - controller.enqueue({ type: "error", error: chunk.error }); - return; - } - - const value = chunk.value; - - // handle error chunks: - if ("error" in value) { - finishReason = createFinishReason("error"); - controller.enqueue({ type: "error", error: value.error }); - return; - } - - if (value.provider) { - provider = value.provider; - } - - if (value.id) { - 
hyperbolicResponseId = value.id; - controller.enqueue({ - type: "response-metadata", - id: value.id, - }); - } - - if (value.model) { - controller.enqueue({ - type: "response-metadata", - modelId: value.model, - }); - } - - if (value.usage != null) { - usage.inputTokens.total = value.usage.prompt_tokens; - usage.outputTokens.total = value.usage.completion_tokens; - - // Collect Hyperbolic specific usage information - hyperbolicUsage.promptTokens = value.usage.prompt_tokens; - - if (value.usage.prompt_tokens_details) { - const cachedInputTokens = value.usage.prompt_tokens_details.cached_tokens ?? 0; - - usage.inputTokens.cacheRead = cachedInputTokens; - hyperbolicUsage.promptTokensDetails = { - cachedTokens: cachedInputTokens, - }; - } - - hyperbolicUsage.completionTokens = value.usage.completion_tokens; - if (value.usage.completion_tokens_details) { - const reasoningTokens = value.usage.completion_tokens_details.reasoning_tokens ?? 0; - - usage.outputTokens.reasoning = reasoningTokens; - hyperbolicUsage.completionTokensDetails = { - reasoningTokens, - }; - } - - hyperbolicUsage.cost = value.usage.cost; - hyperbolicUsage.totalTokens = value.usage.total_tokens; - const upstreamInferenceCost = value.usage.cost_details?.upstream_inference_cost; - if (upstreamInferenceCost != null) { - hyperbolicUsage.costDetails = { - upstreamInferenceCost, - }; - } - } - - const choice = value.choices[0]; - - if (choice?.finish_reason != null) { - finishReason = mapHyperbolicFinishReason(choice.finish_reason); - } - - if (choice?.delta == null) { - return; - } - - const delta = choice.delta; - - const emitReasoningChunk = ( - chunkText: string, - providerMetadata?: SharedV3ProviderMetadata, - ) => { - if (!reasoningStarted) { - reasoningId = hyperbolicResponseId || generateId(); - controller.enqueue({ - providerMetadata, - type: "reasoning-start", - id: reasoningId, - }); - reasoningStarted = true; - } - controller.enqueue({ - providerMetadata, - type: "reasoning-delta", - delta: 
chunkText, - id: reasoningId || generateId(), - }); - }; - - if (delta.reasoning_details && delta.reasoning_details.length > 0) { - // Accumulate reasoning_details to preserve for multi-turn conversations - // Merge consecutive reasoning.text items into a single entry - for (const detail of delta.reasoning_details) { - if (detail.type === ReasoningDetailType.Text) { - const lastDetail = - accumulatedReasoningDetails[accumulatedReasoningDetails.length - 1]; - if (lastDetail?.type === ReasoningDetailType.Text) { - // Merge with the previous text detail - lastDetail.text = (lastDetail.text || "") + (detail.text || ""); - - lastDetail.signature = lastDetail.signature || detail.signature; - - lastDetail.format = lastDetail.format || detail.format; - } else { - // Start a new text detail - accumulatedReasoningDetails.push({ ...detail }); - } - } else { - // Non-text details (encrypted, summary) are pushed as-is - accumulatedReasoningDetails.push(detail); - } - } - - // Emit reasoning_details in providerMetadata for each delta chunk - // so users can accumulate them on their end before sending back - const reasoningMetadata: SharedV3ProviderMetadata = { - hyperbolic: { - reasoning_details: delta.reasoning_details, - }, - }; - - for (const detail of delta.reasoning_details) { - switch (detail.type) { - case ReasoningDetailType.Text: { - if (detail.text) { - emitReasoningChunk(detail.text, reasoningMetadata); - } - break; - } - case ReasoningDetailType.Encrypted: { - if (detail.data) { - emitReasoningChunk("[REDACTED]", reasoningMetadata); - } - break; - } - case ReasoningDetailType.Summary: { - if (detail.summary) { - emitReasoningChunk(detail.summary, reasoningMetadata); - } - break; - } - default: { - detail satisfies never; - break; - } - } - } - } else if (delta.reasoning) { - emitReasoningChunk(delta.reasoning); - } - - if (delta.content) { - // If reasoning was previously active and now we're starting text content, - // we should end the reasoning first to maintain 
proper order - if (reasoningStarted && !textStarted) { - controller.enqueue({ - type: "reasoning-end", - id: reasoningId || generateId(), - }); - reasoningStarted = false; // Mark as ended so we don't end it again in flush - } - - if (!textStarted) { - textId = hyperbolicResponseId || generateId(); - controller.enqueue({ - type: "text-start", - id: textId, - }); - textStarted = true; - } - controller.enqueue({ - type: "text-delta", - delta: delta.content, - id: textId || generateId(), - }); - } - - if (delta.annotations) { - for (const annotation of delta.annotations) { - if (annotation.type === "url_citation") { - controller.enqueue({ - type: "source", - sourceType: "url" as const, - id: annotation.url_citation.url, - url: annotation.url_citation.url, - title: annotation.url_citation.title, - providerMetadata: { - hyperbolic: { - content: annotation.url_citation.content || "", - }, - }, - }); - } else if (annotation.type === "file") { - // Accumulate file annotations to expose in providerMetadata - // Type guard to validate structure matches expected shape - const file = (annotation as { file?: unknown }).file; - if (file && typeof file === "object" && "hash" in file && "name" in file) { - accumulatedFileAnnotations.push(annotation as FileAnnotation); - } - } - } - } - - if (delta.tool_calls != null) { - for (const toolCallDelta of delta.tool_calls) { - const index = toolCallDelta.index ?? toolCalls.length - 1; - - // Tool call start. Hyperbolic returns all information except the arguments in the first chunk. 
- if (toolCalls[index] == null) { - if (toolCallDelta.type !== "function") { - throw new InvalidResponseDataError({ - data: toolCallDelta, - message: `Expected 'function' type.`, - }); - } - - if (toolCallDelta.id == null) { - throw new InvalidResponseDataError({ - data: toolCallDelta, - message: `Expected 'id' to be a string.`, - }); - } - - if (toolCallDelta.function?.name == null) { - throw new InvalidResponseDataError({ - data: toolCallDelta, - message: `Expected 'function.name' to be a string.`, - }); - } - - toolCalls[index] = { - id: toolCallDelta.id, - type: "function", - function: { - name: toolCallDelta.function.name, - arguments: toolCallDelta.function.arguments ?? "", - }, - inputStarted: false, - sent: false, - }; - - const toolCall = toolCalls[index]; - - if (toolCall == null) { - throw new InvalidResponseDataError({ - data: { index, toolCallsLength: toolCalls.length }, - message: `Tool call at index ${index} is missing after creation.`, - }); - } - - // check if tool call is complete (some providers send the full tool call in one chunk) - if ( - toolCall.function?.name != null && - toolCall.function?.arguments != null && - isParsableJson(toolCall.function.arguments) - ) { - toolCall.inputStarted = true; - - controller.enqueue({ - type: "tool-input-start", - id: toolCall.id, - toolName: toolCall.function.name, - }); - - // send delta - controller.enqueue({ - type: "tool-input-delta", - id: toolCall.id, - delta: toolCall.function.arguments, - }); - - controller.enqueue({ - type: "tool-input-end", - id: toolCall.id, - }); - - // send tool call - controller.enqueue({ - type: "tool-call", - toolCallId: toolCall.id, - toolName: toolCall.function.name, - input: toolCall.function.arguments, - providerMetadata: { - hyperbolic: { - reasoning_details: accumulatedReasoningDetails, - }, - }, - }); - - toolCall.sent = true; - } - - continue; - } - - // existing tool call, merge - const toolCall = toolCalls[index]; - - if (toolCall == null) { - throw new 
InvalidResponseDataError({ - data: { - index, - toolCallsLength: toolCalls.length, - toolCallDelta, - }, - message: `Tool call at index ${index} is missing during merge.`, - }); - } - - if (!toolCall.inputStarted) { - toolCall.inputStarted = true; - controller.enqueue({ - type: "tool-input-start", - id: toolCall.id, - toolName: toolCall.function.name, - }); - } - - if (toolCallDelta.function?.arguments != null) { - toolCall.function.arguments += toolCallDelta.function?.arguments ?? ""; - } - - // send delta - controller.enqueue({ - type: "tool-input-delta", - id: toolCall.id, - delta: toolCallDelta.function.arguments ?? "", - }); - - // check if tool call is complete - if ( - toolCall.function?.name != null && - toolCall.function?.arguments != null && - isParsableJson(toolCall.function.arguments) - ) { - controller.enqueue({ - type: "tool-call", - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - input: toolCall.function.arguments, - providerMetadata: { - hyperbolic: { - reasoning_details: accumulatedReasoningDetails, - }, - }, - }); - - toolCall.sent = true; - } - } - } - - if (delta.images != null) { - for (const image of delta.images) { - controller.enqueue({ - type: "file", - mediaType: getMediaType(image.image_url.url, "image/jpeg"), - data: getBase64FromDataUrl(image.image_url.url), - }); - } - } - }, - - flush(controller) { - // Fix for Gemini 3 thoughtSignature: when there are tool calls with encrypted - // reasoning (thoughtSignature), the model returns 'stop' but expects continuation. - // Override to 'tool-calls' so the SDK knows to continue the conversation. 
- const hasToolCalls = toolCalls.length > 0; - const hasEncryptedReasoning = accumulatedReasoningDetails.some( - (d) => d.type === ReasoningDetailType.Encrypted && d.data, - ); - if (hasToolCalls && hasEncryptedReasoning && finishReason.unified === "stop") { - finishReason = createFinishReason("tool-calls", finishReason.raw); - } - - // Forward any unsent tool calls if finish reason is 'tool-calls' - if (finishReason.unified === "tool-calls") { - for (const toolCall of toolCalls) { - if (toolCall && !toolCall.sent) { - controller.enqueue({ - type: "tool-call", - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - // Coerce invalid arguments to an empty JSON object - input: isParsableJson(toolCall.function.arguments) - ? toolCall.function.arguments - : "{}", - providerMetadata: { - hyperbolic: { - reasoning_details: accumulatedReasoningDetails, - }, - }, - }); - toolCall.sent = true; - } - } - } - - // End reasoning first if it was started, to maintain proper order - if (reasoningStarted) { - controller.enqueue({ - type: "reasoning-end", - id: reasoningId || generateId(), - }); - } - if (textStarted) { - controller.enqueue({ - type: "text-end", - id: textId || generateId(), - }); - } - - const hyperbolicMetadata: { - usage: Partial; - provider?: string; - reasoning_details?: ReasoningDetailUnion[]; - annotations?: FileAnnotation[]; - } = { - usage: hyperbolicUsage, - }; - - // Only include provider if it's actually set - if (provider !== undefined) { - hyperbolicMetadata.provider = provider; - } - - // Include accumulated reasoning_details if any were received - if (accumulatedReasoningDetails.length > 0) { - hyperbolicMetadata.reasoning_details = accumulatedReasoningDetails; - } - - // Include accumulated file annotations if any were received - if (accumulatedFileAnnotations.length > 0) { - hyperbolicMetadata.annotations = accumulatedFileAnnotations; - } - - controller.enqueue({ - type: "finish", - finishReason, - usage, - 
providerMetadata: { - hyperbolic: hyperbolicMetadata, - }, - }); - }, - }), - ), - warnings: [], - request: { body: args }, - response: { headers: responseHeaders }, - }; - } -} diff --git a/packages/ai-sdk-provider/src/chat/is-url.ts b/packages/ai-sdk-provider/src/chat/is-url.ts deleted file mode 100644 index 94a3691..0000000 --- a/packages/ai-sdk-provider/src/chat/is-url.ts +++ /dev/null @@ -1,19 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -export function isUrl({ - url, - protocols, -}: { - url: string | URL; - protocols: Set<`${string}:`>; -}): boolean { - try { - const urlObj = new URL(url); - // Cast to the literal string due to Set inferred input type - return protocols.has(urlObj.protocol as `${string}:`); - } catch (_) { - return false; - } -} diff --git a/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts b/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts deleted file mode 100644 index 7cbbdda..0000000 --- a/packages/ai-sdk-provider/src/chat/large-pdf-response.test.ts +++ /dev/null @@ -1,105 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; -import { describe, expect, it } from "vitest"; - -import { createHyperbolic } from "../provider"; -import { createTestServer } from "../test-utils/test-server"; - -const TEST_PROMPT: LanguageModelV3Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -const provider = createHyperbolic({ - baseURL: "https://api.hyperbolic.xyz/v1", - apiKey: "test-api-key", -}); - -const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": {}, -}); - -describe("Large PDF Response Handling", () => { - describe("doGenerate", () => { - it("should handle HTTP 200 responses with error payloads (500 internal errors)", async () => { - // This is the actual response Hyperbolic returns for large PDF failures - // HTTP 200 status but contains error object instead of choices - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "json-value", - body: { - error: { - message: "Internal Server Error", - code: 500, - }, - user_id: "org_abc123", - }, - }; - - const model = provider("anthropic/claude-3.5-sonnet"); - - await expect( - model.doGenerate({ - prompt: TEST_PROMPT, - }), - ).rejects.toThrow("Internal Server Error"); - }); - - it("should parse successful large PDF responses with file annotations", async () => { - // Successful response with file annotations from FileParserPlugin - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "json-value", - body: { - id: "gen-123", - model: "anthropic/claude-3.5-sonnet", - provider: "Anthropic", - choices: [ - { - index: 0, - message: { - role: "assistant", - content: "LARGE-M9N3T", - annotations: [ - { - type: "file_annotation", - file_annotation: { - file_id: "file_abc123", - 
quote: "extracted text", - }, - }, - ], - }, - finish_reason: "stop", - }, - ], - usage: { - prompt_tokens: 100, - completion_tokens: 20, - total_tokens: 120, - }, - }, - }; - - const model = provider("anthropic/claude-3.5-sonnet", { - usage: { include: true }, - }); - - const result = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(result.content).toMatchObject([ - { - type: "text", - text: "LARGE-M9N3T", - }, - ]); - expect((result.usage.inputTokens?.total ?? 0) + (result.usage.outputTokens?.total ?? 0)).toBe( - 120, - ); - }); - }); -}); diff --git a/packages/ai-sdk-provider/src/chat/payload-comparison.test.ts b/packages/ai-sdk-provider/src/chat/payload-comparison.test.ts deleted file mode 100644 index 9016ee4..0000000 --- a/packages/ai-sdk-provider/src/chat/payload-comparison.test.ts +++ /dev/null @@ -1,160 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV3Prompt } from "@ai-sdk/provider"; -import { describe, expect, it, vi } from "vitest"; - -import type { HyperbolicChatCompletionsInput } from "../types/hyperbolic-chat-completions-input"; -import type { HyperbolicChatSettings } from "../types/hyperbolic-chat-settings"; -import { createHyperbolic } from "../provider"; - -describe("Payload Comparison - Large PDF", () => { - it("should send payload matching fetch baseline for large PDFs", async () => { - interface CapturedRequestBody { - model: string; - messages: HyperbolicChatCompletionsInput; - plugins?: HyperbolicChatSettings["plugins"]; - usage?: { include: boolean }; - } - - // Capture what the provider actually sends - let capturedRequestBody: CapturedRequestBody | null = null; - - const mockFetch = vi.fn(async (_url: string, init?: RequestInit) => { - // Capture the request body - if (init?.body) { - capturedRequestBody = JSON.parse(init.body as string) as CapturedRequestBody; - } - - // Return a 
minimal success response - return new Response( - JSON.stringify({ - id: "test-123", - model: "anthropic/claude-3.5-sonnet", - choices: [ - { - message: { - role: "assistant", - content: "Test response", - }, - finish_reason: "stop", - }, - ], - usage: { - prompt_tokens: 10, - completion_tokens: 5, - total_tokens: 15, - }, - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ); - }) as typeof fetch; - - const provider = createHyperbolic({ - apiKey: "test-key", - fetch: mockFetch, - }); - - // Simulate a large PDF (use a small base64 for testing, but structure matters) - const smallPdfBase64 = "JVBERi0xLjQKJeLjz9MKM...(truncated)"; - const dataUrl = `data:application/pdf;base64,${smallPdfBase64}`; - - const prompt: LanguageModelV3Prompt = [ - { - role: "user", - content: [ - { - type: "text", - text: "Extract the verification code. Reply with ONLY the code.", - }, - { - type: "file", - data: dataUrl, - mediaType: "application/pdf", - }, - ], - }, - ]; - - const model = provider("anthropic/claude-3.5-sonnet", { - plugins: [{ id: "file-parser", pdf: { engine: "mistral-ocr" } }], - usage: { include: true }, - }); - - await model.doGenerate({ prompt }); - - // Now assert the payload structure matches fetch baseline - expect(capturedRequestBody).toBeDefined(); - expect(capturedRequestBody).not.toBeNull(); - - // Expected structure based on fetch example: - // { - // model: 'anthropic/claude-3.5-sonnet', - // messages: [{ - // role: 'user', - // content: [ - // { type: 'file', file: { filename: '...', file_data: 'data:...' } }, - // { type: 'text', text: '...' 
} - // ] - // }], - // plugins: [{ id: 'file-parser', pdf: { engine: 'mistral-ocr' } }], - // usage: { include: true } - // } - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const messages = capturedRequestBody!.messages; - expect(messages).toHaveLength(1); - expect(messages[0]?.role).toBe("user"); - expect(messages[0]?.content).toBeInstanceOf(Array); - - const content = messages[0]?.content; - if (!Array.isArray(content)) { - throw new Error("Content should be an array"); - } - - // Find the file part - const filePart = content.find((part) => part.type === "file"); - expect(filePart).toBeDefined(); - - // CRITICAL ASSERTION: The file part should have a nested 'file' object with 'file_data' - // This is what the fetch example sends and what Hyperbolic expects - expect(filePart).toMatchObject({ - type: "file", - file: { - file_data: expect.stringContaining("data:application/pdf;base64,"), - }, - }); - - // Find the text part - const textPart = content.find((part) => part.type === "text"); - expect(textPart).toMatchObject({ - type: "text", - text: "Extract the verification code. 
Reply with ONLY the code.", - }); - - // Check for plugins array - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(capturedRequestBody!.plugins).toBeDefined(); - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(capturedRequestBody!.plugins).toBeInstanceOf(Array); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const { plugins } = capturedRequestBody!; - if (!plugins) { - throw new Error("Plugins should be defined"); - } - - const fileParserPlugin = plugins.find((p) => p.id === "file-parser"); - expect(fileParserPlugin).toBeDefined(); - expect(fileParserPlugin).toMatchObject({ - id: "file-parser", - pdf: { - engine: expect.stringMatching(/^(mistral-ocr|pdf-text|native)$/), - }, - }); - }); -}); diff --git a/packages/ai-sdk-provider/src/chat/schemas.ts b/packages/ai-sdk-provider/src/chat/schemas.ts deleted file mode 100644 index 9a032d1..0000000 --- a/packages/ai-sdk-provider/src/chat/schemas.ts +++ /dev/null @@ -1,290 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { z } from "zod/v4"; - -import { HyperbolicErrorResponseSchema } from "../schemas/error-response"; -import { ImageResponseArraySchema } from "../schemas/image"; -import { ReasoningDetailArraySchema } from "../schemas/reasoning-details"; - -const HyperbolicChatCompletionBaseResponseSchema = z - .object({ - id: z.string().optional(), - model: z.string().optional(), - provider: z.string().optional(), - usage: z - .object({ - prompt_tokens: z.number(), - prompt_tokens_details: z - .object({ - cached_tokens: z.number(), - }) - .passthrough() - .nullish(), - completion_tokens: z.number(), - completion_tokens_details: z - .object({ - reasoning_tokens: z.number(), - }) - .passthrough() - .nullish(), - total_tokens: z.number(), - cost: z.number().optional(), - cost_details: z - .object({ - upstream_inference_cost: z.number().nullish(), - }) - .passthrough() - .nullish(), - }) - .passthrough() - .nullish(), - }) - .passthrough(); -// limited version of the schema, focussed on what is needed for the implementation -// this approach limits breakages when the API changes and increases efficiency -export const HyperbolicNonStreamChatCompletionResponseSchema = z.union([ - // Success response with choices - HyperbolicChatCompletionBaseResponseSchema.extend({ - choices: z.array( - z - .object({ - message: z - .object({ - role: z.literal("assistant"), - content: z.string().nullable().optional(), - reasoning: z.string().nullable().optional(), - reasoning_details: ReasoningDetailArraySchema.nullish(), - images: ImageResponseArraySchema.nullish(), - - tool_calls: z - .array( - z - .object({ - id: z.string().optional().nullable(), - type: z.literal("function"), - function: z - .object({ - name: z.string(), - arguments: z.string(), - }) - .passthrough(), - }) - .passthrough(), - ) - .optional(), - - annotations: z - .array( - z.union([ - // URL citation from web search - z - .object({ - type: z.literal("url_citation"), - 
url_citation: z - .object({ - end_index: z.number(), - start_index: z.number(), - title: z.string(), - url: z.string(), - content: z.string().optional(), - }) - .passthrough(), - }) - .passthrough(), - // File annotation from FileParserPlugin (old format) - z - .object({ - type: z.literal("file_annotation"), - file_annotation: z - .object({ - file_id: z.string(), - quote: z.string().optional(), - }) - .passthrough(), - }) - .passthrough(), - // File annotation from FileParserPlugin (new format) - z - .object({ - type: z.literal("file"), - file: z - .object({ - hash: z.string(), - name: z.string(), - content: z - .array( - z - .object({ - type: z.string(), - text: z.string().optional(), - }) - .passthrough(), - ) - .optional(), - }) - .passthrough(), - }) - .passthrough(), - ]), - ) - .nullish(), - }) - .passthrough(), - index: z.number().nullish(), - logprobs: z - .object({ - content: z - .array( - z - .object({ - token: z.string(), - logprob: z.number(), - top_logprobs: z.array( - z - .object({ - token: z.string(), - logprob: z.number(), - }) - .passthrough(), - ), - }) - .passthrough(), - ) - .nullable(), - }) - .passthrough() - .nullable() - .optional(), - finish_reason: z.string().optional().nullable(), - }) - .passthrough(), - ), - }), - // Error response (HTTP 200 with error payload) - HyperbolicErrorResponseSchema.extend({ - user_id: z.string().optional(), - }), -]); -// limited version of the schema, focussed on what is needed for the implementation -// this approach limits breakages when the API changes and increases efficiency -export const HyperbolicStreamChatCompletionChunkSchema = z.union([ - HyperbolicChatCompletionBaseResponseSchema.extend({ - choices: z.array( - z - .object({ - delta: z - .object({ - role: z.enum(["assistant"]).optional(), - content: z.string().nullish(), - reasoning: z.string().nullish().optional(), - reasoning_details: ReasoningDetailArraySchema.nullish(), - images: ImageResponseArraySchema.nullish(), - tool_calls: z - .array( - z 
- .object({ - index: z.number().nullish(), - id: z.string().nullish(), - type: z.literal("function").optional(), - function: z - .object({ - name: z.string().nullish(), - arguments: z.string().nullish(), - }) - .passthrough(), - }) - .passthrough(), - ) - .nullish(), - - annotations: z - .array( - z.union([ - // URL citation from web search - z - .object({ - type: z.literal("url_citation"), - url_citation: z - .object({ - end_index: z.number(), - start_index: z.number(), - title: z.string(), - url: z.string(), - content: z.string().optional(), - }) - .passthrough(), - }) - .passthrough(), - // File annotation from FileParserPlugin (old format) - z - .object({ - type: z.literal("file_annotation"), - file_annotation: z - .object({ - file_id: z.string(), - quote: z.string().optional(), - }) - .passthrough(), - }) - .passthrough(), - // File annotation from FileParserPlugin (new format) - z - .object({ - type: z.literal("file"), - file: z - .object({ - hash: z.string(), - name: z.string(), - content: z - .array( - z - .object({ - type: z.string(), - text: z.string().optional(), - }) - .passthrough(), - ) - .optional(), - }) - .passthrough(), - }) - .passthrough(), - ]), - ) - .nullish(), - }) - .passthrough() - .nullish(), - logprobs: z - .object({ - content: z - .array( - z - .object({ - token: z.string(), - logprob: z.number(), - top_logprobs: z.array( - z - .object({ - token: z.string(), - logprob: z.number(), - }) - .passthrough(), - ), - }) - .passthrough(), - ) - .nullable(), - }) - .passthrough() - .nullish(), - finish_reason: z.string().nullable().optional(), - index: z.number().nullish(), - }) - .passthrough(), - ), - }), - HyperbolicErrorResponseSchema, -]); diff --git a/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts b/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts deleted file mode 100644 index 5c0d69e..0000000 --- 
a/packages/ai-sdk-provider/src/completion/convert-to-hyperbolic-completion-prompt.ts +++ /dev/null @@ -1,155 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { - LanguageModelV3FilePart, - LanguageModelV3Prompt, - LanguageModelV3ReasoningPart, - LanguageModelV3TextPart, - LanguageModelV3ToolCallPart, - LanguageModelV3ToolResultPart, -} from "@ai-sdk/provider"; -import { InvalidPromptError, UnsupportedFunctionalityError } from "@ai-sdk/provider"; - -export function convertToHyperbolicCompletionPrompt({ - prompt, - inputFormat, - user = "user", - assistant = "assistant", -}: { - prompt: LanguageModelV3Prompt; - inputFormat: "prompt" | "messages"; - user?: string; - assistant?: string; -}): { - prompt: string; -} { - // When the user supplied a prompt input, we don't transform it: - if ( - inputFormat === "prompt" && - prompt.length === 1 && - prompt[0] && - prompt[0].role === "user" && - prompt[0].content.length === 1 && - prompt[0].content[0] && - prompt[0].content[0].type === "text" - ) { - return { prompt: prompt[0].content[0].text }; - } - - // otherwise transform to a chat message format: - let text = ""; - - // if first message is a system message, add it to the text: - if (prompt[0] && prompt[0].role === "system") { - text += `${prompt[0].content}\n\n`; - prompt = prompt.slice(1); - } - - for (const { role, content } of prompt) { - switch (role) { - case "system": { - throw new InvalidPromptError({ - message: `Unexpected system message in prompt: ${content}`, - prompt, - }); - } - - case "user": { - const userMessage = content - .map((part: LanguageModelV3TextPart | LanguageModelV3FilePart) => { - switch (part.type) { - case "text": { - console.log("return 2", part.text); - return part.text; - } - - case "file": { - throw new UnsupportedFunctionalityError({ - functionality: "file attachments", - }); - } - default: { - 
console.log("return 3"); - return ""; - } - } - }) - .join(""); - - text += `${user}:\n${userMessage}\n\n`; - break; - } - - case "assistant": { - const assistantMessage = content - .map( - ( - part: - | LanguageModelV3TextPart - | LanguageModelV3FilePart - | LanguageModelV3ReasoningPart - | LanguageModelV3ToolCallPart - | LanguageModelV3ToolResultPart, - ) => { - switch (part.type) { - case "text": { - console.log("return 4"); - return part.text; - } - case "tool-call": { - throw new UnsupportedFunctionalityError({ - functionality: "tool-call messages", - }); - } - case "tool-result": { - throw new UnsupportedFunctionalityError({ - functionality: "tool-result messages", - }); - } - case "reasoning": { - throw new UnsupportedFunctionalityError({ - functionality: "reasoning messages", - }); - } - - case "file": { - throw new UnsupportedFunctionalityError({ - functionality: "file attachments", - }); - } - - default: { - console.log("return 5"); - return ""; - } - } - }, - ) - .join(""); - - text += `${assistant}:\n${assistantMessage}\n\n`; - break; - } - - case "tool": { - throw new UnsupportedFunctionalityError({ - functionality: "tool messages", - }); - } - - default: { - break; - } - } - } - - // Assistant message prefix: - text += `${assistant}:\n`; - console.log("return 6", { text }); - - return { - prompt: text, - }; -} diff --git a/packages/ai-sdk-provider/src/completion/index.test.ts b/packages/ai-sdk-provider/src/completion/index.test.ts deleted file mode 100644 index 679b9c4..0000000 --- a/packages/ai-sdk-provider/src/completion/index.test.ts +++ /dev/null @@ -1,581 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV3Prompt, LanguageModelV3StreamPart } from "@ai-sdk/provider"; -import { vi } from "vitest"; - -import { createHyperbolic } from "../provider"; -import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; - -vi.mock("../version", () => ({ - VERSION: "0.0.0-test", -})); - -const TEST_PROMPT: LanguageModelV3Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -const TEST_LOGPROBS = { - tokens: [" ever", " after", ".\n\n", "The", " end", "."], - token_logprobs: [-0.0664508, -0.014520033, -1.3820221, -0.7890417, -0.5323165, -0.10247037], - top_logprobs: [ - { - " ever": -0.0664508, - }, - { - " after": -0.014520033, - }, - { - ".\n\n": -1.3820221, - }, - { - The: -0.7890417, - }, - { - " end": -0.5323165, - }, - { - ".": -0.10247037, - }, - ] as Record[], -}; - -const provider = createHyperbolic({ - apiKey: "test-api-key", - compatibility: "strict", -}); - -const model = provider.completion("openai/gpt-3.5-turbo-instruct"); - -describe("doGenerate", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/completions": { - response: { type: "json-value", body: {} }, - }, - }); - - function prepareJsonResponse({ - content = "", - usage = { - prompt_tokens: 4, - total_tokens: 34, - completion_tokens: 30, - }, - logprobs = null, - finish_reason = "stop", - }: { - content?: string; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - tokens: string[]; - token_logprobs: number[]; - top_logprobs: Record[]; - } | null; - finish_reason?: string; - }) { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/completions"]!.response = { - type: "json-value", - body: { - id: "cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB", - object: "text_completion", - created: 1711363706, - model: "openai/gpt-3.5-turbo-instruct", 
- choices: [ - { - text: content, - index: 0, - logprobs, - finish_reason, - }, - ], - usage, - }, - }; - } - - it("should extract text response", async () => { - prepareJsonResponse({ content: "Hello, World!" }); - - const { content } = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - const text = content[0]?.type === "text" ? content[0].text : ""; - - expect(text).toStrictEqual("Hello, World!"); - }); - - it("should extract usage", async () => { - prepareJsonResponse({ - content: "", - usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, - }); - - const { usage } = await model.doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(usage).toStrictEqual({ - inputTokens: { - total: 20, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: 5, - text: undefined, - reasoning: undefined, - }, - }); - }); - - it("should extract logprobs", async () => { - prepareJsonResponse({ logprobs: TEST_LOGPROBS }); - - const provider = createHyperbolic({ apiKey: "test-api-key" }); - - await provider.completion("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ - prompt: TEST_PROMPT, - }); - }); - - it("should extract finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "stop", - }); - - const { finishReason } = await provider.completion("openai/gpt-3.5-turbo-instruct").doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(finishReason).toStrictEqual({ unified: "stop", raw: "stop" }); - }); - - it("should support unknown finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "eos", - }); - - const { finishReason } = await provider.completion("openai/gpt-3.5-turbo-instruct").doGenerate({ - prompt: TEST_PROMPT, - }); - - expect(finishReason).toStrictEqual({ unified: "other", raw: "eos" }); - }); - - it("should pass the model and the prompt", async () => { - prepareJsonResponse({ content: "" }); - - await model.doGenerate({ - prompt: TEST_PROMPT, - 
}); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: "openai/gpt-3.5-turbo-instruct", - prompt: "Hello", - }); - }); - - it("should pass the models array when provided", async () => { - prepareJsonResponse({ content: "" }); - - const customModel = provider.completion("openai/gpt-3.5-turbo-instruct", { - models: ["openai/gpt-4", "anthropic/claude-2"], - }); - - await customModel.doGenerate({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - model: "openai/gpt-3.5-turbo-instruct", - models: ["openai/gpt-4", "anthropic/claude-2"], - prompt: "Hello", - }); - }); - - it("should pass headers", async () => { - prepareJsonResponse({ content: "" }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.completion("openai/gpt-3.5-turbo-instruct").doGenerate({ - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const requestHeaders = server.calls[0]!.requestHeaders; - - expect(requestHeaders).toMatchObject({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - expect(requestHeaders["user-agent"]).toContain("ai-sdk/hyperbolic/0.0.0-test"); - }); -}); - -describe("doStream", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/completions": { - response: { type: "stream-chunks", chunks: [] }, - }, - }); - - function prepareStreamResponse({ - content, - finish_reason = "stop", - usage = { - prompt_tokens: 10, - total_tokens: 372, - completion_tokens: 362, - }, - logprobs = null, - }: 
{ - content: string[]; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - prompt_tokens_details?: { - cached_tokens: number; - }; - completion_tokens_details?: { - reasoning_tokens: number; - }; - cost?: number; - cost_details?: { - upstream_inference_cost: number; - }; - }; - logprobs?: { - tokens: string[]; - token_logprobs: number[]; - top_logprobs: Record[]; - } | null; - finish_reason?: string; - }) { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/completions"]!.response = { - type: "stream-chunks", - chunks: [ - ...content.map((text) => { - return `data: {"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,"choices":[{"text":"${text}","index":0,"logprobs":null,"finish_reason":null}],"model":"openai/gpt-3.5-turbo-instruct"}\n\n`; - }), - `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,"choices":[{"text":"","index":0,"logprobs":${JSON.stringify( - logprobs, - )},"finish_reason":"${finish_reason}"}],"model":"openai/gpt-3.5-turbo-instruct"}\n\n`, - `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,"model":"openai/gpt-3.5-turbo-instruct","usage":${JSON.stringify( - usage, - )},"choices":[]}\n\n`, - "data: [DONE]\n\n", - ], - }; - } - - it("should stream text deltas", async () => { - prepareStreamResponse({ - content: ["Hello", ", ", "World!"], - finish_reason: "stop", - usage: { - prompt_tokens: 10, - total_tokens: 372, - completion_tokens: 362, - }, - logprobs: TEST_LOGPROBS, - }); - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - // note: space moved to last chunk bc of trimming - const elements = await convertReadableStreamToArray(stream); - expect(elements).toStrictEqual([ - { type: "text-delta", delta: "Hello", id: expect.any(String) }, - { type: "text-delta", delta: ", ", id: 
expect.any(String) }, - { type: "text-delta", delta: "World!", id: expect.any(String) }, - { type: "text-delta", delta: "", id: expect.any(String) }, - { - type: "finish", - finishReason: { unified: "stop", raw: "stop" }, - providerMetadata: { - hyperbolic: { - usage: { - promptTokens: 10, - completionTokens: 362, - totalTokens: 372, - cost: undefined, - }, - }, - }, - usage: { - inputTokens: { - total: 10, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: 362, - text: undefined, - reasoning: undefined, - }, - }, - }, - ]); - }); - - it("should include upstream inference cost when provided", async () => { - prepareStreamResponse({ - content: ["Hello"], - usage: { - prompt_tokens: 5, - total_tokens: 15, - completion_tokens: 10, - cost_details: { - upstream_inference_cost: 0.0036, - }, - }, - }); - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; - const finishChunk = elements.find( - (element): element is Extract => - element.type === "finish", - ); - const hyperbolicUsage = ( - finishChunk?.providerMetadata?.hyperbolic as { - usage?: { - cost?: number; - costDetails?: { upstreamInferenceCost: number }; - }; - } - )?.usage; - expect(hyperbolicUsage?.costDetails).toStrictEqual({ - upstreamInferenceCost: 0.0036, - }); - }); - - it("should handle both normal cost and upstream inference cost in finish metadata when both are provided", async () => { - prepareStreamResponse({ - content: ["Hello"], - usage: { - prompt_tokens: 5, - total_tokens: 15, - completion_tokens: 10, - cost: 0.0025, - cost_details: { - upstream_inference_cost: 0.0036, - }, - }, - }); - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = (await convertReadableStreamToArray(stream)) as LanguageModelV3StreamPart[]; - const finishChunk = elements.find( - (element): element is Extract => - 
element.type === "finish", - ); - const hyperbolicUsage = ( - finishChunk?.providerMetadata?.hyperbolic as { - usage?: { - cost?: number; - costDetails?: { upstreamInferenceCost: number }; - }; - } - )?.usage; - expect(hyperbolicUsage?.costDetails).toStrictEqual({ - upstreamInferenceCost: 0.0036, - }); - expect(hyperbolicUsage?.cost).toBe(0.0025); - }); - - it("should handle error stream parts", async () => { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/completions"]!.response = { - type: "stream-chunks", - chunks: [ - `data: {"error":{"message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our help center`, - "data: [DONE]\n\n", - ], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "error", - error: { - message: - "The server had an error processing your request. Sorry about that! 
" + - "You can retry your request, or contact us through our help center.", - type: "server_error", - code: null, - param: null, - }, - }, - { - finishReason: { unified: "error", raw: undefined }, - providerMetadata: { - hyperbolic: { - usage: {}, - }, - }, - type: "finish", - usage: { - inputTokens: { - total: undefined, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: undefined, - text: undefined, - reasoning: undefined, - }, - }, - }, - ]); - }); - - it("should handle unparsable stream parts", async () => { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/completions"]!.response = { - type: "stream-chunks", - chunks: ["data: {unparsable}\n\n", "data: [DONE]\n\n"], - }; - - const { stream } = await model.doStream({ - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - expect(elements.length).toBe(2); - expect(elements[0]?.type).toBe("error"); - expect(elements[1]).toStrictEqual({ - finishReason: { unified: "error", raw: undefined }, - providerMetadata: { - hyperbolic: { - usage: {}, - }, - }, - type: "finish", - usage: { - inputTokens: { - total: undefined, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: undefined, - text: undefined, - reasoning: undefined, - }, - }, - }); - }); - - it("should pass the model and the prompt", async () => { - prepareStreamResponse({ content: [] }); - - await model.doStream({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - expect(await server.calls[0]!.requestBodyJson).toStrictEqual({ - stream: true, - stream_options: { include_usage: true }, - model: "openai/gpt-3.5-turbo-instruct", - prompt: "Hello", - }); - }); - - it("should pass headers", async () => { - prepareStreamResponse({ content: [] }); - - const provider = createHyperbolic({ - apiKey: 
"test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.completion("openai/gpt-3.5-turbo-instruct").doStream({ - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const requestHeaders = server.calls[0]!.requestHeaders; - - expect(requestHeaders).toMatchObject({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - expect(requestHeaders["user-agent"]).toContain("ai-sdk/hyperbolic/0.0.0-test"); - }); - - it("should pass extra body", async () => { - prepareStreamResponse({ content: [] }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - extraBody: { - custom_field: "custom_value", - providers: { - anthropic: { - custom_field: "custom_value", - }, - }, - }, - }); - - await provider.completion("openai/gpt-4o").doStream({ - prompt: TEST_PROMPT, - }); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const requestBody = await server.calls[0]!.requestBodyJson; - - expect(requestBody).toHaveProperty("custom_field", "custom_value"); - expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); - }); -}); diff --git a/packages/ai-sdk-provider/src/completion/index.ts b/packages/ai-sdk-provider/src/completion/index.ts deleted file mode 100644 index f24223a..0000000 --- a/packages/ai-sdk-provider/src/completion/index.ts +++ /dev/null @@ -1,360 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { - LanguageModelV3, - LanguageModelV3CallOptions, - LanguageModelV3FinishReason, - LanguageModelV3StreamPart, - LanguageModelV3Usage, -} from "@ai-sdk/provider"; -import type { ParseResult } from "@ai-sdk/provider-utils"; -import type { z } from "zod/v4"; -import { - APICallError, - NoContentGeneratedError, - UnsupportedFunctionalityError, -} from "@ai-sdk/provider"; -import { - combineHeaders, - createEventSourceResponseHandler, - createJsonResponseHandler, - generateId, - postJsonToApi, -} from "@ai-sdk/provider-utils"; - -import type { HyperbolicUsageAccounting } from "../types"; -import type { - HyperbolicCompletionModelId, - HyperbolicCompletionSettings, -} from "../types/hyperbolic-completion-settings"; -import { hyperbolicFailedResponseHandler } from "../schemas/error-response"; -import { createFinishReason, mapHyperbolicFinishReason } from "../utils/map-finish-reason"; -import { convertToHyperbolicCompletionPrompt } from "./convert-to-hyperbolic-completion-prompt"; -import { HyperbolicCompletionChunkSchema } from "./schemas"; - -type HyperbolicCompletionConfig = { - provider: string; - compatibility: "strict" | "compatible"; - headers: () => Record; - url: (options: { modelId: string; path: string }) => string; - fetch?: typeof fetch; - extraBody?: Record; -}; - -export class HyperbolicCompletionLanguageModel implements LanguageModelV3 { - readonly specificationVersion = "v3" as const; - readonly provider = "hyperbolic"; - readonly modelId: HyperbolicCompletionModelId; - readonly supportsImageUrls = true; - readonly supportedUrls: Record = { - "image/*": [/^data:image\/[a-zA-Z]+;base64,/, /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i], - "text/*": [/^data:text\//, /^https?:\/\/.+$/], - "application/*": [/^data:application\//, /^https?:\/\/.+$/], - }; - readonly defaultObjectGenerationMode = undefined; - readonly settings: HyperbolicCompletionSettings; - - private readonly config: 
HyperbolicCompletionConfig; - - constructor( - modelId: HyperbolicCompletionModelId, - settings: HyperbolicCompletionSettings, - config: HyperbolicCompletionConfig, - ) { - this.modelId = modelId; - this.settings = settings; - this.config = config; - } - - private getArgs({ - prompt, - maxOutputTokens, - temperature, - topP, - frequencyPenalty, - presencePenalty, - seed, - responseFormat, - topK, - stopSequences, - tools, - toolChoice, - }: LanguageModelV3CallOptions) { - const { prompt: completionPrompt } = convertToHyperbolicCompletionPrompt({ - prompt, - inputFormat: "prompt", - }); - - if (tools?.length) { - throw new UnsupportedFunctionalityError({ - functionality: "tools", - }); - } - - if (toolChoice) { - throw new UnsupportedFunctionalityError({ - functionality: "toolChoice", - }); - } - - return { - // model id: - model: this.modelId, - models: this.settings.models, - - // model specific settings: - logit_bias: this.settings.logitBias, - logprobs: - typeof this.settings.logprobs === "number" - ? this.settings.logprobs - : typeof this.settings.logprobs === "boolean" - ? this.settings.logprobs - ? 
0 - : undefined - : undefined, - suffix: this.settings.suffix, - user: this.settings.user, - - // standardized settings: - max_tokens: maxOutputTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - seed, - - stop: stopSequences, - response_format: responseFormat, - top_k: topK, - - // prompt: - prompt: completionPrompt, - - // Hyperbolic specific settings: - include_reasoning: this.settings.includeReasoning, - reasoning: this.settings.reasoning, - - // extra body: - ...this.config.extraBody, - ...this.settings.extraBody, - }; - } - - async doGenerate( - options: LanguageModelV3CallOptions, - ): Promise>> { - const providerOptions = options.providerOptions || {}; - const hyperbolicOptions = providerOptions.hyperbolic || {}; - - const args = { - ...this.getArgs(options), - ...hyperbolicOptions, - }; - - const { value: response, responseHeaders } = await postJsonToApi({ - url: this.config.url({ - path: "/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: args, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler(HyperbolicCompletionChunkSchema), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - if ("error" in response) { - const errorData = response.error as { message: string; code?: string }; - throw new APICallError({ - message: errorData.message, - url: this.config.url({ - path: "/completions", - modelId: this.modelId, - }), - requestBodyValues: args, - statusCode: 200, - responseHeaders, - data: errorData, - }); - } - - const choice = response.choices[0]; - - if (!choice) { - throw new NoContentGeneratedError({ - message: "No choice in Hyperbolic completion response", - }); - } - - return { - content: [ - { - type: "text", - text: choice.text ?? 
"", - }, - ], - finishReason: mapHyperbolicFinishReason(choice.finish_reason), - usage: { - inputTokens: { - total: response.usage?.prompt_tokens ?? 0, - noCache: undefined, - cacheRead: response.usage?.prompt_tokens_details?.cached_tokens ?? undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: response.usage?.completion_tokens ?? 0, - text: undefined, - reasoning: response.usage?.completion_tokens_details?.reasoning_tokens ?? undefined, - }, - }, - warnings: [], - response: { - headers: responseHeaders, - }, - }; - } - - async doStream( - options: LanguageModelV3CallOptions, - ): Promise>> { - const providerOptions = options.providerOptions || {}; - const hyperbolicOptions = providerOptions.hyperbolic || {}; - - const args = { - ...this.getArgs(options), - ...hyperbolicOptions, - }; - - const { value: response, responseHeaders } = await postJsonToApi({ - url: this.config.url({ - path: "/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: { - ...args, - stream: true, - - // only include stream_options when in strict compatibility mode: - stream_options: - this.config.compatibility === "strict" ? 
{ include_usage: true } : undefined, - }, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createEventSourceResponseHandler(HyperbolicCompletionChunkSchema), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - let finishReason: LanguageModelV3FinishReason = createFinishReason("other"); - const usage: LanguageModelV3Usage = { - inputTokens: { - total: undefined, - noCache: undefined, - cacheRead: undefined, - cacheWrite: undefined, - }, - outputTokens: { - total: undefined, - text: undefined, - reasoning: undefined, - }, - }; - - const hyperbolicUsage: Partial = {}; - return { - stream: response.pipeThrough( - new TransformStream< - ParseResult>, - LanguageModelV3StreamPart - >({ - transform(chunk, controller) { - // handle failed chunk parsing / validation: - if (!chunk.success) { - finishReason = createFinishReason("error"); - controller.enqueue({ type: "error", error: chunk.error }); - return; - } - - const value = chunk.value; - - // handle error chunks: - if ("error" in value) { - finishReason = createFinishReason("error"); - controller.enqueue({ type: "error", error: value.error }); - return; - } - - if (value.usage != null) { - usage.inputTokens.total = value.usage.prompt_tokens; - usage.outputTokens.total = value.usage.completion_tokens; - - // Collect Hyperbolic specific usage information - hyperbolicUsage.promptTokens = value.usage.prompt_tokens; - - if (value.usage.prompt_tokens_details) { - const cachedInputTokens = value.usage.prompt_tokens_details.cached_tokens ?? 0; - - usage.inputTokens.cacheRead = cachedInputTokens; - hyperbolicUsage.promptTokensDetails = { - cachedTokens: cachedInputTokens, - }; - } - - hyperbolicUsage.completionTokens = value.usage.completion_tokens; - if (value.usage.completion_tokens_details) { - const reasoningTokens = value.usage.completion_tokens_details.reasoning_tokens ?? 
0; - - usage.outputTokens.reasoning = reasoningTokens; - hyperbolicUsage.completionTokensDetails = { - reasoningTokens, - }; - } - - hyperbolicUsage.cost = value.usage.cost; - hyperbolicUsage.totalTokens = value.usage.total_tokens; - const upstreamInferenceCost = value.usage.cost_details?.upstream_inference_cost; - if (upstreamInferenceCost != null) { - hyperbolicUsage.costDetails = { - upstreamInferenceCost, - }; - } - } - - const choice = value.choices[0]; - - if (choice?.finish_reason != null) { - finishReason = mapHyperbolicFinishReason(choice.finish_reason); - } - - if (choice?.text != null) { - controller.enqueue({ - type: "text-delta", - delta: choice.text, - id: generateId(), - }); - } - }, - - flush(controller) { - controller.enqueue({ - type: "finish", - finishReason, - usage, - providerMetadata: { - hyperbolic: { - usage: hyperbolicUsage, - }, - }, - }); - }, - }), - ), - response: { - headers: responseHeaders, - }, - }; - } -} diff --git a/packages/ai-sdk-provider/src/completion/schemas.ts b/packages/ai-sdk-provider/src/completion/schemas.ts deleted file mode 100644 index 01d8e66..0000000 --- a/packages/ai-sdk-provider/src/completion/schemas.ts +++ /dev/null @@ -1,68 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { z } from "zod/v4"; - -import { HyperbolicErrorResponseSchema } from "../schemas/error-response"; -import { ReasoningDetailArraySchema } from "../schemas/reasoning-details"; - -// limited version of the schema, focussed on what is needed for the implementation -// this approach limits breakages when the API changes and increases efficiency -export const HyperbolicCompletionChunkSchema = z.union([ - z - .object({ - id: z.string().optional(), - model: z.string().optional(), - choices: z.array( - z - .object({ - text: z.string(), - reasoning: z.string().nullish().optional(), - reasoning_details: ReasoningDetailArraySchema.nullish(), - - finish_reason: z.string().nullish(), - index: z.number().nullish(), - logprobs: z - .object({ - tokens: z.array(z.string()), - token_logprobs: z.array(z.number()), - top_logprobs: z.array(z.record(z.string(), z.number())).nullable(), - }) - .passthrough() - .nullable() - .optional(), - }) - .passthrough(), - ), - usage: z - .object({ - prompt_tokens: z.number(), - prompt_tokens_details: z - .object({ - cached_tokens: z.number(), - }) - .passthrough() - .nullish(), - completion_tokens: z.number(), - completion_tokens_details: z - .object({ - reasoning_tokens: z.number(), - }) - .passthrough() - .nullish(), - total_tokens: z.number(), - cost: z.number().optional(), - cost_details: z - .object({ - upstream_inference_cost: z.number().nullish(), - }) - .passthrough() - .nullish(), - }) - .passthrough() - .nullish(), - }) - .passthrough(), - HyperbolicErrorResponseSchema, -]); diff --git a/packages/ai-sdk-provider/src/facade.ts b/packages/ai-sdk-provider/src/facade.ts deleted file mode 100644 index bfa4f4b..0000000 --- a/packages/ai-sdk-provider/src/facade.ts +++ /dev/null @@ -1,91 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; - -import type { HyperbolicProviderSettings } from "./provider"; -import type { - HyperbolicChatModelId, - HyperbolicChatSettings, -} from "./types/hyperbolic-chat-settings"; -import type { - HyperbolicCompletionModelId, - HyperbolicCompletionSettings, -} from "./types/hyperbolic-completion-settings"; -import { HyperbolicChatLanguageModel } from "./chat"; -import { HyperbolicCompletionLanguageModel } from "./completion"; - -/** -@deprecated Use `createHyperbolic` instead. - */ -export class Hyperbolic { - /** -Use a different URL prefix for API calls, e.g. to use proxy servers. -The default prefix is `https://api.hyperbolic.xyz/v1`. - */ - readonly baseURL: string; - - /** -API key that is being sent using the `Authorization` header. -It defaults to the `HYPERBOLIC_API_KEY` environment variable. - */ - readonly apiKey?: string; - - /** -Custom headers to include in the requests. - */ - readonly headers?: Record; - - /** - * Record of provider slugs to API keys for injecting into provider routing. - */ - readonly api_keys?: Record; - - /** - * Creates a new Hyperbolic provider instance. - */ - constructor(options: HyperbolicProviderSettings = {}) { - this.baseURL = - withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? 
"https://api.hyperbolic.xyz/v1"; - this.apiKey = options.apiKey; - this.headers = options.headers; - this.api_keys = options.api_keys; - } - - private get baseConfig() { - return { - baseURL: this.baseURL, - headers: () => ({ - Authorization: `Bearer ${loadApiKey({ - apiKey: this.apiKey, - environmentVariableName: "HYPERBOLIC_API_KEY", - description: "Hyperbolic", - })}`, - ...this.headers, - ...(this.api_keys && - Object.keys(this.api_keys).length > 0 && { - "X-Provider-API-Keys": JSON.stringify(this.api_keys), - }), - }), - }; - } - - chat(modelId: HyperbolicChatModelId, settings: HyperbolicChatSettings = {}) { - return new HyperbolicChatLanguageModel(modelId, settings, { - provider: "hyperbolic.chat", - ...this.baseConfig, - compatibility: "strict", - url: ({ path }) => `${this.baseURL}${path}`, - }); - } - - completion(modelId: HyperbolicCompletionModelId, settings: HyperbolicCompletionSettings = {}) { - return new HyperbolicCompletionLanguageModel(modelId, settings, { - provider: "hyperbolic.completion", - ...this.baseConfig, - compatibility: "strict", - url: ({ path }) => `${this.baseURL}${path}`, - }); - } -} diff --git a/packages/ai-sdk-provider/src/image/index.ts b/packages/ai-sdk-provider/src/image/index.ts index a04cdc8..1a4fb43 100644 --- a/packages/ai-sdk-provider/src/image/index.ts +++ b/packages/ai-sdk-provider/src/image/index.ts @@ -12,7 +12,6 @@ import { hyperbolicFailedResponseHandler } from "../schemas/error-response"; type HyperbolicImageModelConfig = { provider: string; - compatibility: "strict" | "compatible"; headers: () => Record; url: (options: { modelId: string; path: string }) => string; fetch?: typeof fetch; diff --git a/packages/ai-sdk-provider/src/index.ts b/packages/ai-sdk-provider/src/index.ts index 505c903..8047213 100644 --- a/packages/ai-sdk-provider/src/index.ts +++ b/packages/ai-sdk-provider/src/index.ts @@ -1,4 +1,2 @@ -export * from "./facade"; export * from "./provider"; -export * from "./types"; export * from 
"./schemas/error-response"; diff --git a/packages/ai-sdk-provider/src/internal/index.ts b/packages/ai-sdk-provider/src/internal/index.ts index fe316cd..533c4b2 100644 --- a/packages/ai-sdk-provider/src/internal/index.ts +++ b/packages/ai-sdk-provider/src/internal/index.ts @@ -1,6 +1 @@ -export * from "../chat"; -export * from "../completion"; export * from "../image"; -export * from "../types"; -export * from "../types/hyperbolic-chat-settings"; -export * from "../types/hyperbolic-completion-settings"; diff --git a/packages/ai-sdk-provider/src/provider.ts b/packages/ai-sdk-provider/src/provider.ts index 0cd762b..7251939 100644 --- a/packages/ai-sdk-provider/src/provider.ts +++ b/packages/ai-sdk-provider/src/provider.ts @@ -1,7 +1,3 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - import type { ProviderV3 } from "@ai-sdk/provider"; import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; @@ -9,53 +5,12 @@ import type { HyperbolicImageModelId, HyperbolicImageSettings, } from "./image/hyperbolic-image-settings"; -import type { - HyperbolicChatModelId, - HyperbolicChatSettings, -} from "./types/hyperbolic-chat-settings"; -import type { - HyperbolicCompletionModelId, - HyperbolicCompletionSettings, -} from "./types/hyperbolic-completion-settings"; -import { HyperbolicChatLanguageModel } from "./chat"; -import { HyperbolicCompletionLanguageModel } from "./completion"; import { HyperbolicImageModel } from "./image"; import { withUserAgentSuffix } from "./utils/with-user-agent-suffix"; import { VERSION } from "./version"; -export type { HyperbolicChatSettings, HyperbolicCompletionSettings }; - export interface HyperbolicProvider extends ProviderV3 { - ( - modelId: HyperbolicChatModelId, - settings?: HyperbolicCompletionSettings, - ): HyperbolicCompletionLanguageModel; - (modelId: HyperbolicChatModelId, settings?: HyperbolicChatSettings): 
HyperbolicChatLanguageModel; - - languageModel( - modelId: HyperbolicChatModelId, - settings?: HyperbolicCompletionSettings, - ): HyperbolicCompletionLanguageModel; - languageModel( - modelId: HyperbolicChatModelId, - settings?: HyperbolicChatSettings, - ): HyperbolicChatLanguageModel; - - /** -Creates an Hyperbolic chat model for text generation. - */ - chat( - modelId: HyperbolicChatModelId, - settings?: HyperbolicChatSettings, - ): HyperbolicChatLanguageModel; - - /** -Creates an Hyperbolic completion model for text generation. - */ - completion( - modelId: HyperbolicCompletionModelId, - settings?: HyperbolicCompletionSettings, - ): HyperbolicCompletionLanguageModel; + (modelId: HyperbolicImageModelId, settings?: HyperbolicImageSettings): HyperbolicImageModel; image(modelId: HyperbolicImageModelId, settings?: HyperbolicImageSettings): HyperbolicImageModel; } @@ -81,13 +36,6 @@ Custom headers to include in the requests. */ headers?: Record; - /** -Hyperbolic compatibility mode. Should be set to `strict` when using the Hyperbolic API, -and `compatible` when using 3rd party providers. In `compatible` mode, newer -information such as streamOptions are not being sent. Defaults to 'compatible'. - */ - compatibility?: "strict" | "compatible"; - /** Custom fetch implementation. You can use it as a middleware to intercept requests, or to provide a custom fetch implementation for e.g. testing. @@ -107,15 +55,14 @@ A JSON object to send as the request body to access Hyperbolic features & upstre } /** -Create an Hyperbolic provider instance. + * Create an Hyperbolic provider instance. + * + * For chat and completion models, use the @openrouter/ai-sdk-provider instead. */ export function createHyperbolic(options: HyperbolicProviderSettings = {}): HyperbolicProvider { const baseURL = withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? 
"https://api.hyperbolic.xyz/v1"; - // we default to compatible, because strict breaks providers like Groq: - const compatibility = options.compatibility ?? "compatible"; - const getHeaders = () => withUserAgentSuffix( { @@ -133,29 +80,6 @@ export function createHyperbolic(options: HyperbolicProviderSettings = {}): Hype `ai-sdk/hyperbolic/${VERSION}`, ); - const createChatModel = (modelId: HyperbolicChatModelId, settings: HyperbolicChatSettings = {}) => - new HyperbolicChatLanguageModel(modelId, settings, { - provider: "hyperbolic.chat", - url: ({ path }) => `${baseURL}${path}`, - headers: getHeaders, - compatibility, - fetch: options.fetch, - extraBody: options.extraBody, - }); - - const createCompletionModel = ( - modelId: HyperbolicCompletionModelId, - settings: HyperbolicCompletionSettings = {}, - ) => - new HyperbolicCompletionLanguageModel(modelId, settings, { - provider: "hyperbolic.completion", - url: ({ path }) => `${baseURL}${path}`, - headers: getHeaders, - compatibility, - fetch: options.fetch, - extraBody: options.extraBody, - }); - const createImageModel = ( modelId: HyperbolicImageModelId, settings: HyperbolicImageSettings = {}, @@ -164,38 +88,19 @@ export function createHyperbolic(options: HyperbolicProviderSettings = {}): Hype provider: "hyperbolic.image", url: ({ path }) => `${baseURL}${path}`, headers: getHeaders, - compatibility, fetch: options.fetch, extraBody: options.extraBody, }); - const createLanguageModel = ( - modelId: HyperbolicChatModelId | HyperbolicCompletionModelId, - settings?: HyperbolicChatSettings | HyperbolicCompletionSettings, - ) => { - if (new.target) { - throw new Error("The Hyperbolic model function cannot be called with the new keyword."); - } - - return createChatModel(modelId, settings as HyperbolicChatSettings); - }; - - const provider = ( - modelId: HyperbolicChatModelId | HyperbolicCompletionModelId, - settings?: HyperbolicChatSettings | HyperbolicCompletionSettings, - ) => createLanguageModel(modelId, settings); - - 
provider.languageModel = createLanguageModel; - provider.chat = createChatModel; - provider.completion = createCompletionModel; + const provider = (modelId: HyperbolicImageModelId, settings?: HyperbolicImageSettings) => + createImageModel(modelId, settings); + provider.image = createImageModel; return provider as HyperbolicProvider; } /** -Default Hyperbolic provider instance. It uses 'strict' compatibility mode. + * Default Hyperbolic provider instance. Requires a valid API key set to the `HYPERBOLIC_API_KEY` environment variable. */ -export const hyperbolic = createHyperbolic({ - compatibility: "strict", // strict for Hyperbolic API -}); +export const hyperbolic = createHyperbolic(); diff --git a/packages/ai-sdk-provider/src/schemas/error-response.test.ts b/packages/ai-sdk-provider/src/schemas/error-response.test.ts index fd8e27b..3b2d1e7 100644 --- a/packages/ai-sdk-provider/src/schemas/error-response.test.ts +++ b/packages/ai-sdk-provider/src/schemas/error-response.test.ts @@ -1,7 +1,3 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - import { HyperbolicErrorResponseSchema } from "./error-response"; describe("HyperbolicErrorResponseSchema", () => { diff --git a/packages/ai-sdk-provider/src/schemas/error-response.ts b/packages/ai-sdk-provider/src/schemas/error-response.ts index d4f5cb2..3b17c6a 100644 --- a/packages/ai-sdk-provider/src/schemas/error-response.ts +++ b/packages/ai-sdk-provider/src/schemas/error-response.ts @@ -1,8 +1,3 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { ChatErrorError } from "@openrouter/sdk/models"; import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"; import { z } from "zod/v4"; @@ -17,11 +12,9 @@ export const HyperbolicErrorResponseSchema = z type: z.string().nullable().optional().default(null), param: z.any().nullable().optional().default(null), }) - .passthrough() satisfies z.ZodType< - Omit & { code: string | number | null } - >, + .loose(), }) - .passthrough(); + .loose(); export type HyperbolicErrorData = z.infer; diff --git a/packages/ai-sdk-provider/src/schemas/format.ts b/packages/ai-sdk-provider/src/schemas/format.ts deleted file mode 100644 index 89856e3..0000000 --- a/packages/ai-sdk-provider/src/schemas/format.ts +++ /dev/null @@ -1,15 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -export enum ReasoningFormat { - Unknown = "unknown", - OpenAIResponsesV1 = "openai-responses-v1", - XAIResponsesV1 = "xai-responses-v1", - AnthropicClaudeV1 = "anthropic-claude-v1", - GoogleGeminiV1 = "google-gemini-v1", -} - -// Anthropic Claude was the first reasoning that we're -// passing back and forth -export const DEFAULT_REASONING_FORMAT = ReasoningFormat.AnthropicClaudeV1; diff --git a/packages/ai-sdk-provider/src/schemas/image.ts b/packages/ai-sdk-provider/src/schemas/image.ts index 8f57ab0..49049ae 100644 --- a/packages/ai-sdk-provider/src/schemas/image.ts +++ b/packages/ai-sdk-provider/src/schemas/image.ts @@ -1,7 +1,3 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - import { z } from "zod/v4"; const ImageResponseSchema = z diff --git a/packages/ai-sdk-provider/src/schemas/provider-metadata.ts b/packages/ai-sdk-provider/src/schemas/provider-metadata.ts deleted file mode 100644 index 84bb789..0000000 --- a/packages/ai-sdk-provider/src/schemas/provider-metadata.ts +++ /dev/null @@ -1,87 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import { z } from "zod/v4"; - -import { ReasoningDetailUnionSchema } from "./reasoning-details"; - -/** - * Schema for file annotations from FileParserPlugin - */ -export const FileAnnotationSchema = z - .object({ - type: z.literal("file"), - file: z - .object({ - hash: z.string(), - name: z.string(), - content: z - .array( - z - .object({ - type: z.string(), - text: z.string().optional(), - }) - .catchall(z.any()), - ) - .optional(), - }) - .catchall(z.any()), - }) - .catchall(z.any()); - -export type FileAnnotation = z.infer; - -/** - * Schema for Hyperbolic provider metadata attached to responses - */ -export const HyperbolicProviderMetadataSchema = z - .object({ - provider: z.string(), - reasoning_details: z.array(ReasoningDetailUnionSchema).optional(), - annotations: z.array(FileAnnotationSchema).optional(), - usage: z - .object({ - promptTokens: z.number(), - promptTokensDetails: z - .object({ - cachedTokens: z.number(), - }) - .catchall(z.any()) - .optional(), - completionTokens: z.number(), - completionTokensDetails: z - .object({ - reasoningTokens: z.number(), - }) - .catchall(z.any()) - .optional(), - totalTokens: z.number(), - cost: z.number().optional(), - costDetails: z - .object({ - upstreamInferenceCost: z.number(), - }) - .catchall(z.any()) - .optional(), - }) - .catchall(z.any()), - }) - .catchall(z.any()); - -export type HyperbolicProviderMetadata = z.infer; - -/** - * Schema for parsing provider options that may 
contain reasoning_details and annotations - */ -export const HyperbolicProviderOptionsSchema = z - .object({ - hyperbolic: z - .object({ - reasoning_details: z.array(ReasoningDetailUnionSchema).optional(), - annotations: z.array(FileAnnotationSchema).optional(), - }) - .optional(), - }) - .optional(); diff --git a/packages/ai-sdk-provider/src/schemas/reasoning-details.ts b/packages/ai-sdk-provider/src/schemas/reasoning-details.ts deleted file mode 100644 index 79f7a5e..0000000 --- a/packages/ai-sdk-provider/src/schemas/reasoning-details.ts +++ /dev/null @@ -1,89 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import { z } from "zod/v4"; - -import { isDefinedOrNotNull } from "../utils/type-guards"; -import { ReasoningFormat } from "./format"; - -export enum ReasoningDetailType { - Summary = "reasoning.summary", - Encrypted = "reasoning.encrypted", - Text = "reasoning.text", -} - -export const CommonReasoningDetailSchema = z - .object({ - id: z.string().nullish(), - format: z.enum(ReasoningFormat).nullish(), - index: z.number().optional(), - }) - .loose(); - -export const ReasoningDetailSummarySchema = z - .object({ - type: z.literal(ReasoningDetailType.Summary), - summary: z.string(), - }) - .extend(CommonReasoningDetailSchema.shape); -export type ReasoningDetailSummary = z.infer; - -export const ReasoningDetailEncryptedSchema = z - .object({ - type: z.literal(ReasoningDetailType.Encrypted), - data: z.string(), - }) - .extend(CommonReasoningDetailSchema.shape); - -export type ReasoningDetailEncrypted = z.infer; - -export const ReasoningDetailTextSchema = z - .object({ - type: z.literal(ReasoningDetailType.Text), - text: z.string().nullish(), - signature: z.string().nullish(), - }) - .extend(CommonReasoningDetailSchema.shape); - -export type ReasoningDetailText = z.infer; - -export const ReasoningDetailUnionSchema = z.union([ - 
ReasoningDetailSummarySchema, - ReasoningDetailEncryptedSchema, - ReasoningDetailTextSchema, -]); - -export type ReasoningDetailUnion = z.infer; - -const ReasoningDetailsWithUnknownSchema = z.union([ - ReasoningDetailUnionSchema, - z.unknown().transform(() => null), -]); - -export const ReasoningDetailArraySchema = z - .array(ReasoningDetailsWithUnknownSchema) - .transform((d) => d.filter((d): d is ReasoningDetailUnion => !!d)); - -export const OutputUnionToReasoningDetailsSchema = z.union([ - z - .object({ - delta: z.object({ - reasoning_details: z.array(ReasoningDetailsWithUnknownSchema), - }), - }) - .transform((data) => data.delta.reasoning_details.filter(isDefinedOrNotNull)), - z - .object({ - message: z.object({ - reasoning_details: z.array(ReasoningDetailsWithUnknownSchema), - }), - }) - .transform((data) => data.message.reasoning_details.filter(isDefinedOrNotNull)), - z - .object({ - text: z.string(), - reasoning_details: z.array(ReasoningDetailsWithUnknownSchema), - }) - .transform((data) => data.reasoning_details.filter(isDefinedOrNotNull)), -]); diff --git a/packages/ai-sdk-provider/src/scripts/update-models-list.ts b/packages/ai-sdk-provider/src/scripts/update-models-list.ts index af5998e..5efab77 100644 --- a/packages/ai-sdk-provider/src/scripts/update-models-list.ts +++ b/packages/ai-sdk-provider/src/scripts/update-models-list.ts @@ -1,7 +1,3 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - import "@hyperbolic/api"; import { readFileSync, writeFileSync } from "fs"; diff --git a/packages/ai-sdk-provider/src/test-utils/test-server.ts b/packages/ai-sdk-provider/src/test-utils/test-server.ts deleted file mode 100644 index 329693a..0000000 --- a/packages/ai-sdk-provider/src/test-utils/test-server.ts +++ /dev/null @@ -1,150 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -/** - * Simple test server utility to replace the removed @ai-sdk/provider-utils/test createTestServer - * This provides HTTP request interception for testing purposes. - */ - -import type { JsonBodyType } from "msw"; -import type { SetupServerApi } from "msw/node"; -import { http, HttpResponse } from "msw"; -import { setupServer } from "msw/node"; -import { afterAll, afterEach, beforeAll } from "vitest"; - -// Re-export utilities that were previously in @ai-sdk/provider-utils/test -export { convertReadableStreamToArray } from "@ai-sdk/provider-utils/test"; - -type ResponseConfig = { - type: "json-value" | "stream-chunks" | "error"; - body?: JsonBodyType; - chunks?: string[]; - status?: number; - headers?: Record; -}; - -type CallRecord = { - requestBody: string; - requestBodyJson: Promise; - requestHeaders: Record; -}; - -type UrlConfig = { - response?: ResponseConfig; -}; - -type UrlConfigWithCalls = UrlConfig & { - calls: CallRecord[]; -}; - -type TestServerConfig = Record; - -export function createTestServer(config: TestServerConfig): { - urls: Record; - server: SetupServerApi; - calls: CallRecord[]; -} { - const urls: Record = {}; - const calls: CallRecord[] = []; - - // Initialize URL configs with call tracking - for (const [url, urlConfig] of Object.entries(config)) { - urls[url] = { ...urlConfig, calls: [] }; - } - - const handlers = Object.keys(config).map((url) => - http.post(url, async ({ request }) => { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const urlConfig = urls[url]!; - - // Record the call - const bodyText = await request.clone().text(); - - const headers: Record = {}; - request.headers.forEach((value, key) => { - headers[key] = value; - }); - - const callRecord: CallRecord = { - requestBody: bodyText, - requestBodyJson: Promise.resolve().then(() => { - try { - return JSON.parse(bodyText); - } catch { - return bodyText; - } - }), - requestHeaders: headers, - }; - 
- urlConfig.calls.push(callRecord); - calls.push(callRecord); - - const response = urlConfig.response; - - if (!response) { - return HttpResponse.json({ error: "No response configured" }, { status: 500 }); - } - - const status = response.status ?? 200; - const responseHeaders = response.headers ?? {}; - - switch (response.type) { - case "json-value": - return HttpResponse.json(response.body ?? null, { - status, - headers: responseHeaders, - }); - - case "stream-chunks": { - const encoder = new TextEncoder(); - const chunks = response.chunks ?? []; - const stream = new ReadableStream({ - async start(controller) { - for (const chunk of chunks) { - controller.enqueue(encoder.encode(chunk)); - } - controller.close(); - }, - }); - return new HttpResponse(stream, { - status, - headers: { - "Content-Type": "text/event-stream", - ...responseHeaders, - }, - }); - } - - case "error": - return HttpResponse.json(response.body ?? { error: "Test error" }, { - status: response.status ?? 500, - headers: responseHeaders, - }); - - default: - return HttpResponse.json(response.body ?? null, { - status, - headers: responseHeaders, - }); - } - }), - ); - - const server = setupServer(...handlers); - - beforeAll(() => server.listen({ onUnhandledRequest: "bypass" })); - afterEach(() => { - server.resetHandlers(); - // Clear calls between tests - calls.length = 0; - for (const url of Object.keys(urls)) { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - urls[url]!.calls = []; - } - }); - afterAll(() => server.close()); - - return { urls, server, calls }; -} diff --git a/packages/ai-sdk-provider/src/tests/provider-options.test.ts b/packages/ai-sdk-provider/src/tests/provider-options.test.ts deleted file mode 100644 index 9ca9ce2..0000000 --- a/packages/ai-sdk-provider/src/tests/provider-options.test.ts +++ /dev/null @@ -1,63 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { ModelMessage } from "ai"; -import { streamText } from "ai"; -import { describe, expect, it, vi } from "vitest"; - -import { createHyperbolic } from "../provider"; -import { createTestServer } from "../test-utils/test-server"; - -// Add type assertions for the mocked classes -const TEST_MESSAGES: ModelMessage[] = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -describe("providerOptions", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": { - response: { - type: "stream-chunks", - chunks: [], - }, - }, - }); - - beforeEach(() => { - vi.clearAllMocks(); - }); - - it("should set providerOptions hyperbolic to extra body", async () => { - const hyperbolic = createHyperbolic({ - apiKey: "test", - }); - const model = hyperbolic("anthropic/claude-3.7-sonnet"); - - await streamText({ - model: model, - messages: TEST_MESSAGES, - providerOptions: { - hyperbolic: { - reasoning: { - max_tokens: 1000, - }, - }, - }, - }).consumeStream(); - - expect(await server.calls[0]?.requestBodyJson).toStrictEqual({ - messages: [ - { - content: "Hello", - role: "user", - }, - ], - reasoning: { - max_tokens: 1000, - }, - model: "anthropic/claude-3.7-sonnet", - stream: true, - }); - }); -}); diff --git a/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts b/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts deleted file mode 100644 index c3b9ffc..0000000 --- a/packages/ai-sdk-provider/src/tests/stream-usage-accounting.test.ts +++ /dev/null @@ -1,177 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { describe, expect, it } from "vitest"; - -import type { HyperbolicChatSettings } from "../types/hyperbolic-chat-settings"; -import { HyperbolicChatLanguageModel } from "../chat"; -import { convertReadableStreamToArray, createTestServer } from "../test-utils/test-server"; - -describe("Hyperbolic Streaming Usage Accounting", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": { - response: { type: "stream-chunks", chunks: [] }, - }, - }); - - function prepareStreamResponse(includeUsage = true) { - const chunks = [ - `data: {"id":"test-id","model":"test-model","choices":[{"delta":{"content":"Hello"},"index":0}]}\n\n`, - `data: {"choices":[{"finish_reason":"stop","index":0}]}\n\n`, - ]; - - if (includeUsage) { - chunks.push( - `data: ${JSON.stringify({ - usage: { - prompt_tokens: 10, - prompt_tokens_details: { cached_tokens: 5 }, - completion_tokens: 20, - completion_tokens_details: { reasoning_tokens: 8 }, - total_tokens: 30, - cost: 0.0015, - cost_details: { upstream_inference_cost: 0.0019 }, - }, - choices: [], - })}\n\n`, - ); - } - - chunks.push("data: [DONE]\n\n"); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "stream-chunks", - chunks, - }; - } - - it("should include stream_options.include_usage in request when enabled", async () => { - prepareStreamResponse(); - - // Create model with usage accounting enabled - const settings: HyperbolicChatSettings = { - usage: { include: true }, - }; - - const model = new HyperbolicChatLanguageModel("test-model", settings, { - provider: "hyperbolic.chat", - url: () => "https://api.hyperbolic.xyz/v1/chat/completions", - headers: () => ({}), - compatibility: "strict", - fetch: global.fetch, - }); - - // Call the model with streaming - await model.doStream({ - prompt: [ - { - role: "user", - content: [{ type: 
"text", text: "Hello" }], - }, - ], - maxOutputTokens: 100, - }); - - // Verify stream options - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - const requestBody = (await server.calls[0]!.requestBodyJson) as Record; - expect(requestBody).toBeDefined(); - expect(requestBody.stream).toBe(true); - expect(requestBody.stream_options).toEqual({ - include_usage: true, - }); - }); - - it("should include provider-specific metadata in finish event when usage accounting is enabled", async () => { - prepareStreamResponse(true); - - // Create model with usage accounting enabled - const settings: HyperbolicChatSettings = { - usage: { include: true }, - }; - - const model = new HyperbolicChatLanguageModel("test-model", settings, { - provider: "hyperbolic.chat", - url: () => "https://api.hyperbolic.xyz/v1/chat/completions", - headers: () => ({}), - compatibility: "strict", - fetch: global.fetch, - }); - - // Call the model with streaming - const result = await model.doStream({ - prompt: [ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ], - maxOutputTokens: 100, - }); - - // Read all chunks from the stream - const chunks = await convertReadableStreamToArray(result.stream); - - // Find the finish chunk - const finishChunk = chunks.find((chunk) => chunk.type === "finish"); - expect(finishChunk).toBeDefined(); - - // Verify metadata is included - expect(finishChunk?.providerMetadata).toBeDefined(); - const hyperbolicData = finishChunk?.providerMetadata?.hyperbolic; - expect(hyperbolicData).toBeDefined(); - - const usage = hyperbolicData?.usage; - expect(usage).toMatchObject({ - promptTokens: 10, - completionTokens: 20, - totalTokens: 30, - cost: 0.0015, - costDetails: { upstreamInferenceCost: 0.0019 }, - promptTokensDetails: { cachedTokens: 5 }, - completionTokensDetails: { reasoningTokens: 8 }, - }); - }); - - it("should not include provider-specific metadata when usage accounting is disabled", async () => { - 
prepareStreamResponse(false); - - // Create model with usage accounting disabled - const settings: HyperbolicChatSettings = { - // No usage property - }; - - const model = new HyperbolicChatLanguageModel("test-model", settings, { - provider: "hyperbolic.chat", - url: () => "https://api.hyperbolic.xyz/v1/chat/completions", - headers: () => ({}), - compatibility: "strict", - fetch: global.fetch, - }); - - // Call the model with streaming - const result = await model.doStream({ - prompt: [ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ], - maxOutputTokens: 100, - }); - - // Read all chunks from the stream - const chunks = await convertReadableStreamToArray(result.stream); - - // Find the finish chunk - const finishChunk = chunks.find((chunk) => chunk.type === "finish"); - expect(finishChunk).toBeDefined(); - - // Verify that provider metadata is not included - expect(finishChunk?.providerMetadata?.hyperbolic).toStrictEqual({ - usage: {}, - }); - }); -}); diff --git a/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts b/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts deleted file mode 100644 index 793e548..0000000 --- a/packages/ai-sdk-provider/src/tests/usage-accounting.test.ts +++ /dev/null @@ -1,327 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { describe, expect, it } from "vitest"; - -import type { HyperbolicChatSettings } from "../types/hyperbolic-chat-settings"; -import { HyperbolicChatLanguageModel } from "../chat"; -import { createTestServer } from "../test-utils/test-server"; - -describe("Hyperbolic Usage Accounting", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": { - response: { type: "json-value", body: {} }, - }, - }); - - function prepareJsonResponse(includeUsage = true) { - const response = { - id: "test-id", - model: "test-model", - choices: [ - { - message: { - role: "assistant", - content: "Hello, I am an AI assistant.", - }, - index: 0, - finish_reason: "stop", - }, - ], - usage: includeUsage - ? { - prompt_tokens: 10, - prompt_tokens_details: { - cached_tokens: 5, - }, - completion_tokens: 20, - completion_tokens_details: { - reasoning_tokens: 8, - }, - total_tokens: 30, - cost: 0.0015, - cost_details: { - upstream_inference_cost: 0.0019, - }, - } - : undefined, - }; - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "json-value", - body: response, - }; - } - - it("should include usage parameter in the request when enabled", async () => { - prepareJsonResponse(); - - // Create model with usage accounting enabled - const settings: HyperbolicChatSettings = { - usage: { include: true }, - }; - - const model = new HyperbolicChatLanguageModel("test-model", settings, { - provider: "hyperbolic.chat", - url: () => "https://api.hyperbolic.xyz/v1/chat/completions", - headers: () => ({}), - compatibility: "strict", - fetch: global.fetch, - }); - - // Call the model - await model.doGenerate({ - prompt: [ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ], - maxOutputTokens: 100, - }); - - // Check request contains usage parameter - // eslint-disable-next-line 
@typescript-eslint/no-non-null-assertion - const requestBody = (await server.calls[0]!.requestBodyJson) as Record; - expect(requestBody).toBeDefined(); - expect(requestBody).toHaveProperty("usage"); - expect(requestBody.usage).toEqual({ include: true }); - }); - - it("should include provider-specific metadata in response when usage accounting is enabled", async () => { - prepareJsonResponse(); - - // Create model with usage accounting enabled - const settings: HyperbolicChatSettings = { - usage: { include: true }, - }; - - const model = new HyperbolicChatLanguageModel("test-model", settings, { - provider: "hyperbolic.chat", - url: () => "https://api.hyperbolic.xyz/v1/chat/completions", - headers: () => ({}), - compatibility: "strict", - fetch: global.fetch, - }); - - // Call the model - const result = await model.doGenerate({ - prompt: [ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ], - maxOutputTokens: 100, - }); - - // Check result contains provider metadata - expect(result.providerMetadata).toBeDefined(); - const providerData = result.providerMetadata; - - // Check for Hyperbolic usage data - expect(providerData?.hyperbolic).toBeDefined(); - const hyperbolicData = providerData?.hyperbolic as Record; - expect(hyperbolicData.usage).toBeDefined(); - - const usage = hyperbolicData.usage; - expect(usage).toMatchObject({ - promptTokens: 10, - completionTokens: 20, - totalTokens: 30, - cost: 0.0015, - costDetails: { - upstreamInferenceCost: 0.0019, - }, - promptTokensDetails: { - cachedTokens: 5, - }, - completionTokensDetails: { - reasoningTokens: 8, - }, - }); - }); - - it("should not include provider-specific metadata when usage accounting is disabled", async () => { - prepareJsonResponse(); - - // Create model with usage accounting disabled - const settings: HyperbolicChatSettings = { - // No usage property - }; - - const model = new HyperbolicChatLanguageModel("test-model", settings, { - provider: "hyperbolic.chat", - url: () => 
"https://api.hyperbolic.xyz/v1/chat/completions", - headers: () => ({}), - compatibility: "strict", - fetch: global.fetch, - }); - - // Call the model - const result = await model.doGenerate({ - prompt: [ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ], - maxOutputTokens: 100, - }); - - // Verify that Hyperbolic metadata is not included - expect(result.providerMetadata?.hyperbolic?.usage).toStrictEqual({ - promptTokens: 10, - completionTokens: 20, - totalTokens: 30, - cost: 0.0015, - costDetails: { - upstreamInferenceCost: 0.0019, - }, - promptTokensDetails: { - cachedTokens: 5, - }, - completionTokensDetails: { - reasoningTokens: 8, - }, - }); - }); - - it("should exclude token details from providerMetadata when not present in response", async () => { - // Prepare a response without token details - const response = { - id: "test-id", - model: "test-model", - choices: [ - { - message: { - role: "assistant", - content: "Hello, I am an AI assistant.", - }, - index: 0, - finish_reason: "stop", - }, - ], - usage: { - prompt_tokens: 10, - completion_tokens: 20, - total_tokens: 30, - cost: 0.0015, - // No prompt_tokens_details, completion_tokens_details, or cost_details - }, - }; - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "json-value", - body: response, - }; - - const settings: HyperbolicChatSettings = { - usage: { include: true }, - }; - - const model = new HyperbolicChatLanguageModel("test-model", settings, { - provider: "hyperbolic.chat", - url: () => "https://api.hyperbolic.xyz/v1/chat/completions", - headers: () => ({}), - compatibility: "strict", - fetch: global.fetch, - }); - - const result = await model.doGenerate({ - prompt: [ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ], - maxOutputTokens: 100, - }); - - const usage = (result.providerMetadata?.hyperbolic as Record)?.usage; - - // Should 
include basic token counts - expect(usage).toMatchObject({ - promptTokens: 10, - completionTokens: 20, - totalTokens: 30, - cost: 0.0015, - }); - - // Should NOT include token details when not present in response - expect(usage).not.toHaveProperty("promptTokensDetails"); - expect(usage).not.toHaveProperty("completionTokensDetails"); - expect(usage).not.toHaveProperty("costDetails"); - }); - - it("should include only present token details in providerMetadata", async () => { - // Prepare a response with only cached_tokens (no reasoning or cost details) - const response = { - id: "test-id", - model: "test-model", - choices: [ - { - message: { - role: "assistant", - content: "Hello, I am an AI assistant.", - }, - index: 0, - finish_reason: "stop", - }, - ], - usage: { - prompt_tokens: 10, - prompt_tokens_details: { - cached_tokens: 5, - }, - completion_tokens: 20, - total_tokens: 30, - cost: 0.0015, - // No completion_tokens_details or cost_details - }, - }; - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - server.urls["https://api.hyperbolic.xyz/v1/chat/completions"]!.response = { - type: "json-value", - body: response, - }; - - const settings: HyperbolicChatSettings = { - usage: { include: true }, - }; - - const model = new HyperbolicChatLanguageModel("test-model", settings, { - provider: "hyperbolic.chat", - url: () => "https://api.hyperbolic.xyz/v1/chat/completions", - headers: () => ({}), - compatibility: "strict", - fetch: global.fetch, - }); - - const result = await model.doGenerate({ - prompt: [ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ], - maxOutputTokens: 100, - }); - - const usage = (result.providerMetadata?.hyperbolic as Record)?.usage; - - // Should include promptTokensDetails since cached_tokens is present - expect(usage).toHaveProperty("promptTokensDetails"); - expect((usage as Record).promptTokensDetails).toEqual({ - cachedTokens: 5, - }); - - // Should NOT include completionTokensDetails or 
costDetails - expect(usage).not.toHaveProperty("completionTokensDetails"); - expect(usage).not.toHaveProperty("costDetails"); - }); -}); diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts b/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts deleted file mode 100644 index 693d895..0000000 --- a/packages/ai-sdk-provider/src/types/hyperbolic-chat-completions-input.ts +++ /dev/null @@ -1,109 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { FileAnnotation } from "../schemas/provider-metadata"; -import type { ReasoningDetailUnion } from "../schemas/reasoning-details"; - -// Type for Hyperbolic Cache Control following Anthropic's pattern -export type HyperbolicCacheControl = { type: "ephemeral" }; - -export type HyperbolicChatCompletionsInput = Array; - -export type ChatCompletionMessageParam = - | ChatCompletionSystemMessageParam - | ChatCompletionUserMessageParam - | ChatCompletionAssistantMessageParam - | ChatCompletionToolMessageParam; - -export interface ChatCompletionSystemMessageParam { - role: "system"; - content: string; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionUserMessageParam { - role: "user"; - content: string | Array; - cache_control?: HyperbolicCacheControl; -} - -export type ChatCompletionContentPart = - | ChatCompletionContentPartText - | ChatCompletionContentPartImage - | ChatCompletionContentPartFile - | ChatCompletionContentPartInputAudio; - -export interface ChatCompletionContentPartFile { - type: "file"; - file: { - filename?: string; - file_data?: string; - file_id?: string; - }; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionContentPartImage { - type: "image_url"; - image_url: { - url: string; - }; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionContentPartText { - type: 
"text"; - text: string; - reasoning?: string | null; - cache_control?: HyperbolicCacheControl; -} - -export const HYPERBOLIC_AUDIO_FORMATS = [ - "wav", - "mp3", - "aiff", - "aac", - "ogg", - "flac", - "m4a", - "pcm16", - "pcm24", -] as const; - -export type HyperbolicAudioFormat = (typeof HYPERBOLIC_AUDIO_FORMATS)[number]; - -export interface ChatCompletionContentPartInputAudio { - type: "input_audio"; - input_audio: { - data: string; - format: HyperbolicAudioFormat; - }; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionAssistantMessageParam { - role: "assistant"; - content?: string | null; - reasoning?: string | null; - reasoning_details?: ReasoningDetailUnion[]; - annotations?: FileAnnotation[]; - tool_calls?: Array; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionMessageToolCall { - type: "function"; - id: string; - function: { - arguments: string; - name: string; - }; -} - -export interface ChatCompletionToolMessageParam { - role: "tool"; - content: string; - tool_call_id: string; - cache_control?: HyperbolicCacheControl; -} diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts deleted file mode 100644 index aa1676e..0000000 --- a/packages/ai-sdk-provider/src/types/hyperbolic-chat-settings.ts +++ /dev/null @@ -1,161 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type * as models from "@openrouter/sdk/models"; - -import type { HyperbolicSharedSettings } from ".."; - -// https://api.hyperbolic.xyz/v1/models -export type { HyperbolicChatModelId } from "../__generated__/models.gen"; - -export type HyperbolicChatSettings = { - /** -Modify the likelihood of specified tokens appearing in the completion. 
- -Accepts a JSON object that maps tokens (specified by their token ID in -the GPT tokenizer) to an associated bias value from -100 to 100. You -can use this tokenizer tool to convert text to token IDs. Mathematically, -the bias is added to the logits generated by the model prior to sampling. -The exact effect will vary per model, but values between -1 and 1 should -decrease or increase likelihood of selection; values like -100 or 100 -should result in a ban or exclusive selection of the relevant token. - -As an example, you can pass {"50256": -100} to prevent the <|endoftext|> -token from being generated. -*/ - logitBias?: Record; - - /** -Return the log probabilities of the tokens. Including logprobs will increase -the response size and can slow down response times. However, it can -be useful to understand better how the model is behaving. - -Setting to true will return the log probabilities of the tokens that -were generated. - -Setting to a number will return the log probabilities of the top n -tokens that were generated. -*/ - logprobs?: boolean | number; - - /** -Whether to enable parallel function calling during tool use. Default to true. - */ - parallelToolCalls?: boolean; - - /** -A unique identifier representing your end-user, which can help Hyperbolic to -monitor and detect abuse. Learn more. 
-*/ - user?: string; - - /** - * Plugin configurations for enabling various capabilities - */ - plugins?: Array< - | { - id: models.IdWeb; - max_results?: number; - search_prompt?: string; - engine?: models.Engine; - } - | { - id: models.IdFileParser; - max_files?: number; - pdf?: { - engine?: models.PdfEngine; - }; - } - | { - id: models.IdModeration; - } - >; - - /** - * Built-in web search options for models that support native web search - */ - web_search_options?: { - /** - * Maximum number of search results to include - */ - max_results?: number; - /** - * Custom search prompt to guide the search query - */ - search_prompt?: string; - /** - * Search engine to use for web search - * - "native": Use provider's built-in web search - * - "exa": Use Exa's search API - * - undefined: Native if supported, otherwise Exa - */ - engine?: models.Engine; - }; - - /** - * Debug options for troubleshooting API requests. - * Only works with streaming requests. - */ - debug?: { - /** - * When true, echoes back the request body that was sent to the upstream provider. - * The debug data will be returned as the first chunk in the stream with a `debug.echo_upstream_body` field. - * Sensitive data like user IDs and base64 content will be redacted. - */ - echo_upstream_body?: boolean; - }; - - /** - * Provider routing preferences to control request routing behavior - */ - provider?: { - /** - * List of provider slugs to try in order (e.g. 
["anthropic", "openai"]) - */ - order?: string[]; - /** - * Whether to allow backup providers when primary is unavailable (default: true) - */ - allow_fallbacks?: boolean; - /** - * Only use providers that support all parameters in your request (default: false) - */ - require_parameters?: boolean; - /** - * Control whether to use providers that may store data - */ - data_collection?: models.DataCollection; - /** - * List of provider slugs to allow for this request - */ - only?: string[]; - /** - * List of provider slugs to skip for this request - */ - ignore?: string[]; - /** - * List of quantization levels to filter by (e.g. ["int4", "int8"]) - */ - quantizations?: Array; - /** - * Sort providers by price, throughput, or latency - */ - sort?: models.ProviderSort; - /** - * Maximum pricing you want to pay for this request - */ - max_price?: { - prompt?: number | string; - completion?: number | string; - image?: number | string; - audio?: number | string; - request?: number | string; - }; - /** - * Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. - * When true, only endpoints that do not retain prompts will be used. - */ - zdr?: boolean; - }; -} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts b/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts deleted file mode 100644 index f2c75b1..0000000 --- a/packages/ai-sdk-provider/src/types/hyperbolic-completion-settings.ts +++ /dev/null @@ -1,43 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { HyperbolicSharedSettings } from "."; - -export type { HyperbolicCompletionModelId } from "../__generated__/models.gen"; - -export type HyperbolicCompletionSettings = { - /** -Modify the likelihood of specified tokens appearing in the completion. 
- -Accepts a JSON object that maps tokens (specified by their token ID in -the GPT tokenizer) to an associated bias value from -100 to 100. You -can use this tokenizer tool to convert text to token IDs. Mathematically, -the bias is added to the logits generated by the model prior to sampling. -The exact effect will vary per model, but values between -1 and 1 should -decrease or increase likelihood of selection; values like -100 or 100 -should result in a ban or exclusive selection of the relevant token. - -As an example, you can pass {"50256": -100} to prevent the <|endoftext|> -token from being generated. - */ - logitBias?: Record; - - /** -Return the log probabilities of the tokens. Including logprobs will increase -the response size and can slow down response times. However, it can -be useful to better understand how the model is behaving. - -Setting to true will return the log probabilities of the tokens that -were generated. - -Setting to a number will return the log probabilities of the top n -tokens that were generated. - */ - logprobs?: boolean | number; - - /** -The suffix that comes after a completion of inserted text. 
- */ - suffix?: string; -} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider/src/types/index.ts b/packages/ai-sdk-provider/src/types/index.ts index 8344498..ac52c1d 100644 --- a/packages/ai-sdk-provider/src/types/index.ts +++ b/packages/ai-sdk-provider/src/types/index.ts @@ -50,22 +50,3 @@ export type HyperbolicSharedSettings = HyperbolicProviderOptions & { include: boolean; }; }; - -/** - * Usage accounting response - */ -export type HyperbolicUsageAccounting = { - promptTokens: number; - promptTokensDetails?: { - cachedTokens: number; - }; - completionTokens: number; - completionTokensDetails?: { - reasoningTokens: number; - }; - totalTokens: number; - cost?: number; - costDetails?: { - upstreamInferenceCost: number; - }; -}; diff --git a/packages/ai-sdk-provider/src/utils/map-finish-reason.ts b/packages/ai-sdk-provider/src/utils/map-finish-reason.ts deleted file mode 100644 index 1600de4..0000000 --- a/packages/ai-sdk-provider/src/utils/map-finish-reason.ts +++ /dev/null @@ -1,39 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"; - -type UnifiedFinishReason = "stop" | "length" | "content-filter" | "tool-calls" | "error" | "other"; - -function mapToUnified(finishReason: string | null | undefined): UnifiedFinishReason { - switch (finishReason) { - case "stop": - return "stop"; - case "length": - return "length"; - case "content_filter": - return "content-filter"; - case "function_call": - case "tool_calls": - return "tool-calls"; - default: - return "other"; - } -} - -export function mapHyperbolicFinishReason( - finishReason: string | null | undefined, -): LanguageModelV3FinishReason { - return { - unified: mapToUnified(finishReason), - raw: finishReason ?? 
undefined, - }; -} - -export function createFinishReason( - unified: UnifiedFinishReason, - raw?: string, -): LanguageModelV3FinishReason { - return { unified, raw }; -} diff --git a/packages/ai-sdk-provider/src/utils/remove-undefined.ts b/packages/ai-sdk-provider/src/utils/remove-undefined.ts index 9a54448..4c0b391 100644 --- a/packages/ai-sdk-provider/src/utils/remove-undefined.ts +++ b/packages/ai-sdk-provider/src/utils/remove-undefined.ts @@ -1,7 +1,3 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - /** * Removes entries from a record where the value is null or undefined. * @param record - The input object whose entries may be null or undefined. diff --git a/packages/ai-sdk-provider/src/utils/type-guards.ts b/packages/ai-sdk-provider/src/utils/type-guards.ts deleted file mode 100644 index b3c52c6..0000000 --- a/packages/ai-sdk-provider/src/utils/type-guards.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -/** - * Type guard to check if a value is defined and not null - */ -export function isDefinedOrNotNull(value: T | null | undefined): value is T { - return value !== null && value !== undefined; -} diff --git a/packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts b/packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts index 962fa7d..0a7df9d 100644 --- a/packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts +++ b/packages/ai-sdk-provider/src/utils/with-user-agent-suffix.ts @@ -1,7 +1,3 @@ -// Modified by Hyperbolic Labs, Inc. on 2026-01-23 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - import { removeUndefinedEntries } from "../utils/remove-undefined"; /** From 57a913f1cb639adf3ef9b5bcb62c0a8fe1a0e103 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 16:34:37 -0800 Subject: [PATCH 19/22] remove old package --- packages/ai-sdk-provider-old/.prettierignore | 3 - packages/ai-sdk-provider-old/CHANGELOG.md | 41 - packages/ai-sdk-provider-old/LICENSE | 202 ---- packages/ai-sdk-provider-old/README.md | 85 -- packages/ai-sdk-provider-old/eslint.config.js | 9 - packages/ai-sdk-provider-old/package.json | 70 -- .../src/__generated__/models.gen.ts | 41 - ...onvert-to-hyperbolic-chat-messages.test.ts | 437 -------- .../convert-to-hyperbolic-chat-messages.ts | 165 --- ...convert-to-hyperbolic-completion-prompt.ts | 134 --- .../hyperbolic-chat-language-model.test.ts | 990 ------------------ .../src/hyperbolic-chat-language-model.ts | 659 ------------ .../src/hyperbolic-chat-prompt.ts | 67 -- .../src/hyperbolic-chat-settings.ts | 50 - ...perbolic-completion-language-model.test.ts | 496 --------- .../hyperbolic-completion-language-model.ts | 352 ------- .../src/hyperbolic-completion-settings.ts | 42 - .../src/hyperbolic-error.ts | 49 - .../src/hyperbolic-image-language-model.ts | 130 --- .../src/hyperbolic-image-settings.ts | 37 - .../src/hyperbolic-provider-options.test.ts | 64 -- .../src/hyperbolic-provider.ts | 180 ---- packages/ai-sdk-provider-old/src/index.ts | 3 - .../ai-sdk-provider-old/src/internal/index.ts | 7 - .../src/map-hyperbolic-chat-logprobs.ts | 37 - .../src/map-hyperbolic-completion-logprobs.ts | 24 - .../src/map-hyperbolic-finish-reason.ts | 23 - .../src/scripts/templates/models.ts.hbs | 32 - .../src/scripts/update-models-list.ts | 31 - packages/ai-sdk-provider-old/src/types.ts | 47 - packages/ai-sdk-provider-old/tsconfig.json | 5 - packages/ai-sdk-provider-old/tsup.config.ts | 17 - packages/ai-sdk-provider-old/turbo.json | 10 - .../ai-sdk-provider-old/vitest.config.mts | 
10 - .../ai-sdk-provider-old/vitest.edge.config.ts | 10 - .../ai-sdk-provider-old/vitest.node.config.ts | 10 - pnpm-lock.yaml | 63 +- 37 files changed, 2 insertions(+), 4630 deletions(-) delete mode 100644 packages/ai-sdk-provider-old/.prettierignore delete mode 100644 packages/ai-sdk-provider-old/CHANGELOG.md delete mode 100644 packages/ai-sdk-provider-old/LICENSE delete mode 100644 packages/ai-sdk-provider-old/README.md delete mode 100644 packages/ai-sdk-provider-old/eslint.config.js delete mode 100644 packages/ai-sdk-provider-old/package.json delete mode 100644 packages/ai-sdk-provider-old/src/__generated__/models.gen.ts delete mode 100644 packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.test.ts delete mode 100644 packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.ts delete mode 100644 packages/ai-sdk-provider-old/src/convert-to-hyperbolic-completion-prompt.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.test.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-chat-prompt.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-chat-settings.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.test.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-completion-settings.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-error.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-image-language-model.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-image-settings.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-provider-options.test.ts delete mode 100644 packages/ai-sdk-provider-old/src/hyperbolic-provider.ts delete mode 100644 packages/ai-sdk-provider-old/src/index.ts delete mode 
100644 packages/ai-sdk-provider-old/src/internal/index.ts delete mode 100644 packages/ai-sdk-provider-old/src/map-hyperbolic-chat-logprobs.ts delete mode 100644 packages/ai-sdk-provider-old/src/map-hyperbolic-completion-logprobs.ts delete mode 100644 packages/ai-sdk-provider-old/src/map-hyperbolic-finish-reason.ts delete mode 100644 packages/ai-sdk-provider-old/src/scripts/templates/models.ts.hbs delete mode 100644 packages/ai-sdk-provider-old/src/scripts/update-models-list.ts delete mode 100644 packages/ai-sdk-provider-old/src/types.ts delete mode 100644 packages/ai-sdk-provider-old/tsconfig.json delete mode 100644 packages/ai-sdk-provider-old/tsup.config.ts delete mode 100644 packages/ai-sdk-provider-old/turbo.json delete mode 100644 packages/ai-sdk-provider-old/vitest.config.mts delete mode 100644 packages/ai-sdk-provider-old/vitest.edge.config.ts delete mode 100644 packages/ai-sdk-provider-old/vitest.node.config.ts diff --git a/packages/ai-sdk-provider-old/.prettierignore b/packages/ai-sdk-provider-old/.prettierignore deleted file mode 100644 index 523438f..0000000 --- a/packages/ai-sdk-provider-old/.prettierignore +++ /dev/null @@ -1,3 +0,0 @@ -# Ignore Handlebars template files -**/*.hbs -**/*.ts.hbs \ No newline at end of file diff --git a/packages/ai-sdk-provider-old/CHANGELOG.md b/packages/ai-sdk-provider-old/CHANGELOG.md deleted file mode 100644 index 6a00837..0000000 --- a/packages/ai-sdk-provider-old/CHANGELOG.md +++ /dev/null @@ -1,41 +0,0 @@ -# @hyperbolic/ai-sdk-provider - -## 0.1.3 - -### Patch Changes - -- [#19](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/19) [`825eb5f6be2f1d9ee959b62b79573a65f56362a5`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/825eb5f6be2f1d9ee959b62b79573a65f56362a5) Thanks [@connorch](https://github.com/connorch)! 
- bump deps and tweak readme - -## 0.1.2 - -### Patch Changes - -- [#16](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/16) [`78fde27c775553b84af8a1c90b1c08ba6821513c`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/78fde27c775553b84af8a1c90b1c08ba6821513c) Thanks [@connorch](https://github.com/connorch)! - handle error parsing when the termination sequence is appended to the… - -## 0.1.1 - -### Patch Changes - -- [#13](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/13) [`6d53d2982a58215344fa66b3cf45d553e713d7f6`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/6d53d2982a58215344fa66b3cf45d553e713d7f6) Thanks [@connorch](https://github.com/connorch)! - Add links to repo in package.json - -## 0.1.0 - -### Minor Changes - -- [`21c60cbb9aff7d7256bbbf39007c824a604eea00`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/21c60cbb9aff7d7256bbbf39007c824a604eea00) Thanks [@connorch](https://github.com/connorch)! - Initial release for the ai-sdk-provider - -### Patch Changes - -- [`21c60cbb9aff7d7256bbbf39007c824a604eea00`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/21c60cbb9aff7d7256bbbf39007c824a604eea00) Thanks [@connorch](https://github.com/connorch)! - Add documentation - -## 0.0.3 - -### Patch Changes - -- [#4](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/4) [`3abde9c43edd10d4cc2b65e036298fdad5a21c96`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/3abde9c43edd10d4cc2b65e036298fdad5a21c96) Thanks [@connorch](https://github.com/connorch)! - update npm token in release - -## 0.0.2 - -### Patch Changes - -- [#1](https://github.com/HyperbolicLabs/hyperbolic-ts/pull/1) [`dd3804f79603b4d6876b866efb6fd17cf72e99a2`](https://github.com/HyperbolicLabs/hyperbolic-ts/commit/dd3804f79603b4d6876b866efb6fd17cf72e99a2) Thanks [@connorch](https://github.com/connorch)! 
- fix: test changeset diff --git a/packages/ai-sdk-provider-old/LICENSE b/packages/ai-sdk-provider-old/LICENSE deleted file mode 100644 index 1bceb99..0000000 --- a/packages/ai-sdk-provider-old/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, 
in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2025 OpenRouter Inc, - Copyright 2025 Hyperbolic Labs Inc, - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/packages/ai-sdk-provider-old/README.md b/packages/ai-sdk-provider-old/README.md deleted file mode 100644 index 3e99fbc..0000000 --- a/packages/ai-sdk-provider-old/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# Hyperbolic Provider for Vercel AI SDK - -The [Hyperbolic](https://hyperbolic.xyz/) provider for the [Vercel AI SDK](https://sdk.vercel.ai/docs) gives access to any model found at . - -This is based on the [OpenRouter](https://openrouter.ai/) provider for the Vercel AI SDK, with a number of changes to support the Hyperbolic API and add image generation support. 
- -## Setup - -```bash -# For pnpm -pnpm add @hyperbolic/ai-sdk-provider - -# For npm -npm install @hyperbolic/ai-sdk-provider - -# For yarn -yarn add @hyperbolic/ai-sdk-provider -``` - -## Provider Instance - -You can create a provider instance with `createHyperbolic` from `@hyperbolic/ai-sdk-provider`: - -```ts -import { createHyperbolic } from "@hyperbolic/ai-sdk-provider"; -``` - -## Example - -```ts -import { generateText } from "ai"; - -import { createHyperbolic } from "@hyperbolic/ai-sdk-provider"; - -const hyperbolic = createHyperbolic({ - apiKey: process.env.HYPERBOLIC_API_KEY, // Found in settings after logging in at https://app.hyperbolic.xyz -}); - -const { text } = await generateText({ - model: hyperbolic.chat("deepseek-ai/DeepSeek-R1"), - prompt: "Write a vegetarian lasagna recipe for 4 people.", -}); -``` - -## Supported models - -This list is not a definitive list of models supported by Hyperbolic, as it constantly changes as we add new models (and deprecate old ones) to our system. -You can find the latest list of models supported by Hyperbolic [here](https://openrouter.ai/models). 
- -## Using Models - -### Language Models - -```ts -const { text } = await generateText({ - model: hyperbolic.chat("deepseek-ai/DeepSeek-R1"), - prompt: "Write a vegetarian lasagna recipe for 4 people.", -}); - -const { text } = await generateText({ - model: hyperbolic.completion("deepseek-ai/DeepSeek-R1"), - prompt: "The capital of France is", -}); -``` - -### Image Generation Models - -```ts -import { experimental_generateImage as generateImage } from "ai"; - -// Text to Image -const { images } = await generateImage({ - model: hyperbolic.image("SDXL1.0-base"), - prompt: "A beautiful sunset over a calm ocean", - size: "1024x1024", - providerOptions: { - hyperbolic: { - cfgScale: 5, - steps: 30, - negativePrompt: "low quality, blurry, distorted", - enableRefiner: false, - } satisfies HyperbolicImageProviderOptions, - }, -}); -``` diff --git a/packages/ai-sdk-provider-old/eslint.config.js b/packages/ai-sdk-provider-old/eslint.config.js deleted file mode 100644 index 5b13b2e..0000000 --- a/packages/ai-sdk-provider-old/eslint.config.js +++ /dev/null @@ -1,9 +0,0 @@ -import baseConfig from "@hyperbolic/eslint-config/base"; - -/** @type {import('typescript-eslint').Config} */ -export default [ - { - ignores: [], - }, - ...baseConfig, -]; diff --git a/packages/ai-sdk-provider-old/package.json b/packages/ai-sdk-provider-old/package.json deleted file mode 100644 index fef3ce5..0000000 --- a/packages/ai-sdk-provider-old/package.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "name": "@hyperbolic/ai-sdk-provider-old", - "private": false, - "version": "0.1.3", - "type": "module", - "main": "./dist/index.js", - "module": "./dist/index.js", - "types": "./dist/index.d.ts", - "repository": { - "type": "git", - "url": "https://github.com/HyperbolicLabs/hyperbolic-ts.git", - "directory": "packages/ai-sdk-provider" - }, - "homepage": "https://github.com/HyperbolicLabs/hyperbolic-ts/tree/main/packages/ai-sdk-provider", - "bugs": { - "url": 
"https://github.com/HyperbolicLabs/hyperbolic-ts/issues" - }, - "exports": { - "./package.json": "./package.json", - ".": { - "types": "./dist/index.d.ts", - "import": "./dist/index.js", - "require": "./dist/index.cjs" - }, - "./internal": { - "types": "./dist/internal/index.d.ts", - "import": "./dist/internal/index.js", - "module": "./dist/internal/index.js", - "require": "./dist/internal/index.cjs" - } - }, - "files": [ - "dist/**/*", - "CHANGELOG.md" - ], - "license": "Apache-2.0", - "scripts": { - "build": "tsup", - "clean": "git clean -xdf .cache .turbo dist node_modules", - "dev": "pnpm with-env tsup", - "format": "prettier --check . --ignore-path ../../.gitignore --ignore-path .prettierignore", - "lint": "eslint", - "typecheck": "tsc --noEmit --emitDeclarationOnly false", - "with-env": "dotenv -e ../../.env -c --", - "codegen:update-models": "pnpm with-env npx tsx src/scripts/update-models-list.ts", - "test": "pnpm test:node && pnpm test:edge", - "test:edge": "pnpm with-env vitest --config vitest.edge.config.ts --run", - "test:node": "pnpm with-env vitest --config vitest.node.config.ts --run" - }, - "prettier": "@hyperbolic/prettier-config", - "devDependencies": { - "@edge-runtime/vm": "^5.0.0", - "@hyperbolic/api": "workspace:*", - "@hyperbolic/eslint-config": "workspace:*", - "@hyperbolic/prettier-config": "workspace:*", - "@hyperbolic/tsconfig": "workspace:*", - "eslint": "catalog:", - "handlebars": "^4.7.8", - "prettier": "catalog:", - "tsup": "8.5.0", - "type-fest": "^4.37.0", - "typescript": "catalog:" - }, - "dependencies": { - "@ai-sdk/provider": "^3.0.5", - "@ai-sdk/provider-utils": "^4.0.9", - "ai": "^6.0.48", - "zod": "^4.0.0" - } -} diff --git a/packages/ai-sdk-provider-old/src/__generated__/models.gen.ts b/packages/ai-sdk-provider-old/src/__generated__/models.gen.ts deleted file mode 100644 index 08a1ae7..0000000 --- a/packages/ai-sdk-provider-old/src/__generated__/models.gen.ts +++ /dev/null @@ -1,41 +0,0 @@ -// prettier-ignore - -// This file 
is auto-generated by the pnpm codegen:update-models script. Do not edit manually. - -const _models = [ - "Qwen/Qwen2.5-72B-Instruct", - "Qwen/Qwen2.5-VL-72B-Instruct", - "meta-llama/Meta-Llama-3-70B-Instruct", - "meta-llama/Meta-Llama-3.1-70B-Instruct", - "meta-llama/Meta-Llama-3.1-405B-FP8", - "Qwen/Qwen2.5-VL-7B-Instruct", - "meta-llama/Meta-Llama-3.1-405B-Instruct", - "Qwen/QwQ-32B", - "deepseek-ai/DeepSeek-V3", - "Qwen/QwQ-32B-Preview", - "meta-llama/Llama-3.3-70B-Instruct", - "NousResearch/Hermes-3-Llama-3.1-70B", - "meta-llama/Meta-Llama-3.1-405B", - "meta-llama/Llama-3.2-3B-Instruct", - "FLUX.1-dev", - "mistralai/Pixtral-12B-2409", - "StableDiffusion", - "meta-llama/Meta-Llama-3.1-8B-Instruct", - "Qwen/Qwen2.5-Coder-32B-Instruct", - "TTS", - "deepseek-ai/DeepSeek-R1", -] as const; - -const _imageModels = [] as const; - -const _chatModels = [] as const; - -const _completionModels = [] as const; - -export type HyperbolicImageModelId = (typeof _imageModels)[number] | string; -export type HyperbolicChatModelId = (typeof _chatModels)[number] | string; -export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | string; -export type HyperbolicModelId = - | HyperbolicImageModelId - | HyperbolicChatModelId - | HyperbolicCompletionModelId; diff --git a/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.test.ts b/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.test.ts deleted file mode 100644 index af2e058..0000000 --- a/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.test.ts +++ /dev/null @@ -1,437 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { describe, expect, it } from "vitest"; - -import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; - -describe("user messages", () => { - it("should convert messages with image parts to multiple parts", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - }, - ], - }, - ]); - }); - - it("should convert messages with only a text part to a string content", async () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - }, - ]); - - expect(result).toEqual([{ role: "user", content: "Hello" }]); - }); -}); - -describe("cache control", () => { - it("should pass cache control from system message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "system", - content: "System prompt", - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "system", - content: "System prompt", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should pass cache control from user message provider metadata (single text part)", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [{ type: "text", text: "Hello" }], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: "Hello", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should pass cache control from user message provider metadata 
(multiple parts)", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: undefined, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control to multiple image parts from user message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - }, - { - type: "image", - image: new Uint8Array([4, 5, 6, 7]), - mimeType: "image/jpeg", - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: undefined, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - cache_control: { type: "ephemeral" }, - }, - { - type: "image_url", - image_url: { url: "data:image/jpeg;base64,BAUGBw==" }, - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control to file parts from user message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { type: "text", text: "Hello" }, - { - type: "file", - data: "file content", - mimeType: "text/plain", - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: 
"text", - text: "Hello", - cache_control: undefined, - }, - { - type: "text", - text: "file content", - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should handle mixed part-specific and message-level cache control for multiple parts", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - // No part-specific provider metadata - }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - { - type: "file", - data: "file content", - mimeType: "text/plain", - // No part-specific provider metadata - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: undefined, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - cache_control: { type: "ephemeral" }, - }, - { - type: "text", - text: "file content", - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); - - it("should pass cache control from individual content part provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - { - type: "image", - image: new Uint8Array([0, 1, 2, 3]), - mimeType: "image/png", - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "user", - content: [ - { - type: "text", - text: "Hello", - cache_control: { type: "ephemeral" }, - }, - { - type: "image_url", - image_url: { url: "data:image/png;base64,AAECAw==" }, - }, - ], - }, - ]); - }); - - it("should pass cache control from assistant message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - 
{ - role: "assistant", - content: [{ type: "text", text: "Assistant response" }], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "assistant", - content: "Assistant response", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should pass cache control from tool message provider metadata", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "tool", - content: [ - { - type: "tool-result", - toolCallId: "call-123", - toolName: "calculator", - result: { answer: 42 }, - isError: false, - }, - ], - providerMetadata: { - anthropic: { - cacheControl: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "tool", - tool_call_id: "call-123", - content: JSON.stringify({ answer: 42 }), - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should support the alias cache_control field", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "system", - content: "System prompt", - providerMetadata: { - anthropic: { - cache_control: { type: "ephemeral" }, - }, - }, - }, - ]); - - expect(result).toEqual([ - { - role: "system", - content: "System prompt", - cache_control: { type: "ephemeral" }, - }, - ]); - }); - - it("should support cache control on last message in content array", () => { - const result = convertToHyperbolicChatMessages([ - { - role: "system", - content: "System prompt", - }, - { - role: "user", - content: [ - { type: "text", text: "User prompt" }, - { - type: "text", - text: "User prompt 2", - providerMetadata: { - anthropic: { cacheControl: { type: "ephemeral" } }, - }, - }, - ], - }, - ]); - - expect(result).toEqual([ - { - role: "system", - content: "System prompt", - }, - { - role: "user", - content: [ - { type: "text", text: "User prompt" }, - { - type: "text", - text: "User prompt 2", - cache_control: { type: "ephemeral" }, - }, - ], - }, - ]); - }); -}); diff --git 
a/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.ts b/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.ts deleted file mode 100644 index 30bae0b..0000000 --- a/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-chat-messages.ts +++ /dev/null @@ -1,165 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt, LanguageModelV1ProviderMetadata } from "@ai-sdk/provider"; -import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils"; - -import type { ChatCompletionContentPart, HyperbolicChatPrompt } from "./hyperbolic-chat-prompt"; - -// Type for Hyperbolic Cache Control following Anthropic's pattern -export type HyperbolicCacheControl = { type: "ephemeral" }; - -export function convertToHyperbolicChatMessages( - prompt: LanguageModelV1Prompt, -): HyperbolicChatPrompt { - const messages: HyperbolicChatPrompt = []; - - function getCacheControl( - providerMetadata: LanguageModelV1ProviderMetadata | undefined, - ): HyperbolicCacheControl | undefined { - const anthropic = providerMetadata?.anthropic; - - // Allow both cacheControl and cache_control: - const cacheControlValue = anthropic?.cacheControl ?? anthropic?.cache_control; - - // Return the cache control object if it exists - return cacheControlValue as HyperbolicCacheControl | undefined; - } - - for (const { role, content, providerMetadata } of prompt) { - switch (role) { - case "system": { - messages.push({ - role: "system", - content, - cache_control: getCacheControl(providerMetadata), - }); - break; - } - - case "user": { - if (content.length === 1 && content[0]?.type === "text") { - messages.push({ - role: "user", - content: content[0].text, - cache_control: - getCacheControl(providerMetadata) ?? 
getCacheControl(content[0].providerMetadata), - }); - break; - } - - // Get message level cache control - const messageCacheControl = getCacheControl(providerMetadata); - - const contentParts: ChatCompletionContentPart[] = content.map((part) => { - switch (part.type) { - case "text": - return { - type: "text" as const, - text: part.text, - // For text parts, only use part-specific cache control - cache_control: getCacheControl(part.providerMetadata), - }; - case "image": - return { - type: "image_url" as const, - image_url: { - url: - part.image instanceof URL - ? part.image.toString() - : `data:${part.mimeType ?? "image/jpeg"};base64,${convertUint8ArrayToBase64( - part.image, - )}`, - }, - // For image parts, use part-specific or message-level cache control - cache_control: getCacheControl(part.providerMetadata) ?? messageCacheControl, - }; - case "file": - return { - type: "text" as const, - text: part.data instanceof URL ? part.data.toString() : part.data, - cache_control: getCacheControl(part.providerMetadata) ?? 
messageCacheControl, - }; - default: { - const _exhaustiveCheck: never = part; - throw new Error(`Unsupported content part type: ${_exhaustiveCheck}`); - } - } - }); - - // For multi-part messages, don't add cache_control at the root level - messages.push({ - role: "user", - content: contentParts, - }); - - break; - } - - case "assistant": { - let text = ""; - const toolCalls: Array<{ - id: string; - type: "function"; - function: { name: string; arguments: string }; - }> = []; - - for (const part of content) { - switch (part.type) { - case "text": { - text += part.text; - break; - } - case "tool-call": { - toolCalls.push({ - id: part.toolCallId, - type: "function", - function: { - name: part.toolName, - arguments: JSON.stringify(part.args), - }, - }); - break; - } - // TODO: Handle reasoning and redacted-reasoning - case "reasoning": - case "redacted-reasoning": - break; - default: { - throw new Error(`Unsupported part: ${part}`); - } - } - } - - messages.push({ - role: "assistant", - content: text, - tool_calls: toolCalls.length > 0 ? toolCalls : undefined, - cache_control: getCacheControl(providerMetadata), - }); - - break; - } - - case "tool": { - for (const toolResponse of content) { - messages.push({ - role: "tool", - tool_call_id: toolResponse.toolCallId, - content: JSON.stringify(toolResponse.result), - cache_control: - getCacheControl(providerMetadata) ?? getCacheControl(toolResponse.providerMetadata), - }); - } - break; - } - - default: { - throw new Error(`Unsupported role: ${role}`); - } - } - } - - return messages; -} diff --git a/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-completion-prompt.ts b/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-completion-prompt.ts deleted file mode 100644 index c241b77..0000000 --- a/packages/ai-sdk-provider-old/src/convert-to-hyperbolic-completion-prompt.ts +++ /dev/null @@ -1,134 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; -import { InvalidPromptError, UnsupportedFunctionalityError } from "@ai-sdk/provider"; - -export function convertToHyperbolicCompletionPrompt({ - prompt, - inputFormat, - user = "user", - assistant = "assistant", -}: { - prompt: LanguageModelV1Prompt; - inputFormat: "prompt" | "messages"; - user?: string; - assistant?: string; -}): { - prompt: string; -} { - // When the user supplied a prompt input, we don't transform it: - if ( - inputFormat === "prompt" && - prompt.length === 1 && - prompt[0] && - prompt[0].role === "user" && - prompt[0].content.length === 1 && - prompt[0].content[0] && - prompt[0].content[0].type === "text" - ) { - return { prompt: prompt[0].content[0].text }; - } - - // otherwise transform to a chat message format: - let text = ""; - - // if first message is a system message, add it to the text: - if (prompt[0] && prompt[0].role === "system") { - text += `${prompt[0].content}\n\n`; - prompt = prompt.slice(1); - } - - for (const { role, content } of prompt) { - switch (role) { - case "system": { - throw new InvalidPromptError({ - message: "Unexpected system message in prompt: ${content}", - prompt, - }); - } - - case "user": { - const userMessage = content - .map((part) => { - switch (part.type) { - case "text": { - return part.text; - } - case "image": { - throw new UnsupportedFunctionalityError({ - functionality: "images", - }); - } - case "file": { - throw new UnsupportedFunctionalityError({ - functionality: "file attachments", - }); - } - default: { - const _exhaustiveCheck: never = part; - throw new Error(`Unsupported content type: ${_exhaustiveCheck}`); - } - } - }) - .join(""); - - text += `${user}:\n${userMessage}\n\n`; - break; - } - - case "assistant": { - const assistantMessage = content - .map((part) => { - switch (part.type) { - case "text": { - return part.text; - } - case "tool-call": { - throw new 
UnsupportedFunctionalityError({ - functionality: "tool-call messages", - }); - } - case "reasoning": { - throw new UnsupportedFunctionalityError({ - functionality: "reasoning messages", - }); - } - - case "redacted-reasoning": { - throw new UnsupportedFunctionalityError({ - functionality: "redacted reasoning messages", - }); - } - - default: { - throw new Error(`Unsupported content type: ${part}`); - } - } - }) - .join(""); - - text += `${assistant}:\n${assistantMessage}\n\n`; - break; - } - - case "tool": { - throw new UnsupportedFunctionalityError({ - functionality: "tool messages", - }); - } - - default: { - throw new Error(`Unsupported role: ${role}`); - } - } - } - - // Assistant message prefix: - text += `${assistant}:\n`; - - return { - prompt: text, - }; -} diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.test.ts b/packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.test.ts deleted file mode 100644 index 944ccce..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.test.ts +++ /dev/null @@ -1,990 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; -import { - convertReadableStreamToArray, - JsonTestServer, - StreamingTestServer, -} from "@ai-sdk/provider-utils/test"; -import { describe, expect, it } from "vitest"; - -import { createHyperbolic } from "./hyperbolic-provider"; -import { mapHyperbolicChatLogProbsOutput } from "./map-hyperbolic-chat-logprobs"; - -const TEST_PROMPT: LanguageModelV1Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -const TEST_LOGPROBS = { - content: [ - { - token: "Hello", - logprob: -0.0009994634, - top_logprobs: [ - { - token: "Hello", - logprob: -0.0009994634, - }, - ], - }, - { - token: "!", - logprob: -0.13410144, - top_logprobs: [ - { - token: "!", - logprob: -0.13410144, - }, - ], - }, - { - token: " How", - logprob: -0.0009250381, - top_logprobs: [ - { - token: " How", - logprob: -0.0009250381, - }, - ], - }, - { - token: " can", - logprob: -0.047709424, - top_logprobs: [ - { - token: " can", - logprob: -0.047709424, - }, - ], - }, - { - token: " I", - logprob: -0.000009014684, - top_logprobs: [ - { - token: " I", - logprob: -0.000009014684, - }, - ], - }, - { - token: " assist", - logprob: -0.009125131, - top_logprobs: [ - { - token: " assist", - logprob: -0.009125131, - }, - ], - }, - { - token: " you", - logprob: -0.0000066306106, - top_logprobs: [ - { - token: " you", - logprob: -0.0000066306106, - }, - ], - }, - { - token: " today", - logprob: -0.00011093382, - top_logprobs: [ - { - token: " today", - logprob: -0.00011093382, - }, - ], - }, - { - token: "?", - logprob: -0.00004596782, - top_logprobs: [ - { - token: "?", - logprob: -0.00004596782, - }, - ], - }, - ], -}; - -const provider = createHyperbolic({ - apiKey: "test-api-key", - compatibility: "strict", -}); - -const model = provider.chat("anthropic/claude-3.5-sonnet"); - -describe("doGenerate", () => { - const server = new 
JsonTestServer("https://api.hyperbolic.xyz/v1/chat/completions"); - - server.setupTestEnvironment(); - - function prepareJsonResponse({ - content = "", - usage = { - prompt_tokens: 4, - total_tokens: 34, - completion_tokens: 30, - }, - logprobs = null, - finish_reason = "stop", - }: { - content?: string; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - content: - | { - token: string; - logprob: number; - top_logprobs: { token: string; logprob: number }[]; - }[] - | null; - } | null; - finish_reason?: string; - } = {}) { - server.responseBodyJson = { - id: "chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd", - object: "chat.completion", - created: 1711115037, - model: "gpt-3.5-turbo-0125", - choices: [ - { - index: 0, - message: { - role: "assistant", - content, - }, - logprobs, - finish_reason, - }, - ], - usage, - system_fingerprint: "fp_3bc1b5746c", - }; - } - - it("should extract text response", async () => { - prepareJsonResponse({ content: "Hello, World!" 
}); - - const { text } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(text).toStrictEqual("Hello, World!"); - }); - - it("should extract usage", async () => { - prepareJsonResponse({ - content: "", - usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, - }); - - const { usage } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(usage).toStrictEqual({ - promptTokens: 20, - completionTokens: 5, - }); - }); - - it("should extract logprobs", async () => { - prepareJsonResponse({ - logprobs: TEST_LOGPROBS, - }); - - const response = await provider.chat("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - expect(response.logprobs).toStrictEqual(mapHyperbolicChatLogProbsOutput(TEST_LOGPROBS)); - }); - - it("should extract finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "stop", - }); - - const response = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(response.finishReason).toStrictEqual("stop"); - }); - - it("should support unknown finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "eos", - }); - - const response = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(response.finishReason).toStrictEqual("unknown"); - }); - - it("should expose the raw response headers", async () => { - prepareJsonResponse({ content: "" }); - - server.responseHeaders = { - "test-header": "test-value", - }; - - const { rawResponse } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(rawResponse?.headers).toStrictEqual({ - // default headers: - "content-length": "337", - 
"content-type": "application/json", - - // custom header - "test-header": "test-value", - }); - }); - - it("should pass the model and the messages", async () => { - prepareJsonResponse({ content: "" }); - - await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - }); - }); - - it("should pass the models array when provided", async () => { - prepareJsonResponse({ content: "" }); - - const customModel = provider.chat("anthropic/claude-3.5-sonnet", { - models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], - }); - - await customModel.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - models: ["anthropic/claude-2", "gryphe/mythomax-l2-13b"], - messages: [{ role: "user", content: "Hello" }], - }); - }); - - it("should pass settings", async () => { - prepareJsonResponse(); - - await provider - .chat("openai/gpt-3.5-turbo", { - logitBias: { 50256: -100 }, - logprobs: 2, - parallelToolCalls: false, - user: "test-user-id", - }) - .doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "openai/gpt-3.5-turbo", - messages: [{ role: "user", content: "Hello" }], - logprobs: true, - top_logprobs: 2, - logit_bias: { 50256: -100 }, - parallel_tool_calls: false, - user: "test-user-id", - }); - }); - - it("should pass tools and toolChoice", async () => { - prepareJsonResponse({ content: "" }); - - await model.doGenerate({ - inputFormat: "prompt", - mode: { - type: "regular", - tools: [ - { - type: "function", - name: "test-tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: 
["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - toolChoice: { - type: "tool", - toolName: "test-tool", - }, - }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - tools: [ - { - type: "function", - function: { - name: "test-tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - }, - ], - tool_choice: { - type: "function", - function: { name: "test-tool" }, - }, - }); - }); - - it("should pass headers", async () => { - prepareJsonResponse({ content: "" }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.chat("openai/gpt-3.5-turbo").doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - const requestHeaders = await server.getRequestHeaders(); - - expect(requestHeaders).toStrictEqual({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - }); -}); - -describe("doStream", () => { - const server = new StreamingTestServer("https://api.hyperbolic.xyz/v1/chat/completions"); - - server.setupTestEnvironment(); - - function prepareStreamResponse({ - content, - usage = { - prompt_tokens: 17, - total_tokens: 244, - completion_tokens: 227, - }, - logprobs = null, - finish_reason = "stop", - }: { - content: string[]; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - content: - | { - token: string; - logprob: number; - 
top_logprobs: { token: string; logprob: number }[]; - }[] - | null; - } | null; - finish_reason?: string; - }) { - server.responseChunks = [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + - `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`, - ...content.flatMap((text) => { - return ( - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + - `"system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"${text}"},"finish_reason":null}]}\n\n` - ); - }), - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + - `"system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"${finish_reason}","logprobs":${JSON.stringify( - logprobs, - )}}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":${JSON.stringify(usage)}}\n\n`, - "data: [DONE]\n\n", - ]; - } - - it("should stream text deltas", async () => { - prepareStreamResponse({ - content: ["Hello", ", ", "World!"], - finish_reason: "stop", - usage: { - prompt_tokens: 17, - total_tokens: 244, - completion_tokens: 227, - }, - logprobs: TEST_LOGPROBS, - }); - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - // note: space moved to last chunk bc of trimming - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", textDelta: "" }, - { - type: 
"response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", textDelta: "Hello" }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", textDelta: ", " }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { type: "text-delta", textDelta: "World!" }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0613", - }, - { - type: "finish", - finishReason: "stop", - logprobs: mapHyperbolicChatLogProbsOutput(TEST_LOGPROBS), - usage: { promptTokens: 17, completionTokens: 227 }, - }, - ]); - }); - - it("should stream tool deltas", async () => { - server.responseChunks = [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + - `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":""}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\\""}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"value"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\":\\""}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Spark"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"le"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Day"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\"}"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: 
{"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - "data: [DONE]\n\n", - ]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { - type: "regular", - tools: [ - { - type: "function", - name: "test-tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - }, - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: '{"', - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: "value", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: 
"tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: '":"', - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: "Spark", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: "le", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: " Day", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: '"}', - }, - { - type: "tool-call", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - args: '{"value":"Sparkle Day"}', - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "finish", - finishReason: "tool-calls", - logprobs: undefined, - usage: { promptTokens: 53, 
completionTokens: 17 }, - }, - ]); - }); - - it("should stream tool call that is sent in one chunk", async () => { - server.responseChunks = [ - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + - `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":"{\\"value\\":\\"Sparkle Day\\"}"}}]},` + - `"logprobs":null,"finish_reason":null}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, - `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + - `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, - "data: [DONE]\n\n", - ]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { - type: "regular", - tools: [ - { - type: "function", - name: "test-tool", - parameters: { - type: "object", - properties: { value: { type: "string" } }, - required: ["value"], - additionalProperties: false, - $schema: "http://json-schema.org/draft-07/schema#", - }, - }, - ], - }, - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "tool-call-delta", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - argsTextDelta: '{"value":"Sparkle Day"}', - }, - { - type: 
"tool-call", - toolCallId: "call_O17Uplv4lJvD6DVdIvFFeRMw", - toolCallType: "function", - toolName: "test-tool", - args: '{"value":"Sparkle Day"}', - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "response-metadata", - id: "chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP", - }, - { - type: "response-metadata", - modelId: "gpt-3.5-turbo-0125", - }, - { - type: "finish", - finishReason: "tool-calls", - logprobs: undefined, - usage: { promptTokens: 53, completionTokens: 17 }, - }, - ]); - }); - - it("should handle error stream parts", async () => { - server.responseChunks = [ - `data: {"object": "error", "message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + - `help center at app.hyperbolic.xyz/support if you keep seeing this error.","type":"server_error","param":null,"code":null}\n\n`, - "data: [DONE]\n\n", - ]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "error", - error: { - object: "error", - message: - "The server had an error processing your request. Sorry about that! 
" + - "You can retry your request, or contact us through our help center at " + - "app.hyperbolic.xyz/support if you keep seeing this error.", - type: "server_error", - code: null, - param: null, - }, - }, - { - finishReason: "error", - logprobs: undefined, - type: "finish", - usage: { - completionTokens: NaN, - promptTokens: NaN, - }, - }, - ]); - }); - - it("should handle unparsable stream parts", async () => { - server.responseChunks = [`data: {unparsable}\n\n`, "data: [DONE]\n\n"]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - expect(elements.length).toBe(2); - expect(elements[0]?.type).toBe("error"); - expect(elements[1]).toStrictEqual({ - finishReason: "error", - logprobs: undefined, - type: "finish", - usage: { - completionTokens: NaN, - promptTokens: NaN, - }, - }); - }); - - it("should expose the raw response headers", async () => { - prepareStreamResponse({ content: [] }); - - server.responseHeaders = { - "test-header": "test-value", - }; - - const { rawResponse } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(rawResponse?.headers).toStrictEqual({ - // default headers: - "content-type": "text/event-stream", - "cache-control": "no-cache", - connection: "keep-alive", - - // custom header - "test-header": "test-value", - }); - }); - - it("should pass the messages and the model", async () => { - prepareStreamResponse({ content: [] }); - - await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - stream: true, - stream_options: { include_usage: true }, - model: "anthropic/claude-3.5-sonnet", - messages: [{ role: "user", content: "Hello" }], - }); - }); - - it("should pass headers", async () => { - prepareStreamResponse({ content: 
[] }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.chat("openai/gpt-3.5-turbo").doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - const requestHeaders = await server.getRequestHeaders(); - - expect(requestHeaders).toStrictEqual({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - }); - - it("should pass extra body", async () => { - prepareStreamResponse({ content: [] }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - extraBody: { - custom_field: "custom_value", - providers: { - anthropic: { - custom_field: "custom_value", - }, - }, - }, - }); - - await provider.chat("anthropic/claude-3.5-sonnet").doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - const requestBody = await server.getRequestBodyJson(); - - expect(requestBody).toHaveProperty("custom_field", "custom_value"); - expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); - }); -}); diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.ts b/packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.ts deleted file mode 100644 index 29d16b2..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-chat-language-model.ts +++ /dev/null @@ -1,659 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { - LanguageModelV1, - LanguageModelV1FinishReason, - LanguageModelV1FunctionTool, - LanguageModelV1LogProbs, - LanguageModelV1ProviderDefinedTool, - LanguageModelV1StreamPart, -} from "@ai-sdk/provider"; -import type { ParseResult } from "@ai-sdk/provider-utils"; -import { InvalidResponseDataError, UnsupportedFunctionalityError } from "@ai-sdk/provider"; -import { - combineHeaders, - createEventSourceResponseHandler, - createJsonResponseHandler, - generateId, - isParsableJson, - postJsonToApi, -} from "@ai-sdk/provider-utils"; -import { z } from "zod"; - -import type { HyperbolicChatModelId, HyperbolicChatSettings } from "./hyperbolic-chat-settings"; -import { convertToHyperbolicChatMessages } from "./convert-to-hyperbolic-chat-messages"; -import { - HyperbolicErrorResponseSchema, - hyperbolicFailedResponseHandler, - isHyperbolicError, - tryParsingHyperbolicError, -} from "./hyperbolic-error"; -import { mapHyperbolicChatLogProbsOutput } from "./map-hyperbolic-chat-logprobs"; -import { mapHyperbolicFinishReason } from "./map-hyperbolic-finish-reason"; - -function isFunctionTool( - tool: LanguageModelV1FunctionTool | LanguageModelV1ProviderDefinedTool, -): tool is LanguageModelV1FunctionTool { - return "parameters" in tool; -} - -type HyperbolicChatConfig = { - provider: string; - compatibility: "strict" | "compatible"; - headers: () => Record; - url: (options: { modelId: string; path: string }) => string; - fetch?: typeof fetch; - extraBody?: Record; -}; - -export class HyperbolicChatLanguageModel implements LanguageModelV1 { - readonly specificationVersion = "v1"; - readonly defaultObjectGenerationMode = "tool"; - - readonly modelId: HyperbolicChatModelId; - readonly settings: HyperbolicChatSettings; - - private readonly config: HyperbolicChatConfig; - - constructor( - modelId: HyperbolicChatModelId, - settings: HyperbolicChatSettings, - config: HyperbolicChatConfig, - ) { - this.modelId = modelId; 
- this.settings = settings; - this.config = config; - } - - get provider(): string { - return this.config.provider; - } - - private getArgs({ - mode, - prompt, - maxTokens, - temperature, - topP, - frequencyPenalty, - presencePenalty, - seed, - stopSequences, - responseFormat, - topK, - providerMetadata, - }: Parameters[0]) { - const type = mode.type; - const extraCallingBody = providerMetadata?.["hyperbolic"] ?? {}; - - const baseArgs = { - // model id: - model: this.modelId, - models: this.settings.models, - - // model specific settings: - logit_bias: this.settings.logitBias, - logprobs: - this.settings.logprobs === true || typeof this.settings.logprobs === "number" - ? true - : undefined, - top_logprobs: - typeof this.settings.logprobs === "number" - ? this.settings.logprobs - : typeof this.settings.logprobs === "boolean" - ? this.settings.logprobs - ? 0 - : undefined - : undefined, - user: this.settings.user, - parallel_tool_calls: this.settings.parallelToolCalls, - - // standardized settings: - max_tokens: maxTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - seed, - - stop: stopSequences, - response_format: responseFormat, - top_k: topK, - - // messages: - messages: convertToHyperbolicChatMessages(prompt), - - // Hyperbolic specific settings: - include_reasoning: this.settings.includeReasoning, - reasoning: this.settings.reasoning, - - // extra body: - ...this.config.extraBody, - ...this.settings.extraBody, - ...extraCallingBody, - }; - - switch (type) { - case "regular": { - return { ...baseArgs, ...prepareToolsAndToolChoice(mode) }; - } - - case "object-json": { - return { - ...baseArgs, - response_format: { type: "json_object" }, - }; - } - - case "object-tool": { - return { - ...baseArgs, - tool_choice: { type: "function", function: { name: mode.tool.name } }, - tools: [ - { - type: "function", - function: { - name: mode.tool.name, - description: mode.tool.description, - parameters: 
mode.tool.parameters, - }, - }, - ], - }; - } - - // Handle all non-text types with a single default case - default: { - const _exhaustiveCheck: never = type; - throw new UnsupportedFunctionalityError({ - functionality: `${_exhaustiveCheck} mode`, - }); - } - } - } - async doGenerate( - options: Parameters[0], - ): Promise>> { - const args = this.getArgs(options); - - const { responseHeaders, value: response } = await postJsonToApi({ - url: this.config.url({ - path: "/chat/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: args, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler( - HyperbolicNonStreamChatCompletionResponseSchema, - ), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - const { messages: rawPrompt, ...rawSettings } = args; - const choice = response.choices[0]; - - if (!choice) { - throw new Error("No choice in response"); - } - - return { - response: { - id: response.id, - modelId: response.model, - }, - text: choice.message.content ?? undefined, - reasoning: choice.message.reasoning ?? undefined, - toolCalls: choice.message.tool_calls?.map((toolCall) => ({ - toolCallType: "function", - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - args: toolCall.function.arguments!, - })), - finishReason: mapHyperbolicFinishReason(choice.finish_reason), - usage: { - promptTokens: response.usage?.prompt_tokens ?? 0, - completionTokens: response.usage?.completion_tokens ?? 
0, - }, - rawCall: { rawPrompt, rawSettings }, - rawResponse: { headers: responseHeaders }, - warnings: [], - logprobs: mapHyperbolicChatLogProbsOutput(choice.logprobs), - }; - } - - async doStream( - options: Parameters[0], - ): Promise>> { - const args = this.getArgs(options); - - const { responseHeaders, value: response } = await postJsonToApi({ - url: this.config.url({ - path: "/chat/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: { - ...args, - stream: true, - - // only include stream_options when in strict compatibility mode: - stream_options: - this.config.compatibility === "strict" ? { include_usage: true } : undefined, - }, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createEventSourceResponseHandler( - HyperbolicStreamChatCompletionChunkSchema, - ), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - const { messages: rawPrompt, ...rawSettings } = args; - - const toolCalls: Array<{ - id: string; - type: "function"; - function: { - name: string; - arguments: string; - }; - }> = []; - - let finishReason: LanguageModelV1FinishReason = "other"; - let usage: { promptTokens: number; completionTokens: number } = { - promptTokens: Number.NaN, - completionTokens: Number.NaN, - }; - let logprobs: LanguageModelV1LogProbs; - - return { - stream: response.pipeThrough( - new TransformStream< - ParseResult>, - LanguageModelV1StreamPart - >({ - transform(chunk, controller) { - // handle failed chunk parsing / validation: - if (!chunk.success) { - finishReason = "error"; - - // Error messages from the API are sometimes an ugly combo of text and JSON in a single chunk, so attempt to parse it as a hyperbolic error. 
- const maybeHyperbolicError = tryParsingHyperbolicError(chunk.error); - if (maybeHyperbolicError) { - controller.enqueue({ type: "error", error: maybeHyperbolicError }); - return; - } - - controller.enqueue({ - type: "error", - error: chunk.error, - }); - return; - } - - const value = chunk.value; - - // handle error chunks: - if (isHyperbolicError(value)) { - finishReason = "error"; - controller.enqueue({ type: "error", error: value }); - return; - } - - if (value.id) { - controller.enqueue({ - type: "response-metadata", - id: value.id, - }); - } - - if (value.model) { - controller.enqueue({ - type: "response-metadata", - modelId: value.model, - }); - } - - if (value.usage != null) { - usage = { - promptTokens: value.usage.prompt_tokens, - completionTokens: value.usage.completion_tokens, - }; - } - - const choice = value.choices[0]; - - if (choice?.finish_reason != null) { - finishReason = mapHyperbolicFinishReason(choice.finish_reason); - } - - if (choice?.delta == null) { - return; - } - - const delta = choice.delta; - - if (delta.content != null) { - controller.enqueue({ - type: "text-delta", - textDelta: delta.content, - }); - } - - if (delta.reasoning != null) { - controller.enqueue({ - type: "reasoning", - textDelta: delta.reasoning, - }); - } - - const mappedLogprobs = mapHyperbolicChatLogProbsOutput(choice?.logprobs); - if (mappedLogprobs?.length) { - if (logprobs === undefined) logprobs = []; - logprobs.push(...mappedLogprobs); - } - - if (delta.tool_calls != null) { - for (const toolCallDelta of delta.tool_calls) { - const index = toolCallDelta.index; - - // Tool call start. Hyperbolic returns all information except the arguments in the first chunk. 
- if (toolCalls[index] == null) { - if (toolCallDelta.type !== "function") { - throw new InvalidResponseDataError({ - data: toolCallDelta, - message: `Expected 'function' type.`, - }); - } - - if (toolCallDelta.id == null) { - throw new InvalidResponseDataError({ - data: toolCallDelta, - message: `Expected 'id' to be a string.`, - }); - } - - if (toolCallDelta.function?.name == null) { - throw new InvalidResponseDataError({ - data: toolCallDelta, - message: `Expected 'function.name' to be a string.`, - }); - } - - toolCalls[index] = { - id: toolCallDelta.id, - type: "function", - function: { - name: toolCallDelta.function.name, - arguments: toolCallDelta.function.arguments ?? "", - }, - }; - - const toolCall = toolCalls[index]; - - if (toolCall == null) { - throw new Error("Tool call is missing"); - } - - // check if tool call is complete (some providers send the full tool call in one chunk) - if ( - toolCall.function?.name != null && - toolCall.function?.arguments != null && - isParsableJson(toolCall.function.arguments) - ) { - // send delta - controller.enqueue({ - type: "tool-call-delta", - toolCallType: "function", - toolCallId: toolCall.id, - toolName: toolCall.function.name, - argsTextDelta: toolCall.function.arguments, - }); - - // send tool call - controller.enqueue({ - type: "tool-call", - toolCallType: "function", - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - args: toolCall.function.arguments, - }); - } - - continue; - } - - // existing tool call, merge - const toolCall = toolCalls[index]; - - if (toolCall == null) { - throw new Error("Tool call is missing"); - } - - if (toolCallDelta.function?.arguments != null) { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - toolCall.function!.arguments += toolCallDelta.function?.arguments ?? 
""; - } - - // send delta - controller.enqueue({ - type: "tool-call-delta", - toolCallType: "function", - toolCallId: toolCall.id, - toolName: toolCall.function.name, - argsTextDelta: toolCallDelta.function.arguments ?? "", - }); - - // check if tool call is complete - if ( - toolCall.function?.name != null && - toolCall.function?.arguments != null && - isParsableJson(toolCall.function.arguments) - ) { - controller.enqueue({ - type: "tool-call", - toolCallType: "function", - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - args: toolCall.function.arguments, - }); - } - } - } - }, - - flush(controller) { - controller.enqueue({ - type: "finish", - finishReason, - logprobs, - usage, - }); - }, - }), - ), - rawCall: { rawPrompt, rawSettings }, - rawResponse: { headers: responseHeaders }, - warnings: [], - }; - } -} - -const HyperbolicChatCompletionBaseResponseSchema = z.object({ - id: z.string().optional(), - model: z.string().optional(), - usage: z - .object({ - prompt_tokens: z.number(), - completion_tokens: z.number(), - total_tokens: z.number(), - }) - .nullish(), -}); - -// limited version of the schema, focussed on what is needed for the implementation -// this approach limits breakages when the API changes and increases efficiency -const HyperbolicNonStreamChatCompletionResponseSchema = - HyperbolicChatCompletionBaseResponseSchema.extend({ - choices: z.array( - z.object({ - message: z.object({ - role: z.literal("assistant"), - content: z.string().nullable().optional(), - reasoning: z.string().nullable().optional(), - tool_calls: z - .array( - z.object({ - id: z.string().optional().nullable(), - type: z.literal("function"), - function: z.object({ - name: z.string(), - arguments: z.string(), - }), - }), - ) - .optional(), - }), - index: z.number(), - logprobs: z - .object({ - content: z - .array( - z.object({ - token: z.string(), - logprob: z.number(), - top_logprobs: z.array( - z.object({ - token: z.string(), - logprob: z.number(), - 
}), - ), - }), - ) - .nullable(), - }) - .nullable() - .optional(), - finish_reason: z.string().optional().nullable(), - }), - ), - }); - -// limited version of the schema, focussed on what is needed for the implementation -// this approach limits breakages when the API changes and increases efficiency -const HyperbolicStreamChatCompletionChunkSchema = z.union([ - HyperbolicChatCompletionBaseResponseSchema.extend({ - choices: z.array( - z.object({ - delta: z - .object({ - role: z.enum(["assistant"]).optional(), - content: z.string().nullish(), - reasoning: z.string().nullish().optional(), - tool_calls: z - .array( - z.object({ - index: z.number(), - id: z.string().nullish(), - type: z.literal("function").optional(), - function: z.object({ - name: z.string().nullish(), - arguments: z.string().nullish(), - }), - }), - ) - .nullish(), - }) - .nullish(), - logprobs: z - .object({ - content: z - .array( - z.object({ - token: z.string(), - logprob: z.number(), - top_logprobs: z.array( - z.object({ - token: z.string(), - logprob: z.number(), - }), - ), - }), - ) - .nullable(), - }) - .nullish(), - finish_reason: z.string().nullable().optional(), - index: z.number(), - }), - ), - }), - HyperbolicErrorResponseSchema, -]); - -function prepareToolsAndToolChoice( - mode: Parameters[0]["mode"] & { - type: "regular"; - }, -) { - // when the tools array is empty, change it to undefined to prevent errors: - const tools = mode.tools?.length ? 
mode.tools : undefined; - - if (tools == null) { - return { tools: undefined, tool_choice: undefined }; - } - - const mappedTools = tools.map((tool) => { - if (isFunctionTool(tool)) { - return { - type: "function" as const, - function: { - name: tool.name, - description: tool.description, - parameters: tool.parameters, - }, - }; - } else { - return { - type: "function" as const, - function: { - name: tool.name, - }, - }; - } - }); - - const toolChoice = mode.toolChoice; - - if (toolChoice == null) { - return { tools: mappedTools, tool_choice: undefined }; - } - - const type = toolChoice.type; - - switch (type) { - case "auto": - case "none": - case "required": - return { tools: mappedTools, tool_choice: type }; - case "tool": - return { - tools: mappedTools, - tool_choice: { - type: "function", - function: { - name: toolChoice.toolName, - }, - }, - }; - default: { - const _exhaustiveCheck: never = type; - throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`); - } - } -} diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-chat-prompt.ts b/packages/ai-sdk-provider-old/src/hyperbolic-chat-prompt.ts deleted file mode 100644 index f1c2a1e..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-chat-prompt.ts +++ /dev/null @@ -1,67 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -// Type for Hyperbolic Cache Control following Anthropic's pattern -export type HyperbolicCacheControl = { type: "ephemeral" }; - -export type HyperbolicChatPrompt = Array; - -export type ChatCompletionMessageParam = - | ChatCompletionSystemMessageParam - | ChatCompletionUserMessageParam - | ChatCompletionAssistantMessageParam - | ChatCompletionToolMessageParam; - -export interface ChatCompletionSystemMessageParam { - role: "system"; - content: string; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionUserMessageParam { - role: "user"; - content: string | Array; - cache_control?: HyperbolicCacheControl; -} - -export type ChatCompletionContentPart = - | ChatCompletionContentPartText - | ChatCompletionContentPartImage; - -export interface ChatCompletionContentPartImage { - type: "image_url"; - image_url: { - url: string; - }; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionContentPartText { - type: "text"; - text: string; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionAssistantMessageParam { - role: "assistant"; - content?: string | null; - tool_calls?: Array; - cache_control?: HyperbolicCacheControl; -} - -export interface ChatCompletionMessageToolCall { - type: "function"; - id: string; - function: { - arguments: string; - name: string; - }; -} - -export interface ChatCompletionToolMessageParam { - role: "tool"; - content: string; - tool_call_id: string; - cache_control?: HyperbolicCacheControl; -} diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-chat-settings.ts b/packages/ai-sdk-provider-old/src/hyperbolic-chat-settings.ts deleted file mode 100644 index 76c8fc9..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-chat-settings.ts +++ /dev/null @@ -1,50 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { HyperbolicSharedSettings } from "./types"; - -// https://app.hyperbolic.xyz/models -export type HyperbolicChatModelId = string; - -export type HyperbolicChatSettings = { - /** - * Modify the likelihood of specified tokens appearing in the completion. - * - * Accepts a JSON object that maps tokens (specified by their token ID in - * the GPT tokenizer) to an associated bias value from -100 to 100. You - * can use this tokenizer tool to convert text to token IDs. Mathematically, - * the bias is added to the logits generated by the model prior to sampling. - * The exact effect will vary per model, but values between -1 and 1 should - * decrease or increase likelihood of selection; values like -100 or 100 - * should result in a ban or exclusive selection of the relevant token. - * - * As an example, you can pass {"50256": -100} to prevent the <|endoftext|> - * token from being generated. - */ - logitBias?: Record; - - /** - * Return the log probabilities of the tokens. Including logprobs will increase - * the response size and can slow down response times. However, it can - * be useful to better understand how the model is behaving. - * - * Setting to true will return the log probabilities of the tokens that - * were generated. - * - * Setting to a number will return the log probabilities of the top n - * tokens that were generated. - */ - logprobs?: boolean | number; - - /** - * Whether to enable parallel function calling during tool use. Default to true. - */ - parallelToolCalls?: boolean; - - /** - * A unique identifier representing your end-user, which can help Hyperbolic - * to monitor and detect abuse. Learn more. 
- */ - user?: string; -} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.test.ts b/packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.test.ts deleted file mode 100644 index 9fdaec6..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.test.ts +++ /dev/null @@ -1,496 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; -import { - convertReadableStreamToArray, - JsonTestServer, - StreamingTestServer, -} from "@ai-sdk/provider-utils/test"; -import { describe, expect, it } from "vitest"; - -import { createHyperbolic } from "./hyperbolic-provider"; -import { mapHyperbolicCompletionLogProbs } from "./map-hyperbolic-completion-logprobs"; - -const TEST_PROMPT: LanguageModelV1Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -const TEST_LOGPROBS = { - tokens: [" ever", " after", ".\n\n", "The", " end", "."], - token_logprobs: [-0.0664508, -0.014520033, -1.3820221, -0.7890417, -0.5323165, -0.10247037], - top_logprobs: [ - { - " ever": -0.0664508, - }, - { - " after": -0.014520033, - }, - { - ".\n\n": -1.3820221, - }, - { - The: -0.7890417, - }, - { - " end": -0.5323165, - }, - { - ".": -0.10247037, - }, - ] as Record[], -}; - -const provider = createHyperbolic({ - apiKey: "test-api-key", - compatibility: "strict", -}); - -const model = provider.completion("meta-llama/Llama-3.1-405B-FP8"); - -describe("doGenerate", () => { - const server = new JsonTestServer("https://api.hyperbolic.xyz/v1/completions"); - server.setupTestEnvironment(); - - function prepareJsonResponse({ - content = "", - usage = { - prompt_tokens: 4, - total_tokens: 34, - completion_tokens: 30, - }, - logprobs = null, - finish_reason = "stop", - }: { - content?: string; - usage?: { - 
prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - tokens: string[]; - token_logprobs: number[]; - top_logprobs: Record[]; - } | null; - finish_reason?: string; - }) { - server.responseBodyJson = { - id: "cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB", - object: "text_completion", - created: 1711363706, - model: "meta-llama/Llama-3.1-405B-FP8", - choices: [ - { - text: content, - index: 0, - logprobs, - finish_reason, - }, - ], - usage, - }; - } - - it("should extract text response", async () => { - prepareJsonResponse({ content: "Hello, World!" }); - - const { text } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(text).toStrictEqual("Hello, World!"); - }); - - it("should extract usage", async () => { - prepareJsonResponse({ - content: "", - usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 }, - }); - - const { usage } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(usage).toStrictEqual({ - promptTokens: 20, - completionTokens: 5, - }); - }); - - it("should extract logprobs", async () => { - prepareJsonResponse({ logprobs: TEST_LOGPROBS }); - - const provider = createHyperbolic({ apiKey: "test-api-key" }); - - const response = await provider.completion("openai/gpt-3.5-turbo", { logprobs: 1 }).doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - expect(response.logprobs).toStrictEqual(mapHyperbolicCompletionLogProbs(TEST_LOGPROBS)); - }); - - it("should extract finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "stop", - }); - - const { finishReason } = await provider.completion("meta-llama/Llama-3.1-405B-FP8").doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(finishReason).toStrictEqual("stop"); - }); - - it("should support unknown 
finish reason", async () => { - prepareJsonResponse({ - content: "", - finish_reason: "eos", - }); - - const { finishReason } = await provider.completion("meta-llama/Llama-3.1-405B-FP8").doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(finishReason).toStrictEqual("unknown"); - }); - - it("should expose the raw response headers", async () => { - prepareJsonResponse({ content: "" }); - - server.responseHeaders = { - "test-header": "test-value", - }; - - const { rawResponse } = await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(rawResponse?.headers).toStrictEqual({ - // default headers: - "content-length": "273", - "content-type": "application/json", - - // custom header - "test-header": "test-value", - }); - }); - - it("should pass the model and the prompt", async () => { - prepareJsonResponse({ content: "" }); - - await model.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "meta-llama/Llama-3.1-405B-FP8", - prompt: "Hello", - }); - }); - - it("should pass the models array when provided", async () => { - prepareJsonResponse({ content: "" }); - - const customModel = provider.completion("meta-llama/Llama-3.1-405B-FP8", { - models: ["openai/gpt-4", "anthropic/claude-2"], - }); - - await customModel.doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - model: "meta-llama/Llama-3.1-405B-FP8", - models: ["openai/gpt-4", "anthropic/claude-2"], - prompt: "Hello", - }); - }); - - it("should pass headers", async () => { - prepareJsonResponse({ content: "" }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await 
provider.completion("meta-llama/Llama-3.1-405B-FP8").doGenerate({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - const requestHeaders = await server.getRequestHeaders(); - - expect(requestHeaders).toStrictEqual({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - }); -}); - -describe("doStream", () => { - const server = new StreamingTestServer("https://api.hyperbolic.xyz/v1/completions"); - - server.setupTestEnvironment(); - - function prepareStreamResponse({ - content, - finish_reason = "stop", - usage = { - prompt_tokens: 10, - total_tokens: 372, - completion_tokens: 362, - }, - logprobs = null, - }: { - content: string[]; - usage?: { - prompt_tokens: number; - total_tokens: number; - completion_tokens: number; - }; - logprobs?: { - tokens: string[]; - token_logprobs: number[]; - top_logprobs: Record[]; - } | null; - finish_reason?: string; - }) { - server.responseChunks = [ - ...content.map((text) => { - return ( - `data: {"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,` + - `"choices":[{"text":"${text}","index":0,"logprobs":null,"finish_reason":null}],"model":"meta-llama/Llama-3.1-405B-FP8"}\n\n` - ); - }), - `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,` + - `"choices":[{"text":"","index":0,"logprobs":${JSON.stringify( - logprobs, - )},"finish_reason":"${finish_reason}"}],"model":"meta-llama/Llama-3.1-405B-FP8"}\n\n`, - `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,` + - `"model":"meta-llama/Llama-3.1-405B-FP8","usage":${JSON.stringify( - usage, - )},"choices":[]}\n\n`, - "data: [DONE]\n\n", - ]; - } - - it("should stream text deltas", async () => { - 
prepareStreamResponse({ - content: ["Hello", ", ", "World!"], - finish_reason: "stop", - usage: { - prompt_tokens: 10, - total_tokens: 372, - completion_tokens: 362, - }, - logprobs: TEST_LOGPROBS, - }); - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - // note: space moved to last chunk bc of trimming - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { type: "text-delta", textDelta: "Hello" }, - { type: "text-delta", textDelta: ", " }, - { type: "text-delta", textDelta: "World!" }, - { type: "text-delta", textDelta: "" }, - { - type: "finish", - finishReason: "stop", - logprobs: mapHyperbolicCompletionLogProbs(TEST_LOGPROBS), - usage: { promptTokens: 10, completionTokens: 362 }, - }, - ]); - }); - - it("should handle error stream parts", async () => { - server.responseChunks = [ - `data: {"object": "error", "message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` + - `help center at app.hyperbolic.xyz/support if you keep seeing this error.","type":"server_error","param":null,"code":null}\n\n`, - "data: [DONE]\n\n", - ]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await convertReadableStreamToArray(stream)).toStrictEqual([ - { - type: "error", - error: { - object: "error", - message: - "The server had an error processing your request. Sorry about that! 
" + - "You can retry your request, or contact us through our help center at " + - "app.hyperbolic.xyz/support if you keep seeing this error.", - type: "server_error", - code: null, - param: null, - }, - }, - { - finishReason: "error", - logprobs: undefined, - type: "finish", - usage: { - completionTokens: NaN, - promptTokens: NaN, - }, - }, - ]); - }); - - it("should handle unparsable stream parts", async () => { - server.responseChunks = [`data: {unparsable}\n\n`, "data: [DONE]\n\n"]; - - const { stream } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - const elements = await convertReadableStreamToArray(stream); - - expect(elements.length).toBe(2); - expect(elements[0]?.type).toBe("error"); - expect(elements[1]).toStrictEqual({ - finishReason: "error", - logprobs: undefined, - type: "finish", - usage: { - completionTokens: NaN, - promptTokens: NaN, - }, - }); - }); - - it("should expose the raw response headers", async () => { - prepareStreamResponse({ content: [] }); - - server.responseHeaders = { - "test-header": "test-value", - }; - - const { rawResponse } = await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(rawResponse?.headers).toStrictEqual({ - // default headers: - "content-type": "text/event-stream", - "cache-control": "no-cache", - connection: "keep-alive", - - // custom header - "test-header": "test-value", - }); - }); - - it("should pass the model and the prompt", async () => { - prepareStreamResponse({ content: [] }); - - await model.doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - expect(await server.getRequestBodyJson()).toStrictEqual({ - stream: true, - stream_options: { include_usage: true }, - model: "meta-llama/Llama-3.1-405B-FP8", - prompt: "Hello", - }); - }); - - it("should pass headers", async () => { - prepareStreamResponse({ content: [] }); - - const provider = 
createHyperbolic({ - apiKey: "test-api-key", - headers: { - "Custom-Provider-Header": "provider-header-value", - }, - }); - - await provider.completion("meta-llama/Llama-3.1-405B-FP8").doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - headers: { - "Custom-Request-Header": "request-header-value", - }, - }); - - const requestHeaders = await server.getRequestHeaders(); - - expect(requestHeaders).toStrictEqual({ - authorization: "Bearer test-api-key", - "content-type": "application/json", - "custom-provider-header": "provider-header-value", - "custom-request-header": "request-header-value", - }); - }); - - it("should pass extra body", async () => { - prepareStreamResponse({ content: [] }); - - const provider = createHyperbolic({ - apiKey: "test-api-key", - extraBody: { - custom_field: "custom_value", - providers: { - anthropic: { - custom_field: "custom_value", - }, - }, - }, - }); - - await provider.completion("openai/gpt-4o").doStream({ - inputFormat: "prompt", - mode: { type: "regular" }, - prompt: TEST_PROMPT, - }); - - const requestBody = await server.getRequestBodyJson(); - - expect(requestBody).toHaveProperty("custom_field", "custom_value"); - expect(requestBody).toHaveProperty("providers.anthropic.custom_field", "custom_value"); - }); -}); diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.ts b/packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.ts deleted file mode 100644 index a37c22c..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-completion-language-model.ts +++ /dev/null @@ -1,352 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { - LanguageModelV1, - LanguageModelV1FinishReason, - LanguageModelV1LogProbs, - LanguageModelV1StreamPart, -} from "@ai-sdk/provider"; -import type { ParseResult } from "@ai-sdk/provider-utils"; -import { UnsupportedFunctionalityError } from "@ai-sdk/provider"; -import { - combineHeaders, - createEventSourceResponseHandler, - createJsonResponseHandler, - postJsonToApi, -} from "@ai-sdk/provider-utils"; -import { z } from "zod"; - -import type { - HyperbolicCompletionModelId, - HyperbolicCompletionSettings, -} from "./hyperbolic-completion-settings"; -import { convertToHyperbolicCompletionPrompt } from "./convert-to-hyperbolic-completion-prompt"; -import { - HyperbolicErrorResponseSchema, - hyperbolicFailedResponseHandler, - isHyperbolicError, -} from "./hyperbolic-error"; -import { mapHyperbolicCompletionLogProbs } from "./map-hyperbolic-completion-logprobs"; -import { mapHyperbolicFinishReason } from "./map-hyperbolic-finish-reason"; - -type HyperbolicCompletionConfig = { - provider: string; - compatibility: "strict" | "compatible"; - headers: () => Record; - url: (options: { modelId: string; path: string }) => string; - fetch?: typeof fetch; - extraBody?: Record; -}; - -export class HyperbolicCompletionLanguageModel implements LanguageModelV1 { - readonly specificationVersion = "v1"; - readonly defaultObjectGenerationMode = undefined; - - readonly modelId: HyperbolicCompletionModelId; - readonly settings: HyperbolicCompletionSettings; - - private readonly config: HyperbolicCompletionConfig; - - constructor( - modelId: HyperbolicCompletionModelId, - settings: HyperbolicCompletionSettings, - config: HyperbolicCompletionConfig, - ) { - this.modelId = modelId; - this.settings = settings; - this.config = config; - } - - get provider(): string { - return this.config.provider; - } - - private getArgs({ - mode, - inputFormat, - prompt, - maxTokens, - temperature, - topP, - frequencyPenalty, - 
presencePenalty, - seed, - responseFormat, - topK, - stopSequences, - providerMetadata, - }: Parameters[0]) { - const type = mode.type; - - const extraCallingBody = providerMetadata?.["hyperbolic"] ?? {}; - - const { prompt: completionPrompt } = convertToHyperbolicCompletionPrompt({ - prompt, - inputFormat, - }); - - const baseArgs = { - // model id: - model: this.modelId, - models: this.settings.models, - - // model specific settings: - logit_bias: this.settings.logitBias, - logprobs: - typeof this.settings.logprobs === "number" - ? this.settings.logprobs - : typeof this.settings.logprobs === "boolean" - ? this.settings.logprobs - ? 0 - : undefined - : undefined, - suffix: this.settings.suffix, - user: this.settings.user, - - // standardized settings: - max_tokens: maxTokens, - temperature, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - seed, - - stop: stopSequences, - response_format: responseFormat, - top_k: topK, - - // prompt: - prompt: completionPrompt, - - // Hyperbolic specific settings: - include_reasoning: this.settings.includeReasoning, - reasoning: this.settings.reasoning, - - // extra body: - ...this.config.extraBody, - ...this.settings.extraBody, - ...extraCallingBody, - }; - - switch (type) { - case "regular": { - if (mode.tools?.length) { - throw new UnsupportedFunctionalityError({ - functionality: "tools", - }); - } - - if (mode.toolChoice) { - throw new UnsupportedFunctionalityError({ - functionality: "toolChoice", - }); - } - - return baseArgs; - } - - case "object-json": { - throw new UnsupportedFunctionalityError({ - functionality: "object-json mode", - }); - } - - case "object-tool": { - throw new UnsupportedFunctionalityError({ - functionality: "object-tool mode", - }); - } - - // Handle all non-text types with a single default case - default: { - const _exhaustiveCheck: never = type; - throw new UnsupportedFunctionalityError({ - functionality: `${_exhaustiveCheck} mode`, - }); - } - } - } - - 
async doGenerate( - options: Parameters[0], - ): Promise>> { - const args = this.getArgs(options); - - const { responseHeaders, value: response } = await postJsonToApi({ - url: this.config.url({ - path: "/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: args, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler(HyperbolicCompletionChunkSchema), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - const { prompt: rawPrompt, ...rawSettings } = args; - if (isHyperbolicError(response)) { - throw new Error(`${response.message}`); - } - - const choice = response.choices[0]; - - if (!choice) { - throw new Error("No choice in Hyperbolic completion response"); - } - - return { - response: { - id: response.id, - modelId: response.model, - }, - text: choice.text ?? "", - reasoning: choice.reasoning || undefined, - usage: { - promptTokens: response.usage?.prompt_tokens ?? 0, - completionTokens: response.usage?.completion_tokens ?? 0, - }, - finishReason: mapHyperbolicFinishReason(choice.finish_reason), - logprobs: mapHyperbolicCompletionLogProbs(choice.logprobs), - rawCall: { rawPrompt, rawSettings }, - rawResponse: { headers: responseHeaders }, - warnings: [], - }; - } - - async doStream( - options: Parameters[0], - ): Promise>> { - const args = this.getArgs(options); - - const { responseHeaders, value: response } = await postJsonToApi({ - url: this.config.url({ - path: "/completions", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: { - ...this.getArgs(options), - stream: true, - - // only include stream_options when in strict compatibility mode: - stream_options: - this.config.compatibility === "strict" ? 
{ include_usage: true } : undefined, - }, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createEventSourceResponseHandler(HyperbolicCompletionChunkSchema), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - const { prompt: rawPrompt, ...rawSettings } = args; - - let finishReason: LanguageModelV1FinishReason = "other"; - let usage: { promptTokens: number; completionTokens: number } = { - promptTokens: Number.NaN, - completionTokens: Number.NaN, - }; - let logprobs: LanguageModelV1LogProbs; - - return { - stream: response.pipeThrough( - new TransformStream< - ParseResult>, - LanguageModelV1StreamPart - >({ - transform(chunk, controller) { - // handle failed chunk parsing / validation: - if (!chunk.success) { - finishReason = "error"; - controller.enqueue({ type: "error", error: chunk.error }); - return; - } - - const value = chunk.value; - - // handle error chunks: - if (isHyperbolicError(value)) { - finishReason = "error"; - controller.enqueue({ type: "error", error: value }); - return; - } - - if (value.usage != null) { - usage = { - promptTokens: value.usage.prompt_tokens, - completionTokens: value.usage.completion_tokens, - }; - } - - const choice = value.choices[0]; - - if (choice?.finish_reason != null) { - finishReason = mapHyperbolicFinishReason(choice.finish_reason); - } - - if (choice?.text != null) { - controller.enqueue({ - type: "text-delta", - textDelta: choice.text, - }); - } - - const mappedLogprobs = mapHyperbolicCompletionLogProbs(choice?.logprobs); - if (mappedLogprobs?.length) { - if (logprobs === undefined) logprobs = []; - logprobs.push(...mappedLogprobs); - } - }, - - flush(controller) { - controller.enqueue({ - type: "finish", - finishReason, - logprobs, - usage, - }); - }, - }), - ), - rawCall: { rawPrompt, rawSettings }, - rawResponse: { headers: responseHeaders }, - warnings: [], - }; - } -} - -// limited version of the schema, focussed on what is needed for the implementation 
-// this approach limits breakages when the API changes and increases efficiency -const HyperbolicCompletionChunkSchema = z.union([ - z.object({ - id: z.string().optional(), - model: z.string().optional(), - choices: z.array( - z.object({ - text: z.string(), - reasoning: z.string().nullish().optional(), - finish_reason: z.string().nullish(), - index: z.number(), - logprobs: z - .object({ - tokens: z.array(z.string()), - token_logprobs: z.array(z.number()), - top_logprobs: z.array(z.record(z.string(), z.number())).nullable(), - }) - .nullable() - .optional(), - }), - ), - usage: z - .object({ - prompt_tokens: z.number(), - completion_tokens: z.number(), - }) - .optional() - .nullable(), - }), - HyperbolicErrorResponseSchema, -]); diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-completion-settings.ts b/packages/ai-sdk-provider-old/src/hyperbolic-completion-settings.ts deleted file mode 100644 index efc31a0..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-completion-settings.ts +++ /dev/null @@ -1,42 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { HyperbolicSharedSettings } from "./types"; - -export type HyperbolicCompletionModelId = string; - -export type HyperbolicCompletionSettings = { - /** - * Modify the likelihood of specified tokens appearing in the completion. - * - * Accepts a JSON object that maps tokens (specified by their token ID in - * the GPT tokenizer) to an associated bias value from -100 to 100. You - * can use this tokenizer tool to convert text to token IDs. Mathematically, - * the bias is added to the logits generated by the model prior to sampling. - * The exact effect will vary per model, but values between -1 and 1 should - * decrease or increase likelihood of selection; values like -100 or 100 - * should result in a ban or exclusive selection of the relevant token. 
- * As an example, you can pass {"50256": -100} to prevent the <|endoftext|> - * token from being generated. - */ - logitBias?: Record; - - /** - * Return the log probabilities of the tokens. Including logprobs will increase - * the response size and can slow down response times. However, it can - * be useful to better understand how the model is behaving. - * - * Setting to true will return the log probabilities of the tokens that - * were generated. - * - * Setting to a number will return the log probabilities of the top n - * tokens that were generated. - */ - logprobs?: boolean | number; - - /** - * The suffix that comes after a completion of inserted text. - */ - suffix?: string; -} & HyperbolicSharedSettings; diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-error.ts b/packages/ai-sdk-provider-old/src/hyperbolic-error.ts deleted file mode 100644 index 60eed7e..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-error.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { TypeValidationError } from "ai"; -import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"; -import { JSONParseError } from "ai"; -import { z } from "zod"; - -export const HyperbolicErrorResponseSchema = z.object({ - object: z.literal("error"), - message: z.string(), - type: z.string(), - param: z.any().nullable(), - code: z.coerce.number().nullable(), -}); - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -export const isHyperbolicError = (data: any): data is HyperbolicErrorData => { - return "object" in data && data.object === "error"; -}; - -export type HyperbolicErrorData = z.infer; - -export const hyperbolicFailedResponseHandler = createJsonErrorResponseHandler({ - errorSchema: HyperbolicErrorResponseSchema, - errorToMessage: (data) => data.message, -}); - -/** - * Error messages from the API are sometimes an ugly combo of text and JSON in a single chunk. Extract data from error message if it contains JSON - */ -export const tryParsingHyperbolicError = (error: JSONParseError | TypeValidationError) => { - if (!JSONParseError.isInstance(error)) { - return undefined; - } - - const jsonMatch = error.text.match(/\{.*\}/); // Match between brackets - if (jsonMatch) { - try { - const parsedErrorJson = JSON.parse(jsonMatch[0]); - if (parsedErrorJson.message) { - return HyperbolicErrorResponseSchema.parse(parsedErrorJson); - } - } catch { - return undefined; - } - } -}; diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-image-language-model.ts b/packages/ai-sdk-provider-old/src/hyperbolic-image-language-model.ts deleted file mode 100644 index da5501d..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-image-language-model.ts +++ /dev/null @@ -1,130 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { ImageModelV1, ImageModelV1CallWarning } from "@ai-sdk/provider"; -import { combineHeaders, createJsonResponseHandler, postJsonToApi } from "@ai-sdk/provider-utils"; -import { z } from "zod"; - -import type { - HyperbolicImageModelId, - HyperbolicImageProviderOptions, - HyperbolicImageProviderResponseMetadata, - HyperbolicImageSettings, -} from "./hyperbolic-image-settings"; -import { hyperbolicFailedResponseHandler } from "./hyperbolic-error"; - -type HyperbolicImageModelConfig = { - provider: string; - compatibility: "strict" | "compatible"; - headers: () => Record; - url: (options: { modelId: string; path: string }) => string; - fetch?: typeof fetch; - extraBody?: Record; -}; - -export class HyperbolicImageModel implements ImageModelV1 { - readonly specificationVersion = "v1"; - readonly provider = "hyperbolic.image"; - - get maxImagesPerCall(): number { - return this.settings.maxImagesPerCall ?? 1; - } - - constructor( - readonly modelId: HyperbolicImageModelId, - private readonly settings: HyperbolicImageSettings, - private readonly config: HyperbolicImageModelConfig, - ) {} - - async doGenerate( - options: Omit[0], "providerOptions"> & { - providerOptions: { - hyperbolic?: HyperbolicImageProviderOptions; - }; - }, - ): Promise< - Omit>, "response"> & { - response: Awaited>["response"] & { - hyperbolic: HyperbolicImageProviderResponseMetadata; - }; - } - > { - const warnings: Array = []; - const [width, height] = options.size ? 
options.size.split("x").map(Number) : []; - - const args = { - prompt: options.prompt, - height, - width, - cfg_scale: options.providerOptions?.hyperbolic?.cfgScale, - enable_refiner: options.providerOptions?.hyperbolic?.enableRefiner, - model_name: this.modelId, - negative_prompt: options.providerOptions?.hyperbolic?.negativePrompt, - steps: options.providerOptions?.hyperbolic?.steps, - strength: options.providerOptions?.hyperbolic?.strength, - image: options.providerOptions?.hyperbolic?.image, - }; - - if (options.aspectRatio != undefined) { - warnings.push({ - type: "unsupported-setting", - setting: "aspectRatio", - details: "This model does not support `aspectRatio`. Use `size` instead.", - }); - } - if (options.seed != undefined) { - warnings.push({ - type: "unsupported-setting", - setting: "seed", - details: "This model does not support `seed`.", - }); - } - if (options.n != undefined) { - warnings.push({ - type: "unsupported-setting", - setting: "n", - details: "This model does not support `n`.", - }); - } - - const { value: response, responseHeaders } = await postJsonToApi({ - url: this.config.url({ - path: "/image/generation", - modelId: this.modelId, - }), - headers: combineHeaders(this.config.headers(), options.headers), - body: args, - failedResponseHandler: hyperbolicFailedResponseHandler, - successfulResponseHandler: createJsonResponseHandler(hyperbolicImageResponseSchema), - abortSignal: options.abortSignal, - fetch: this.config.fetch, - }); - - return { - images: response.images.map((image) => image.image), - warnings, - response: { - timestamp: new Date(), - modelId: this.modelId, - headers: responseHeaders, - hyperbolic: { - inferenceTime: response.inference_time, - randomSeeds: response.images.map((image) => image.random_seed), - }, - }, - }; - } -} - -// minimal version of the schema, focussed on what is needed for the implementation to avoid breaking changes -const hyperbolicImageResponseSchema = z.object({ - images: z.array( - z.object({ - 
image: z.string(), - index: z.number(), - random_seed: z.number(), - }), - ), - inference_time: z.number(), -}); diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-image-settings.ts b/packages/ai-sdk-provider-old/src/hyperbolic-image-settings.ts deleted file mode 100644 index c263646..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-image-settings.ts +++ /dev/null @@ -1,37 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { GenerateImageResult } from "ai"; - -import type { HyperbolicSharedSettings } from "./types"; - -export type HyperbolicImageModelId = string; - -export type HyperbolicImageSettings = { - /** - * Override the maximum number of images per call (default is dependent on the - * model, or 1 for an unknown model). - */ - maxImagesPerCall?: number; -} & HyperbolicSharedSettings; - -export type HyperbolicImageProviderOptions = { - cfgScale?: number; - negativePrompt?: string; - steps?: number; - strength?: number; - enableRefiner?: boolean; - image?: string; -}; - -export type HyperbolicImageProviderResponseMetadata = { - inferenceTime: number; - randomSeeds: number[]; -}; - -export type HyperbolicGenerateImageResult = Omit & { - responses: (GenerateImageResult["responses"][number] & { - hyperbolic: HyperbolicImageProviderResponseMetadata; - })[]; -}; diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-provider-options.test.ts b/packages/ai-sdk-provider-old/src/hyperbolic-provider-options.test.ts deleted file mode 100644 index b1db769..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-provider-options.test.ts +++ /dev/null @@ -1,64 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1Prompt } from "@ai-sdk/provider"; -import { createTestServer } from "@ai-sdk/provider-utils/test"; -import { streamText } from "ai"; -import { describe, expect, it, vi } from "vitest"; - -import { createHyperbolic } from "./hyperbolic-provider"; - -// Add type assertions for the mocked classes -const TEST_MESSAGES: LanguageModelV1Prompt = [ - { role: "user", content: [{ type: "text", text: "Hello" }] }, -]; - -describe("providerOptions", () => { - const server = createTestServer({ - "https://api.hyperbolic.xyz/v1/chat/completions": { - response: { - type: "stream-chunks", - chunks: [], - }, - }, - }); - - beforeEach(() => { - vi.clearAllMocks(); - }); - - it("should set providerOptions hyperbolic to extra body", async () => { - const hyperbolic = createHyperbolic({ - apiKey: "test", - }); - const model = hyperbolic("Qwen/Qwen2.5-72B-Instruct"); - - await streamText({ - model, - messages: TEST_MESSAGES, - providerOptions: { - hyperbolic: { - reasoning: { - max_tokens: 1000, - }, - }, - }, - }).consumeStream(); - - expect(await server.calls[0]?.requestBody).toStrictEqual({ - messages: [ - { - content: "Hello", - role: "user", - }, - ], - reasoning: { - max_tokens: 1000, - }, - temperature: 0, - model: "Qwen/Qwen2.5-72B-Instruct", - stream: true, - }); - }); -}); diff --git a/packages/ai-sdk-provider-old/src/hyperbolic-provider.ts b/packages/ai-sdk-provider-old/src/hyperbolic-provider.ts deleted file mode 100644 index e878dcf..0000000 --- a/packages/ai-sdk-provider-old/src/hyperbolic-provider.ts +++ /dev/null @@ -1,180 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils"; - -import type { HyperbolicChatModelId, HyperbolicChatSettings } from "./hyperbolic-chat-settings"; -import type { - HyperbolicCompletionModelId, - HyperbolicCompletionSettings, -} from "./hyperbolic-completion-settings"; -import type { HyperbolicImageModelId, HyperbolicImageSettings } from "./hyperbolic-image-settings"; -import { HyperbolicChatLanguageModel } from "./hyperbolic-chat-language-model"; -import { HyperbolicCompletionLanguageModel } from "./hyperbolic-completion-language-model"; -import { HyperbolicImageModel } from "./hyperbolic-image-language-model"; - -export type { HyperbolicCompletionSettings }; - -export interface HyperbolicProvider { - ( - modelId: HyperbolicChatModelId, - settings?: HyperbolicCompletionSettings, - ): HyperbolicCompletionLanguageModel; - (modelId: HyperbolicChatModelId, settings?: HyperbolicChatSettings): HyperbolicChatLanguageModel; - - languageModel( - modelId: HyperbolicChatModelId, - settings?: HyperbolicCompletionSettings, - ): HyperbolicCompletionLanguageModel; - languageModel( - modelId: HyperbolicChatModelId, - settings?: HyperbolicChatSettings, - ): HyperbolicChatLanguageModel; - - /** - * Creates a Hyperbolic chat model for text generation. - */ - chat( - modelId: HyperbolicChatModelId, - settings?: HyperbolicChatSettings, - ): HyperbolicChatLanguageModel; - - /** - * Creates a Hyperbolic completion model for text generation. - */ - completion( - modelId: HyperbolicCompletionModelId, - settings?: HyperbolicCompletionSettings, - ): HyperbolicCompletionLanguageModel; - - /** - * Creates a Hyperbolic image model for image generation. - */ - image(modelId: HyperbolicImageModelId, settings?: HyperbolicImageSettings): HyperbolicImageModel; -} - -export interface HyperbolicProviderSettings { - /** - * Base URL for the Hyperbolic API calls. 
- */ - baseURL?: string; - - /** - * @deprecated Use `baseURL` instead. - */ - baseUrl?: string; - - /** - * API key for authenticating requests. - */ - apiKey?: string; - - /** - * Custom headers to include in the requests. - */ - headers?: Record; - - /** - * Hyperbolic compatibility mode. Should be set to `strict` when using the Hyperbolic API, - * and `compatible` when using 3rd party providers. In `compatible` mode, newer - * information such as streamOptions are not being sent. Defaults to 'compatible'. - */ - compatibility?: "strict" | "compatible"; - - /** - * Custom fetch implementation. You can use it as a middleware to intercept requests, - * or to provide a custom fetch implementation for e.g. testing. - */ - fetch?: typeof fetch; - - /** - * A JSON object to send as the request body to access Hyperbolic features & upstream provider features. - */ - extraBody?: Record; -} - -/** - * Create an Hyperbolic provider instance. - */ -export function createHyperbolic(options: HyperbolicProviderSettings = {}): HyperbolicProvider { - const baseURL = - withoutTrailingSlash(options.baseURL ?? options.baseUrl) ?? "https://api.hyperbolic.xyz/v1"; - - // we default to compatible, because strict breaks providers like Groq: - const compatibility = options.compatibility ?? 
"compatible"; - - const getHeaders = () => ({ - Authorization: `Bearer ${loadApiKey({ - apiKey: options.apiKey, - environmentVariableName: "HYPERBOLIC_API_KEY", - description: "Hyperbolic", - })}`, - ...options.headers, - }); - - const createChatModel = (modelId: HyperbolicChatModelId, settings: HyperbolicChatSettings = {}) => - new HyperbolicChatLanguageModel(modelId, settings, { - provider: "hyperbolic.chat", - url: ({ path }) => `${baseURL}${path}`, - headers: getHeaders, - compatibility, - fetch: options.fetch, - extraBody: options.extraBody, - }); - - const createCompletionModel = ( - modelId: HyperbolicCompletionModelId, - settings: HyperbolicCompletionSettings = {}, - ) => - new HyperbolicCompletionLanguageModel(modelId, settings, { - provider: "hyperbolic.completion", - url: ({ path }) => `${baseURL}${path}`, - headers: getHeaders, - compatibility, - fetch: options.fetch, - extraBody: options.extraBody, - }); - - const createImageModel = ( - modelId: HyperbolicImageModelId, - settings: HyperbolicImageSettings = {}, - ) => - new HyperbolicImageModel(modelId, settings, { - provider: "hyperbolic.image", - url: ({ path }) => `${baseURL}${path}`, - headers: getHeaders, - compatibility, - fetch: options.fetch, - extraBody: options.extraBody, - }); - - const createLanguageModel = ( - modelId: HyperbolicChatModelId | HyperbolicCompletionModelId, - settings?: HyperbolicChatSettings | HyperbolicCompletionSettings, - ) => { - if (new.target) { - throw new Error("The Hyperbolic model function cannot be called with the new keyword."); - } - - if (modelId === "openai/gpt-3.5-turbo-instruct") { - return createCompletionModel(modelId, settings as HyperbolicCompletionSettings); - } - - return createChatModel(modelId, settings as HyperbolicChatSettings); - }; - - const provider = function ( - modelId: HyperbolicChatModelId | HyperbolicCompletionModelId, - settings?: HyperbolicChatSettings | HyperbolicCompletionSettings, - ) { - return createLanguageModel(modelId, settings); 
- }; - - provider.languageModel = createLanguageModel; - provider.chat = createChatModel; - provider.completion = createCompletionModel; - provider.image = createImageModel; - - return provider as HyperbolicProvider; -} diff --git a/packages/ai-sdk-provider-old/src/index.ts b/packages/ai-sdk-provider-old/src/index.ts deleted file mode 100644 index 0180b06..0000000 --- a/packages/ai-sdk-provider-old/src/index.ts +++ /dev/null @@ -1,3 +0,0 @@ -export * from "./hyperbolic-provider"; -export * from "./types"; -export * from "./hyperbolic-error"; diff --git a/packages/ai-sdk-provider-old/src/internal/index.ts b/packages/ai-sdk-provider-old/src/internal/index.ts deleted file mode 100644 index c9936d2..0000000 --- a/packages/ai-sdk-provider-old/src/internal/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -export * from "../hyperbolic-chat-language-model"; -export * from "../hyperbolic-chat-settings"; -export * from "../hyperbolic-completion-language-model"; -export * from "../hyperbolic-completion-settings"; -export * from "../hyperbolic-image-language-model"; -export * from "../hyperbolic-image-settings"; -export * from "../types"; diff --git a/packages/ai-sdk-provider-old/src/map-hyperbolic-chat-logprobs.ts b/packages/ai-sdk-provider-old/src/map-hyperbolic-chat-logprobs.ts deleted file mode 100644 index f325b88..0000000 --- a/packages/ai-sdk-provider-old/src/map-hyperbolic-chat-logprobs.ts +++ /dev/null @@ -1,37 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. 
-// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1LogProbs } from "@ai-sdk/provider"; - -type HyperbolicChatLogProbs = { - content: - | { - token: string; - logprob: number; - top_logprobs: - | { - token: string; - logprob: number; - }[] - | null; - }[] - | null; -}; - -export function mapHyperbolicChatLogProbsOutput( - logprobs: HyperbolicChatLogProbs | null | undefined, -): LanguageModelV1LogProbs | undefined { - return ( - logprobs?.content?.map(({ token, logprob, top_logprobs }) => ({ - token, - logprob, - topLogprobs: top_logprobs - ? top_logprobs.map(({ token, logprob }) => ({ - token, - logprob, - })) - : [], - })) ?? undefined - ); -} diff --git a/packages/ai-sdk-provider-old/src/map-hyperbolic-completion-logprobs.ts b/packages/ai-sdk-provider-old/src/map-hyperbolic-completion-logprobs.ts deleted file mode 100644 index 121d731..0000000 --- a/packages/ai-sdk-provider-old/src/map-hyperbolic-completion-logprobs.ts +++ /dev/null @@ -1,24 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -type HyperbolicCompletionLogProps = { - tokens: string[]; - token_logprobs: number[]; - top_logprobs: Record[] | null; -}; - -export function mapHyperbolicCompletionLogProbs( - logprobs: HyperbolicCompletionLogProps | null | undefined, -) { - return logprobs?.tokens.map((token, index) => ({ - token, - logprob: logprobs.token_logprobs[index] ?? 0, - topLogprobs: logprobs.top_logprobs - ? Object.entries(logprobs.top_logprobs[index] ?? {}).map(([token, logprob]) => ({ - token, - logprob, - })) - : [], - })); -} diff --git a/packages/ai-sdk-provider-old/src/map-hyperbolic-finish-reason.ts b/packages/ai-sdk-provider-old/src/map-hyperbolic-finish-reason.ts deleted file mode 100644 index 5763ff8..0000000 --- a/packages/ai-sdk-provider-old/src/map-hyperbolic-finish-reason.ts +++ /dev/null @@ -1,23 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. 
on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1FinishReason } from "@ai-sdk/provider"; - -export function mapHyperbolicFinishReason( - finishReason: string | null | undefined, -): LanguageModelV1FinishReason { - switch (finishReason) { - case "stop": - return "stop"; - case "length": - return "length"; - case "content_filter": - return "content-filter"; - case "function_call": - case "tool_calls": - return "tool-calls"; - default: - return "unknown"; - } -} diff --git a/packages/ai-sdk-provider-old/src/scripts/templates/models.ts.hbs b/packages/ai-sdk-provider-old/src/scripts/templates/models.ts.hbs deleted file mode 100644 index 4eefd94..0000000 --- a/packages/ai-sdk-provider-old/src/scripts/templates/models.ts.hbs +++ /dev/null @@ -1,32 +0,0 @@ -// prettier-ignore - -// This file is auto-generated by the pnpm codegen:update-models script. Do not edit manually. - -const _models = [ -{{#each modelId}} - "{{this}}", -{{/each}} -] as const; - -const _imageModels = [ -{{#each imageModelId}} - "{{this}}", -{{/each}} -] as const; - -const _chatModels = [ -{{#each chatModelId}} - "{{this}}", -{{/each}} -] as const; - -const _completionModels = [ -{{#each completionModelId}} - "{{this}}", -{{/each}} -] as const; - -export type HyperbolicImageModelId = (typeof _imageModels)[number] | (string & {}); -export type HyperbolicChatModelId = (typeof _chatModels)[number] | (string & {}); -export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | (string & {}); -export type HyperbolicModelId = HyperbolicImageModelId | HyperbolicChatModelId | HyperbolicCompletionModelId; diff --git a/packages/ai-sdk-provider-old/src/scripts/update-models-list.ts b/packages/ai-sdk-provider-old/src/scripts/update-models-list.ts deleted file mode 100644 index 52aea76..0000000 --- a/packages/ai-sdk-provider-old/src/scripts/update-models-list.ts +++ /dev/null @@ -1,31 +0,0 @@ -import 
"@hyperbolic/api"; - -import { readFileSync, writeFileSync } from "fs"; -import path from "path"; -import { fileURLToPath } from "url"; -import Handlebars from "handlebars"; - -import { hyperbolicClient, showModelsV1ModelsGet } from "@hyperbolic/api"; - -/** - * Generates the list of models supported by Hyperbolic for the AI SDK Provider. - */ -const main = async () => { - const { - data: { data }, - } = await showModelsV1ModelsGet({ client: hyperbolicClient, throwOnError: true }); - - const models = data as { id: string; [key: string]: unknown }[]; - const modelIds = models.map((model) => model.id); - - const __dirname = path.dirname(fileURLToPath(import.meta.url)); - const templatePath = path.join(__dirname, "templates", "models.ts.hbs"); - const templateContent = readFileSync(templatePath, "utf-8"); - const template = Handlebars.compile(templateContent); - - const output = template({ modelId: modelIds }); - - writeFileSync(new URL("../__generated__/models.gen.ts", import.meta.url), output); -}; - -main(); diff --git a/packages/ai-sdk-provider-old/src/types.ts b/packages/ai-sdk-provider-old/src/types.ts deleted file mode 100644 index d0d0c8a..0000000 --- a/packages/ai-sdk-provider-old/src/types.ts +++ /dev/null @@ -1,47 +0,0 @@ -// Modified by Hyperbolic Labs, Inc. on 2025-03-25 -// Original work Copyright 2025 OpenRouter Inc. -// Licensed under the Apache License, Version 2.0 - -import type { LanguageModelV1 } from "@ai-sdk/provider"; - -// Re-export the LanguageModelV1 type to ensure proper type compatibility -export type { LanguageModelV1 }; - -// Export our model types with explicit type constraints -export type HyperbolicLanguageModel = LanguageModelV1; - -export type HyperbolicProviderOptions = { - models?: string[]; - - /** - * https://openrouter.ai/docs/use-cases/reasoning-tokens - * One of `max_tokens` or `effort` is required. - * If `exclude` is true, reasoning will be removed from the response. Default is false. 
- */ - reasoning?: { - exclude?: boolean; - } & ( - | { - max_tokens: number; - } - | { - effort: "high" | "medium" | "low"; - } - ); - - /** - * A unique identifier representing your end-user, which can - * help Hyperbolic to monitor and detect abuse. - */ - user?: string; -}; - -export type HyperbolicSharedSettings = HyperbolicProviderOptions & { - /** - * @deprecated use `reasoning` instead - */ - includeReasoning?: boolean; - - // eslint-disable-next-line @typescript-eslint/no-explicit-any - extraBody?: Record; -}; diff --git a/packages/ai-sdk-provider-old/tsconfig.json b/packages/ai-sdk-provider-old/tsconfig.json deleted file mode 100644 index 61a17f8..0000000 --- a/packages/ai-sdk-provider-old/tsconfig.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "extends": "@hyperbolic/tsconfig/external-package.json", - "include": ["."], - "exclude": ["*/dist", "dist", "build", "node_modules"] -} diff --git a/packages/ai-sdk-provider-old/tsup.config.ts b/packages/ai-sdk-provider-old/tsup.config.ts deleted file mode 100644 index cded0e2..0000000 --- a/packages/ai-sdk-provider-old/tsup.config.ts +++ /dev/null @@ -1,17 +0,0 @@ -import { defineConfig } from "tsup"; - -export default defineConfig([ - { - entry: ["src/index.ts"], - format: ["cjs", "esm"], - dts: true, - sourcemap: true, - }, - { - entry: ["src/internal/index.ts"], - outDir: "dist/internal", - format: ["cjs", "esm"], - dts: true, - sourcemap: true, - }, -]); diff --git a/packages/ai-sdk-provider-old/turbo.json b/packages/ai-sdk-provider-old/turbo.json deleted file mode 100644 index 7dbd533..0000000 --- a/packages/ai-sdk-provider-old/turbo.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "$schema": "https://turborepo.org/schema.json", - "extends": ["//"], - "tasks": { - "build": { - "dependsOn": ["^build"], - "outputs": ["dist/**"] - } - } -} diff --git a/packages/ai-sdk-provider-old/vitest.config.mts b/packages/ai-sdk-provider-old/vitest.config.mts deleted file mode 100644 index 47e4ab5..0000000 --- 
a/packages/ai-sdk-provider-old/vitest.config.mts +++ /dev/null @@ -1,10 +0,0 @@ -import tsconfigPaths from "vite-tsconfig-paths"; -import { configDefaults, defineConfig } from "vitest/config"; - -export default defineConfig({ - test: { - exclude: [...configDefaults.exclude, "**/node_modules/**", "**/fixtures/**", "**/templates/**"], - globals: true, - include: ["**/*.test.ts", "**/*.test.tsx"], - }, -}); diff --git a/packages/ai-sdk-provider-old/vitest.edge.config.ts b/packages/ai-sdk-provider-old/vitest.edge.config.ts deleted file mode 100644 index 3f8327a..0000000 --- a/packages/ai-sdk-provider-old/vitest.edge.config.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { defineConfig } from "vitest/config"; - -// https://vitejs.dev/config/ -export default defineConfig({ - test: { - environment: "edge-runtime", - globals: true, - include: ["**/*.test.ts", "**/*.test.tsx"], - }, -}); diff --git a/packages/ai-sdk-provider-old/vitest.node.config.ts b/packages/ai-sdk-provider-old/vitest.node.config.ts deleted file mode 100644 index da67c22..0000000 --- a/packages/ai-sdk-provider-old/vitest.node.config.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { defineConfig } from "vitest/config"; - -// https://vitejs.dev/config/ -export default defineConfig({ - test: { - environment: "node", - globals: true, - include: ["**/*.test.ts", "**/*.test.tsx"], - }, -}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index aa2c01d..5c59a5c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -87,9 +87,6 @@ importers: '@hyperbolic/tsconfig': specifier: workspace:* version: link:../../tooling/typescript - '@openrouter/sdk': - specifier: ^0.1.27 - version: 0.1.27 '@types/json-schema': specifier: 7.0.15 version: 7.0.15 @@ -127,55 +124,6 @@ importers: specifier: ^4.0.0 version: 4.3.6 - packages/ai-sdk-provider-old: - dependencies: - '@ai-sdk/provider': - specifier: ^3.0.5 - version: 3.0.5 - '@ai-sdk/provider-utils': - specifier: ^4.0.9 - version: 4.0.9(zod@4.3.6) - ai: - specifier: ^6.0.48 - version: 
6.0.48(zod@4.3.6) - zod: - specifier: ^4.0.0 - version: 4.3.6 - devDependencies: - '@edge-runtime/vm': - specifier: ^5.0.0 - version: 5.0.0 - '@hyperbolic/api': - specifier: workspace:* - version: link:../api - '@hyperbolic/eslint-config': - specifier: workspace:* - version: link:../../tooling/eslint - '@hyperbolic/prettier-config': - specifier: workspace:* - version: link:../../tooling/prettier - '@hyperbolic/tsconfig': - specifier: workspace:* - version: link:../../tooling/typescript - eslint: - specifier: 'catalog:' - version: 9.19.0(jiti@2.4.2) - handlebars: - specifier: ^4.7.8 - version: 4.7.8 - prettier: - specifier: 'catalog:' - version: 3.4.2 - tsup: - specifier: 8.5.0 - version: 8.5.0(jiti@2.4.2)(postcss@8.5.1)(tsx@4.19.3)(typescript@5.9.3)(yaml@2.7.0) - type-fest: - specifier: ^4.37.0 - version: 4.37.0 - typescript: - specifier: 'catalog:' - version: 5.9.3 - packages/api: devDependencies: '@hey-api/client-fetch': @@ -991,9 +939,6 @@ packages: '@open-draft/until@2.1.0': resolution: {integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==} - '@openrouter/sdk@0.1.27': - resolution: {integrity: sha512-RH//L10bSmc81q25zAZudiI4kNkLgxF2E+WU42vghp3N6TEvZ6F0jK7uT3tOxkEn91gzmMw9YVmDENy7SJsajQ==} - '@opentelemetry/api@1.9.0': resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} engines: {node: '>=8.0.0'} @@ -4045,7 +3990,7 @@ snapshots: '@types/node': 20.5.1 chalk: 4.1.2 cosmiconfig: 8.3.6(typescript@5.9.3) - cosmiconfig-typescript-loader: 4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@20.5.1)(typescript@5.9.3))(typescript@5.9.3) + cosmiconfig-typescript-loader: 4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@22.13.10)(typescript@5.9.3))(typescript@5.9.3) lodash.isplainobject: 4.0.6 lodash.merge: 4.6.2 lodash.uniq: 4.5.0 @@ -4514,10 +4459,6 @@ snapshots: 
'@open-draft/until@2.1.0': {} - '@openrouter/sdk@0.1.27': - dependencies: - zod: 4.3.6 - '@opentelemetry/api@1.9.0': {} '@pkgjs/parseargs@0.11.0': @@ -5053,7 +4994,7 @@ snapshots: cookie@1.1.1: {} - cosmiconfig-typescript-loader@4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@20.5.1)(typescript@5.9.3))(typescript@5.9.3): + cosmiconfig-typescript-loader@4.4.0(@types/node@20.5.1)(cosmiconfig@8.3.6(typescript@5.9.3))(ts-node@10.9.2(@types/node@22.13.10)(typescript@5.9.3))(typescript@5.9.3): dependencies: '@types/node': 20.5.1 cosmiconfig: 8.3.6(typescript@5.9.3) From d8ca5088c2dcb914bf9c190d2013bb7150d15862 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 16:47:49 -0800 Subject: [PATCH 20/22] add changeset --- .changeset/thin-geckos-breathe.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/thin-geckos-breathe.md diff --git a/.changeset/thin-geckos-breathe.md b/.changeset/thin-geckos-breathe.md new file mode 100644 index 0000000..bf2ba1c --- /dev/null +++ b/.changeset/thin-geckos-breathe.md @@ -0,0 +1,5 @@ +--- +"@hyperbolic/ai-sdk-provider": major +--- + +Remove support for chat and completion models (our APIs are now entirely compatible with openrouter). Upgrade to AI SDK v6 From 8f64ec7567fc96c09148ea44f3715f3e47d57746 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 16:57:01 -0800 Subject: [PATCH 21/22] remove reasoning setting --- packages/ai-sdk-provider/src/types/index.ts | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/packages/ai-sdk-provider/src/types/index.ts b/packages/ai-sdk-provider/src/types/index.ts index ac52c1d..8edd163 100644 --- a/packages/ai-sdk-provider/src/types/index.ts +++ b/packages/ai-sdk-provider/src/types/index.ts @@ -9,22 +9,6 @@ export type { LanguageModelV3, LanguageModelV3Prompt }; export type HyperbolicProviderOptions = { models?: string[]; - /** - * One of `max_tokens` or `effort` is required. 
- * If `exclude` is true, reasoning will be removed from the response. Default is false. - */ - reasoning?: { - enabled?: boolean; - exclude?: boolean; - } & ( - | { - max_tokens: number; - } - | { - effort: "high" | "medium" | "low"; - } - ); - /** * A unique identifier representing your end-user, which can * help Hyperbolic to monitor and detect abuse. @@ -33,11 +17,6 @@ export type HyperbolicProviderOptions = { }; export type HyperbolicSharedSettings = HyperbolicProviderOptions & { - /** - * @deprecated use `reasoning` instead - */ - includeReasoning?: boolean; - extraBody?: Record; /** From da6215f38be245d366b72b9e66f6136e3b14e922 Mon Sep 17 00:00:00 2001 From: Connor Chevli Date: Fri, 23 Jan 2026 17:00:57 -0800 Subject: [PATCH 22/22] prettier --- packages/ai-sdk-provider/src/__generated__/models.gen.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/ai-sdk-provider/src/__generated__/models.gen.ts b/packages/ai-sdk-provider/src/__generated__/models.gen.ts index ad31532..2ad0d9a 100644 --- a/packages/ai-sdk-provider/src/__generated__/models.gen.ts +++ b/packages/ai-sdk-provider/src/__generated__/models.gen.ts @@ -70,4 +70,7 @@ const _completionModels = [ export type HyperbolicImageModelId = (typeof _imageModels)[number] | (string & {}); export type HyperbolicChatModelId = (typeof _chatModels)[number] | (string & {}); export type HyperbolicCompletionModelId = (typeof _completionModels)[number] | (string & {}); -export type HyperbolicModelId = HyperbolicImageModelId | HyperbolicChatModelId | HyperbolicCompletionModelId; +export type HyperbolicModelId = + | HyperbolicImageModelId + | HyperbolicChatModelId + | HyperbolicCompletionModelId;