diff --git a/integrations/cerebras/integration.definition.ts b/integrations/cerebras/integration.definition.ts index b64837e0381..28553dcc3a0 100644 --- a/integrations/cerebras/integration.definition.ts +++ b/integrations/cerebras/integration.definition.ts @@ -8,7 +8,7 @@ export default new IntegrationDefinition({ title: 'Cerebras', description: 'Get access to a curated list of Cerebras models for content generation and chat completions within your bot.', - version: '7.0.1', + version: '8.0.0', readme: 'hub.md', icon: 'icon.svg', entities: { diff --git a/integrations/cerebras/package.json b/integrations/cerebras/package.json index bc5621a06c7..ef3abaa025e 100644 --- a/integrations/cerebras/package.json +++ b/integrations/cerebras/package.json @@ -10,7 +10,7 @@ "@botpress/client": "workspace:*", "@botpress/common": "workspace:*", "@botpress/sdk": "workspace:*", - "openai": "^4.86.1" + "openai": "^5.12.1" }, "devDependencies": { "@botpress/cli": "workspace:*", diff --git a/integrations/cerebras/src/index.ts b/integrations/cerebras/src/index.ts index dab04b9b1f1..6bd96c3c955 100644 --- a/integrations/cerebras/src/index.ts +++ b/integrations/cerebras/src/index.ts @@ -1,4 +1,5 @@ import { llm } from '@botpress/common' +import { validateGptOssReasoningEffort } from '@botpress/common/src/llm/openai' import OpenAI from 'openai' import { DEFAULT_MODEL_ID, ModelId } from './schemas' import * as bp from '.botpress' @@ -9,7 +10,23 @@ const cerebrasClient = new OpenAI({ }) const languageModels: Record = { - // Reference: https://inference-docs.cerebras.ai/introduction + // Reference: + // https://inference-docs.cerebras.ai/models/overview + // https://www.cerebras.ai/pricing + 'gpt-oss-120b': { + name: 'GPT-OSS 120B (Preview)', + description: + 'gpt-oss-120b is a high-performance, open-weight language model designed for production-grade, general-purpose use cases. 
It excels at complex reasoning and supports configurable reasoning effort, full chain-of-thought transparency for easier debugging and trust, and native agentic capabilities for function calling, tool use, and structured outputs.', + tags: ['preview', 'general-purpose', 'reasoning'], + input: { + costPer1MTokens: 0.25, + maxTokens: 131_000, + }, + output: { + costPer1MTokens: 0.69, + maxTokens: 16_000, + }, + }, 'qwen-3-32b': { name: 'Qwen3 32B', description: @@ -17,7 +34,7 @@ const languageModels: Record = { tags: ['general-purpose', 'reasoning'], input: { costPer1MTokens: 0.4, - maxTokens: 16_000, + maxTokens: 128_000, }, output: { costPer1MTokens: 0.8, @@ -31,7 +48,7 @@ const languageModels: Record = { tags: ['general-purpose'], input: { costPer1MTokens: 0.65, - maxTokens: 16_000, + maxTokens: 32_000, }, output: { costPer1MTokens: 0.85, @@ -45,7 +62,7 @@ const languageModels: Record = { tags: ['low-cost', 'general-purpose'], input: { costPer1MTokens: 0.1, - maxTokens: 16_000, + maxTokens: 32_000, }, output: { costPer1MTokens: 0.1, @@ -59,7 +76,7 @@ const languageModels: Record = { 'Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8B and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.', input: { costPer1MTokens: 0.85, - maxTokens: 16_000, + maxTokens: 128_000, }, output: { costPer1MTokens: 1.2, @@ -98,6 +115,16 @@ export default new bp.Integration({ } } + if (input.model?.id === 'gpt-oss-120b') { + request.reasoning_effort = validateGptOssReasoningEffort(input, logger) + + // GPT-OSS models don't work well with a stop sequence, so we have to remove it from the request. 
+ delete request.stop + + // Reasoning models don't support temperature + delete request.temperature + } + return request }, } diff --git a/integrations/cerebras/src/schemas.ts b/integrations/cerebras/src/schemas.ts index 96f02e19023..1676208da95 100644 --- a/integrations/cerebras/src/schemas.ts +++ b/integrations/cerebras/src/schemas.ts @@ -3,7 +3,7 @@ import { z } from '@botpress/sdk' export const DEFAULT_MODEL_ID = 'llama3.1-8b' export const modelId = z - .enum(['llama3.1-8b', 'llama3.3-70b', 'llama-4-scout-17b-16e-instruct', 'qwen-3-32b']) + .enum(['gpt-oss-120b', 'qwen-3-32b', 'llama-4-scout-17b-16e-instruct', 'llama3.1-8b', 'llama3.3-70b']) .describe('Model to use for content generation') .placeholder(DEFAULT_MODEL_ID) diff --git a/integrations/fireworks-ai/integration.definition.ts b/integrations/fireworks-ai/integration.definition.ts index 8452aac80c7..a2007c46c83 100644 --- a/integrations/fireworks-ai/integration.definition.ts +++ b/integrations/fireworks-ai/integration.definition.ts @@ -9,7 +9,7 @@ export default new IntegrationDefinition({ title: 'Fireworks AI', description: 'Choose from curated Fireworks AI models for content generation, chat completions, and audio transcription.', - version: '8.0.1', + version: '9.0.0', readme: 'hub.md', icon: 'icon.svg', entities: { diff --git a/integrations/fireworks-ai/package.json b/integrations/fireworks-ai/package.json index 0f4d807210c..aed9bb7c6a0 100644 --- a/integrations/fireworks-ai/package.json +++ b/integrations/fireworks-ai/package.json @@ -10,7 +10,7 @@ "@botpress/client": "workspace:*", "@botpress/common": "workspace:*", "@botpress/sdk": "workspace:*", - "openai": "^4.86.1" + "openai": "^5.12.1" }, "devDependencies": { "@botpress/cli": "workspace:*", diff --git a/integrations/fireworks-ai/src/index.ts b/integrations/fireworks-ai/src/index.ts index 66b31d3d39c..f9f267a0cb6 100644 --- a/integrations/fireworks-ai/src/index.ts +++ b/integrations/fireworks-ai/src/index.ts @@ -1,4 +1,5 @@ import { llm, 
speechToText } from '@botpress/common' +import { validateGptOssReasoningEffort } from '@botpress/common/src/llm/openai' import OpenAI from 'openai' import { LanguageModelId, ImageModelId, SpeechToTextModelId } from './schemas' import * as bp from '.botpress' @@ -14,6 +15,34 @@ const DEFAULT_LANGUAGE_MODEL_ID: LanguageModelId = 'accounts/fireworks/models/ll // https://fireworks.ai/models // https://fireworks.ai/pricing const languageModels: Record = { + 'accounts/fireworks/models/gpt-oss-20b': { + name: 'GPT-OSS 20B', + description: + 'gpt-oss-20b is a compact, open-weight language model optimized for low latency. It shares the same training foundation and capabilities as the GPT-OSS 120B model, with faster responses and lower cost.', + tags: ['general-purpose', 'reasoning', 'low-cost'], + input: { + costPer1MTokens: 0.07, + maxTokens: 128_000, + }, + output: { + costPer1MTokens: 0.3, + maxTokens: 16_000, + }, + }, + 'accounts/fireworks/models/gpt-oss-120b': { + name: 'GPT-OSS 120B', + description: + 'gpt-oss-120b is a high-performance, open-weight language model designed for production-grade, general-purpose use cases. 
It excels at complex reasoning and supports configurable reasoning effort, full chain-of-thought transparency for easier debugging and trust, and native agentic capabilities for function calling, tool use, and structured outputs.', + tags: ['general-purpose', 'reasoning'], + input: { + costPer1MTokens: 0.15, + maxTokens: 128_000, + }, + output: { + costPer1MTokens: 0.6, + maxTokens: 16_000, + }, + }, 'accounts/fireworks/models/deepseek-r1-0528': { name: 'DeepSeek R1 0528', description: @@ -263,6 +292,17 @@ export default new bp.Integration({ content: systemPrompt.content, }) } + } else if ( + input.model?.id === 'accounts/fireworks/models/gpt-oss-20b' || + input.model?.id === 'accounts/fireworks/models/gpt-oss-120b' + ) { + request.reasoning_effort = validateGptOssReasoningEffort(input, logger) + + // GPT-OSS models don't work well with a stop sequence, so we have to remove it from the request. + delete request.stop + + // Reasoning models don't support temperature + delete request.temperature } return request diff --git a/integrations/fireworks-ai/src/schemas.ts b/integrations/fireworks-ai/src/schemas.ts index c5b5a124416..af03f971770 100644 --- a/integrations/fireworks-ai/src/schemas.ts +++ b/integrations/fireworks-ai/src/schemas.ts @@ -17,6 +17,8 @@ export const languageModelId = z 'accounts/fireworks/models/mixtral-8x7b-instruct', 'accounts/fireworks/models/mythomax-l2-13b', 'accounts/fireworks/models/gemma2-9b-it', + 'accounts/fireworks/models/gpt-oss-20b', + 'accounts/fireworks/models/gpt-oss-120b', ]) .describe('Model to use for content generation') .placeholder('accounts/fireworks/models/llama-v3p1-70b-instruct') diff --git a/integrations/groq/integration.definition.ts b/integrations/groq/integration.definition.ts index c47c098206a..bf0493a55ba 100644 --- a/integrations/groq/integration.definition.ts +++ b/integrations/groq/integration.definition.ts @@ -8,7 +8,7 @@ export default new IntegrationDefinition({ name: 'groq', title: 'Groq', description: 'Gain 
access to Groq models for content generation, chat responses, and audio transcription.', - version: '13.0.1', + version: '14.0.0', readme: 'hub.md', icon: 'icon.svg', entities: { diff --git a/integrations/groq/package.json b/integrations/groq/package.json index 8c2055246ab..66698b919e3 100644 --- a/integrations/groq/package.json +++ b/integrations/groq/package.json @@ -10,7 +10,7 @@ "@botpress/client": "workspace:*", "@botpress/common": "workspace:*", "@botpress/sdk": "workspace:*", - "openai": "^4.86.1" + "openai": "^5.12.1" }, "devDependencies": { "@botpress/cli": "workspace:*", diff --git a/integrations/groq/src/index.ts b/integrations/groq/src/index.ts index f86fa28bfda..4ecc9f33e0a 100644 --- a/integrations/groq/src/index.ts +++ b/integrations/groq/src/index.ts @@ -1,4 +1,5 @@ import { llm, speechToText } from '@botpress/common' +import { validateGptOssReasoningEffort } from '@botpress/common/src/llm/openai' import OpenAI from 'openai' import { ModelId, SpeechToTextModelId } from './schemas' import * as bp from '.botpress' @@ -12,6 +13,35 @@ const languageModels: Record = { // Reference: // https://console.groq.com/docs/models // https://groq.com/pricing/ + 'openai/gpt-oss-20b': { + name: 'GPT-OSS 20B (Preview)', + description: + 'gpt-oss-20b is a compact, open-weight language model optimized for low latency. It shares the same training foundation and capabilities as the GPT-OSS 120B model, with faster responses and lower cost.', + tags: ['preview', 'general-purpose', 'reasoning', 'low-cost'], + input: { + costPer1MTokens: 0.1, + maxTokens: 131_000, + }, + output: { + costPer1MTokens: 0.5, + maxTokens: 32_000, + }, + }, + 'openai/gpt-oss-120b': { + name: 'GPT-OSS 120B (Preview)', + description: + 'gpt-oss-120b is a high-performance, open-weight language model designed for production-grade, general-purpose use cases. 
It excels at complex reasoning and supports configurable reasoning effort, full chain-of-thought transparency for easier debugging and trust, and native agentic capabilities for function calling, tool use, and structured outputs.', + tags: ['preview', 'general-purpose', 'reasoning'], + + input: { + costPer1MTokens: 0.15, + maxTokens: 131_000, + }, + output: { + costPer1MTokens: 0.75, + maxTokens: 32_000, + }, + }, 'deepseek-r1-distill-llama-70b': { name: 'DeepSeek R1-Distill Llama 3.3 70B (Preview)', description: @@ -180,6 +210,19 @@ export default new bp.Integration({ provider, models: languageModels, defaultModel: 'llama-3.3-70b-versatile', + overrideRequest: (request) => { + if (input.model?.id === 'openai/gpt-oss-20b' || input.model?.id === 'openai/gpt-oss-120b') { + request.reasoning_effort = validateGptOssReasoningEffort(input, logger) + + // GPT-OSS models don't work well with a stop sequence, so we have to remove it from the request. + delete request.stop + + // Reasoning models don't support temperature + delete request.temperature + } + + return request + }, overrideResponse: (response) => { if (input.model?.id === 'deepseek-r1-distill-llama-70b') { for (const choice of response.choices) { diff --git a/integrations/groq/src/schemas.ts b/integrations/groq/src/schemas.ts index 886ffec7d0b..5c5018c01fa 100644 --- a/integrations/groq/src/schemas.ts +++ b/integrations/groq/src/schemas.ts @@ -2,6 +2,8 @@ import { z } from '@botpress/sdk' export const modelId = z .enum([ + 'openai/gpt-oss-20b', + 'openai/gpt-oss-120b', 'deepseek-r1-distill-llama-70b', 'llama-3.3-70b-versatile', 'llama-3.2-1b-preview', diff --git a/integrations/openai/src/index.ts b/integrations/openai/src/index.ts index 793b510d92b..b453a4e8db3 100644 --- a/integrations/openai/src/index.ts +++ b/integrations/openai/src/index.ts @@ -1,5 +1,6 @@ import { InvalidPayloadError } from '@botpress/client' import { llm, speechToText, textToImage } from '@botpress/common' +import { 
validateOpenAIReasoningEffort } from '@botpress/common/src/llm/openai' import crypto from 'crypto' import { TextToSpeechPricePer1MCharacters } from 'integration.definition' import OpenAI from 'openai' @@ -346,27 +347,13 @@ export default new bp.Integration({ if (input.reasoningEffort === undefined && isGPT5) { // GPT-5 is a hybrid model but it doesn't support optional reasoning, so if reasoning effort isn't specified we assume the user wants to use the least amount of reasoning possible (to reduce cost/latency). request.reasoning_effort = 'minimal' - } else if (input.reasoningEffort === 'none') { - const acceptedValues = SupportedReasoningEfforts.map((x) => `"${x}"`) - .map((x, i) => (i === SupportedReasoningEfforts.length - 1 ? `or ${x}` : x)) - .join(', ') - throw new InvalidPayloadError( - `Using "none" to disabling reasoning is not supported with OpenAI reasoning models, please use ${acceptedValues} instead or switch to a non-reasoning model` - ) - } else if (SupportedReasoningEfforts.includes(input.reasoningEffort as any)) { - request.reasoning_effort = input.reasoningEffort as ChatCompletionReasoningEffort } else { - request.reasoning_effort = 'medium' - logger - .forBot() - .info( - `Reasoning effort "${input.reasoningEffort}" is not supported by OpenAI, using "${request.reasoning_effort}" effort instead` - ) + request.reasoning_effort = validateOpenAIReasoningEffort(input, logger) } if (isGPT5) { // GPT-5 doesn't support stop sequences - request.stop = undefined + delete request.stop } // Reasoning models don't allow setting temperature diff --git a/package.json b/package.json index 60d1b1cc34c..f757278bb0f 100644 --- a/package.json +++ b/package.json @@ -7,14 +7,14 @@ "test": "vitest --run", "check:bplint": "turbo check:bplint", "check:dep": "depsynky check --ignore-dev", - "check:sherif": "sherif -i zod -i axios -i query-string -i googleapis -i @linear/sdk", + "check:sherif": "sherif -i zod -i axios -i query-string -i googleapis -i @linear/sdk -i openai", 
"check:format": "prettier --check .", "check:eslint": "eslint ./ --max-warnings=0", "check:oxlint": "oxlint -c .oxlintrc.json", "check:lint": "pnpm check:bplint && pnpm check:oxlint && pnpm check:eslint", "check:type": "turbo check:type", "fix:dep": "depsynky sync --ignore-dev", - "fix:sherif": "sherif -i zod -i axios -i query-string -i googleapis -i @linear/sdk --fix", + "fix:sherif": "sherif -i zod -i axios -i query-string -i googleapis -i @linear/sdk -i openai --fix", "fix:format": "prettier --write .", "fix:oxlint": "oxlint -c .oxlintrc.json --fix --fix-suggestions", "fix:lint": "eslint --fix ./", diff --git a/packages/cli/package.json b/packages/cli/package.json index 0ded0e009a1..1ed6f23463c 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -1,6 +1,6 @@ { "name": "@botpress/cli", - "version": "4.16.1", + "version": "4.16.2", "description": "Botpress CLI", "scripts": { "build": "pnpm run bundle && pnpm run template:gen", diff --git a/packages/cli/src/command-implementations/profile-commands.ts b/packages/cli/src/command-implementations/profile-commands.ts index 6ed09ae289d..4948c378c15 100644 --- a/packages/cli/src/command-implementations/profile-commands.ts +++ b/packages/cli/src/command-implementations/profile-commands.ts @@ -40,10 +40,13 @@ export class ListProfilesCommand extends GlobalCommand { public async run(): Promise { + const logSuccess = (profileName: string) => this.logger.success(`Now using profile "${profileName}"`) + if (this.argv.profileToUse) { const profile = await this.readProfileFromFS(this.argv.profileToUse) await this.globalCache.set('activeProfile', this.argv.profileToUse) await _updateGlobalCache({ globalCache: this.globalCache, profileName: this.argv.profileToUse, profile }) + logSuccess(this.argv.profileToUse) return } const profiles = await this.readProfilesFromFS() @@ -63,6 +66,7 @@ export class UseProfileCommand extends GlobalCommand `"${x}"`) + .map((x, i) => (i === GptOssSupportedReasoningEfforts.length - 
1 ? `or ${x}` : x)) + .join(', ') + throw new InvalidPayloadError( + `Using "none" to disable reasoning is not supported by ${input.model ? `the "${input.model?.id}" model` : 'this model'}, please use ${acceptedValues} instead or switch to a non-reasoning model` + ) + } + + if (GptOssSupportedReasoningEfforts.includes(input.reasoningEffort as any)) { + return input.reasoningEffort as ChatCompletionReasoningEffort + } else { + const reasoningEffortOverride: ChatCompletionReasoningEffort = 'medium' + logger + .forBot() + .info( + `Reasoning effort "${input.reasoningEffort}" is not supported by ${input.model ? `the "${input.model?.id}" model` : 'this model'}, using "${reasoningEffortOverride}" effort instead` + ) + return reasoningEffortOverride + } +} + +export function validateOpenAIReasoningEffort( + input: { reasoningEffort?: ReasoningEffort; model?: { id: string } }, + logger: IntegrationLogger +): ChatCompletionReasoningEffort | undefined { + // Reasoning efforts supported by commercial OpenAI models are the same as the GPT-OSS models at the moment, so we reuse the same validation logic. 
+ return validateGptOssReasoningEffort(input, logger) +} diff --git a/packages/common/src/llm/schemas.ts b/packages/common/src/llm/schemas.ts index dc823ab6c56..78a4e26731f 100644 --- a/packages/common/src/llm/schemas.ts +++ b/packages/common/src/llm/schemas.ts @@ -81,6 +81,7 @@ export const ModelSchema = ModelRefSchema.extend({ }) const ReasoningEffortSchema = z.enum(['low', 'medium', 'high', 'dynamic', 'none']) +export type ReasoningEffort = z.infer export const GenerateContentInputSchema = (modelRefSchema: S) => z.object({ diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ec655f145cd..5cf0d117bc7 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -477,8 +477,8 @@ importers: specifier: workspace:* version: link:../../packages/sdk openai: - specifier: ^4.86.1 - version: 4.86.1(zod@3.22.4) + specifier: ^5.12.1 + version: 5.12.1(zod@3.22.4) devDependencies: '@botpress/cli': specifier: workspace:* @@ -680,8 +680,8 @@ importers: specifier: workspace:* version: link:../../packages/sdk openai: - specifier: ^4.86.1 - version: 4.86.1(zod@3.22.4) + specifier: ^5.12.1 + version: 5.12.1(zod@3.22.4) devDependencies: '@botpress/cli': specifier: workspace:* @@ -927,8 +927,8 @@ importers: specifier: workspace:* version: link:../../packages/sdk openai: - specifier: ^4.86.1 - version: 4.86.1(zod@3.22.4) + specifier: ^5.12.1 + version: 5.12.1(zod@3.22.4) devDependencies: '@botpress/cli': specifier: workspace:* @@ -8615,12 +8615,6 @@ packages: resolution: {integrity: sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==} dev: false - /@types/node@18.19.76: - resolution: {integrity: sha512-yvR7Q9LdPz2vGpmpJX5LolrgRdWvB67MJKDPSgIIzpFbaf9a1j/f5DnLp5VDyHGMR0QZHlTr1afsD87QCXFHKw==} - dependencies: - undici-types: 5.26.5 - dev: false - /@types/node@22.16.4: resolution: {integrity: sha512-PYRhNtZdm2wH/NT2k/oAJ6/f2VD2N2Dag0lGlx2vWgMSJXGNmlce5MiTQzoWAiIJtso30mjnfQCOKVH+kAQC/g==} dependencies: @@ -9116,13 +9110,6 @@ packages: engines: {node: '>= 
14'} dev: false - /agentkeepalive@4.5.0: - resolution: {integrity: sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==} - engines: {node: '>= 8.0.0'} - dependencies: - humanize-ms: 1.2.1 - dev: false - /aggregate-error@3.1.0: resolution: {integrity: sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==} engines: {node: '>=8'} @@ -12085,10 +12072,6 @@ packages: resolution: {integrity: sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==} dev: false - /form-data-encoder@1.7.2: - resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} - dev: false - /form-data@2.3.3: resolution: {integrity: sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==} engines: {node: '>= 0.12'} @@ -12133,14 +12116,6 @@ packages: es-set-tostringtag: 2.1.0 mime-types: 2.1.35 - /formdata-node@4.4.1: - resolution: {integrity: sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} - engines: {node: '>= 12.20'} - dependencies: - node-domexception: 1.0.0 - web-streams-polyfill: 4.0.0-beta.3 - dev: false - /formdata-polyfill@4.0.10: resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} engines: {node: '>=12.20.0'} @@ -12891,12 +12866,6 @@ packages: resolution: {integrity: sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==} engines: {node: '>=16.17.0'} - /humanize-ms@1.2.1: - resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} - dependencies: - ms: 2.1.3 - dev: false - /husky@9.1.7: resolution: {integrity: sha512-5gs5ytaNjBrh5Ow3zrvdUUY+0VxIuWVL4i9irt6friV+BqdCfmV11CQTWMiBYWHbXhco+J1kHfTOUkePhCDvMA==} engines: {node: '>=18'} @@ -15237,6 +15206,7 @@ 
packages: /node-domexception@1.0.0: resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} engines: {node: '>=10.5.0'} + dev: true /node-fetch@2.7.0: resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} @@ -15477,30 +15447,6 @@ packages: is-wsl: 2.2.0 dev: false - /openai@4.86.1(zod@3.22.4): - resolution: {integrity: sha512-x3iCLyaC3yegFVZaxOmrYJjitKxZ9hpVbLi+ZlT5UHuHTMlEQEbKXkGOM78z9qm2T5GF+XRUZCP2/aV4UPFPJQ==} - hasBin: true - peerDependencies: - ws: ^8.18.0 - zod: ^3.23.8 - peerDependenciesMeta: - ws: - optional: true - zod: - optional: true - dependencies: - '@types/node': 18.19.76 - '@types/node-fetch': 2.6.12 - abort-controller: 3.0.0 - agentkeepalive: 4.5.0 - form-data-encoder: 1.7.2 - formdata-node: 4.4.1 - node-fetch: 2.7.0 - zod: 3.22.4 - transitivePeerDependencies: - - encoding - dev: false - /openai@5.12.1(zod@3.22.4): resolution: {integrity: sha512-26s536j4Fi7P3iUma1S9H33WRrw0Qu8pJ2nYJHffrlKHPU0JK4d0r3NcMgqEcAeTdNLGYNyoFsqN4g4YE9vutg==} hasBin: true @@ -18098,10 +18044,6 @@ packages: resolution: {integrity: sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A==} dev: false - /undici-types@5.26.5: - resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} - dev: false - /undici-types@6.21.0: resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} @@ -18545,11 +18487,6 @@ packages: engines: {node: '>= 8'} dev: true - /web-streams-polyfill@4.0.0-beta.3: - resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} - engines: {node: '>= 14'} - dev: false - /webidl-conversions@3.0.1: resolution: {integrity: 
sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==}