diff --git a/integrations/openai/integration.definition.ts b/integrations/openai/integration.definition.ts
index ee72d97b67c..b7bf60ec8c1 100644
--- a/integrations/openai/integration.definition.ts
+++ b/integrations/openai/integration.definition.ts
@@ -18,7 +18,7 @@ export default new IntegrationDefinition({
   title: 'OpenAI',
   description:
     'Gain access to OpenAI models for text generation, speech synthesis, audio transcription, and image generation.',
-  version: '15.0.4',
+  version: '16.0.0',
   readme: 'hub.md',
   icon: 'icon.svg',
   entities: {
diff --git a/integrations/openai/package.json b/integrations/openai/package.json
index 6024fdfbd71..ec932d0d888 100644
--- a/integrations/openai/package.json
+++ b/integrations/openai/package.json
@@ -10,7 +10,7 @@
     "@botpress/client": "workspace:*",
     "@botpress/common": "workspace:*",
     "@botpress/sdk": "workspace:*",
-    "openai": "^4.86.1"
+    "openai": "^5.12.1"
   },
   "devDependencies": {
     "@botpress/cli": "workspace:*",
diff --git a/integrations/openai/src/index.ts b/integrations/openai/src/index.ts
index 03422795525..793b510d92b 100644
--- a/integrations/openai/src/index.ts
+++ b/integrations/openai/src/index.ts
@@ -3,8 +3,9 @@ import { llm, speechToText, textToImage } from '@botpress/common'
 import crypto from 'crypto'
 import { TextToSpeechPricePer1MCharacters } from 'integration.definition'
 import OpenAI from 'openai'
-import { ChatCompletionReasoningEffort, ImageGenerateParams, Images } from 'openai/resources'
+import { ImageGenerateParams, Images } from 'openai/resources'
 import { SpeechCreateParams } from 'openai/resources/audio/speech'
+import { ChatCompletionReasoningEffort } from 'openai/resources/chat/completions'
 import { LanguageModelId, ImageModelId, SpeechToTextModelId } from './schemas'
 
 import * as bp from '.botpress'
@@ -20,8 +21,50 @@ const DEFAULT_IMAGE_MODEL_ID: ImageModelId = 'dall-e-3-standard-1024'
 // https://openai.com/api/pricing/
 const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
   // IMPORTANT: Only full model names should be supported here, as the short model names can be pointed by OpenAI at any time to a newer model with different pricing.
+  'gpt-5-2025-08-07': {
+    name: 'GPT-5',
+    description:
+      "GPT-5 is OpenAI's latest and most advanced AI model. It is a reasoning model that chooses the best way to respond based on task complexity and user intent. GPT-5 delivers expert-level performance across coding, math, writing, health, and visual perception, with improved accuracy, speed, and reduced hallucinations. It excels in complex tasks, long-context understanding, multimodal inputs (text and images), and safe, nuanced responses.",
+    tags: ['recommended', 'reasoning', 'general-purpose'],
+    input: {
+      costPer1MTokens: 1.25,
+      maxTokens: 400_000,
+    },
+    output: {
+      costPer1MTokens: 10,
+      maxTokens: 128_000,
+    },
+  },
+  'gpt-5-mini-2025-08-07': {
+    name: 'GPT-5 Mini',
+    description:
+      'GPT-5 Mini is a lightweight and cost-effective version of GPT-5, optimized for applications where speed and efficiency matter more than full advanced capabilities. It is designed for cost-sensitive use cases such as chatbots, content generation, and high-volume usage, striking a balance between performance and affordability, making it suitable for simpler tasks that do not require deep multi-step reasoning or the full reasoning power of GPT-5',
+    tags: ['recommended', 'reasoning', 'general-purpose'],
+    input: {
+      costPer1MTokens: 0.25,
+      maxTokens: 400_000,
+    },
+    output: {
+      costPer1MTokens: 2,
+      maxTokens: 128_000,
+    },
+  },
+  'gpt-5-nano-2025-08-07': {
+    name: 'GPT-5 Nano',
+    description:
+      'GPT-5 Nano is an ultra-lightweight version of GPT-5 optimized for speed and very low latency, making it ideal for use cases like simple chatbots, basic content generation, summarization, and classification tasks.',
+    tags: ['low-cost', 'reasoning', 'general-purpose'],
+    input: {
+      costPer1MTokens: 0.05,
+      maxTokens: 400_000,
+    },
+    output: {
+      costPer1MTokens: 0.4,
+      maxTokens: 128_000,
+    },
+  },
   'o4-mini-2025-04-16': {
-    name: 'GPT o4-mini',
+    name: 'o4-mini',
     description:
       "o4-mini is OpenAI's latest small o-series model. It's optimized for fast, effective reasoning with exceptionally efficient performance in coding and visual tasks.",
     tags: ['reasoning', 'vision'],
@@ -35,7 +78,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
     },
   },
   'o3-2025-04-16': {
-    name: 'GPT o3',
+    name: 'o3',
     description:
       'o3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following. Use it to think through multi-step problems that involve analysis across text, code, and images.',
     tags: ['reasoning', 'vision'],
@@ -49,7 +92,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
     },
   },
   'gpt-4.1-2025-04-14': {
-    name: 'GPT 4.1',
+    name: 'GPT-4.1',
     description:
       'GPT 4.1 is our flagship model for complex tasks. It is well suited for problem solving across domains. The knowledge cutoff is June 2024.',
     tags: ['recommended', 'vision', 'general-purpose'],
@@ -63,7 +106,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
     },
   },
   'gpt-4.1-mini-2025-04-14': {
-    name: 'GPT 4.1 Mini',
+    name: 'GPT-4.1 Mini',
     description:
       'GPT 4.1 mini provides a balance between intelligence, speed, and cost that makes it an attractive model for many use cases. The knowledge cutoff is June 2024.',
     tags: ['recommended', 'vision', 'general-purpose'],
@@ -77,7 +120,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
     },
   },
   'gpt-4.1-nano-2025-04-14': {
-    name: 'GPT 4.1 Nano',
+    name: 'GPT-4.1 Nano',
     description: 'GPT-4.1 nano is the fastest, most cost-effective GPT 4.1 model. The knowledge cutoff is June 2024.',
     tags: ['low-cost', 'vision', 'general-purpose'],
     input: {
@@ -90,7 +133,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
     },
   },
   'o3-mini-2025-01-31': {
-    name: 'GPT o3-mini',
+    name: 'o3-mini',
     description:
       'o3-mini is the most recent small reasoning model from OpenAI, providing high intelligence at the same cost and latency targets of o1-mini. o3-mini also supports key developer features, like Structured Outputs, function calling, Batch API, and more. Like other models in the o-series, it is designed to excel at science, math, and coding tasks. The knowledge cutoff for o3-mini models is October, 2023.',
     tags: ['reasoning', 'general-purpose'],
@@ -104,7 +147,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
     },
   },
   'o1-2024-12-17': {
-    name: 'GPT o1',
+    name: 'o1',
     description:
       'The o1 model is designed to solve hard problems across domains. The o1 series of models are trained with reinforcement learning to perform complex reasoning. o1 models think before they answer, producing a long internal chain of thought before responding to the user.',
     tags: ['reasoning', 'vision', 'general-purpose'],
@@ -118,7 +161,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
     },
   },
   'o1-mini-2024-09-12': {
-    name: 'GPT o1-mini',
+    name: 'o1-mini',
     description:
       'The o1-mini model is a fast and affordable reasoning model for specialized tasks. The o1 series of models are trained with reinforcement learning to perform complex reasoning. o1 models think before they answer, producing a long internal chain of thought before responding to the user.',
     tags: ['reasoning', 'vision', 'general-purpose'],
@@ -273,7 +316,7 @@ const SECONDS_IN_A_DAY = 24 * 60 * 60
 
 const provider = 'OpenAI'
 
-const SupportedReasoningEfforts = ['low', 'medium', 'high'] as ChatCompletionReasoningEffort[]
+const SupportedReasoningEfforts = ['minimal', 'low', 'medium', 'high'] as ChatCompletionReasoningEffort[]
 
 export default new bp.Integration({
   register: async () => {},
@@ -289,17 +332,28 @@ export default new bp.Integration({
         models: languageModels,
         defaultModel: DEFAULT_LANGUAGE_MODEL_ID,
         overrideRequest: (request) => {
-          if (input.model?.id.startsWith('o1-') || input.model?.id.startsWith('o3-')) {
-            if (input.reasoningEffort === 'none') {
+          const isGPT5 =
+            input.model?.id === 'gpt-5-2025-08-07' ||
+            input.model?.id === 'gpt-5-mini-2025-08-07' ||
+            input.model?.id === 'gpt-5-nano-2025-08-07'
+
+          const isOReasoningModel =
+            input.model?.id.startsWith('o1-') ||
+            input.model?.id.startsWith('o3-') ||
+            input.model?.id.startsWith('o4-')
+
+          if (isGPT5 || isOReasoningModel) {
+            if (input.reasoningEffort === undefined && isGPT5) {
+              // GPT-5 is a hybrid model but it doesn't support optional reasoning, so if reasoning effort isn't specified we assume the user wants to use the least amount of reasoning possible (to reduce cost/latency).
+              request.reasoning_effort = 'minimal'
+            } else if (input.reasoningEffort === 'none') {
               const acceptedValues = SupportedReasoningEfforts.map((x) => `"${x}"`)
                 .map((x, i) => (i === SupportedReasoningEfforts.length - 1 ? `or ${x}` : x))
                 .join(', ')
               throw new InvalidPayloadError(
                 `Using "none" to disabling reasoning is not supported with OpenAI reasoning models, please use ${acceptedValues} instead or switch to a non-reasoning model`
               )
-            }
-
-            if (SupportedReasoningEfforts.includes(input.reasoningEffort as any)) {
+            } else if (SupportedReasoningEfforts.includes(input.reasoningEffort as any)) {
               request.reasoning_effort = input.reasoningEffort as ChatCompletionReasoningEffort
             } else {
               request.reasoning_effort = 'medium'
@@ -310,7 +364,12 @@ export default new bp.Integration({
             )
           }
 
-          // The o1 models don't allow setting temperature
+          if (isGPT5) {
+            // GPT-5 doesn't support stop sequences
+            request.stop = undefined
+          }
+
+          // Reasoning models don't allow setting temperature
           delete request.temperature
         }
         return request
@@ -353,7 +412,7 @@ export default new bp.Integration({
         response_format: 'url',
       })
 
-      const temporaryImageUrl = result.data[0]?.url
+      const temporaryImageUrl = result.data?.[0]?.url
       if (!temporaryImageUrl) {
        throw new Error('No image was returned by OpenAI')
       }
diff --git a/integrations/openai/src/schemas.ts b/integrations/openai/src/schemas.ts
index c4e74d5c7a9..5db64d511b0 100644
--- a/integrations/openai/src/schemas.ts
+++ b/integrations/openai/src/schemas.ts
@@ -2,6 +2,9 @@ import { z } from '@botpress/sdk'
 
 export const languageModelId = z
   .enum([
+    'gpt-5-2025-08-07',
+    'gpt-5-mini-2025-08-07',
+    'gpt-5-nano-2025-08-07',
     'o4-mini-2025-04-16',
     'o3-2025-04-16',
     'gpt-4.1-2025-04-14',
diff --git a/integrations/zendesk/hub.md b/integrations/zendesk/hub.md
index c473e1401b5..f28dc20d794 100644
--- a/integrations/zendesk/hub.md
+++ b/integrations/zendesk/hub.md
@@ -26,9 +26,3 @@ Password: `API_TOKEN`
 3. Enable the integration to complete the setup.
 
 Once these steps are completed, your Zendesk articles will automatically sync to the specified knowledge base in Botpress. You can manually sync by using the "Sync KB" action.
-
-### HITL
-
-#### Via Channel Types
-
-On the "Start HITL" card you can use only "via channel" types documented by [Zendesk Docs](https://developer.zendesk.com/documentation/ticketing/reference-guides/via-types)
diff --git a/integrations/zendesk/integration.definition.ts b/integrations/zendesk/integration.definition.ts
index 7f993bb3be5..82001b41060 100644
--- a/integrations/zendesk/integration.definition.ts
+++ b/integrations/zendesk/integration.definition.ts
@@ -7,7 +7,7 @@ import { actions, events, configuration, channels, states, user } from './src/de
 export default new sdk.IntegrationDefinition({
   name: 'zendesk',
   title: 'Zendesk',
-  version: '2.8.0',
+  version: '2.8.1',
   icon: 'icon.svg',
   description:
     'Optimize your support workflow. Trigger workflows from ticket updates as well as manage tickets, access conversations, and engage with customers.',
@@ -27,13 +27,6 @@ export default new sdk.IntegrationDefinition({
         .title('Ticket Priority')
         .describe('Priority of the ticket. Leave empty for default priority.')
         .optional(),
-      viaChannel: sdk.z
-        .string()
-        .title('Via Channel')
-        .describe(
-          'Via Channel to use (example: "whatsapp", "instagram_dm" ), only use values documented by Zendesk, check the "Info" tab at the Zendesk integration configuration page for more details. Leave empty or use an invalid channel type and you will get "API".'
-        )
-        .optional(),
       chatbotName: sdk.z
         .string()
         .title('Chatbot Name')
diff --git a/integrations/zendesk/src/actions/hitl.ts b/integrations/zendesk/src/actions/hitl.ts
index 7869b3d43cb..5bd26d2c8dc 100644
--- a/integrations/zendesk/src/actions/hitl.ts
+++ b/integrations/zendesk/src/actions/hitl.ts
@@ -6,8 +6,6 @@ import * as bp from '.botpress'
 export const startHitl: bp.IntegrationProps['actions']['startHitl'] = async (props) => {
   const { ctx, input, client } = props
 
-  const { viaChannel, priority } = input.hitlSession || {}
-
   const downstreamBotpressUser = await client.getUser({ id: ctx.botUserId })
   const chatbotName = input.hitlSession?.chatbotName ?? downstreamBotpressUser.user.name ?? 'Botpress'
   const chatbotPhotoUrl =
@@ -29,10 +27,7 @@ export const startHitl: bp.IntegrationProps['actions']['startHitl'] = async (pro
       id: zendeskBotpressUser,
     },
     {
-      priority,
-      via: {
-        channel: viaChannel,
-      },
+      priority: input.hitlSession?.priority,
     }
   )
 
diff --git a/packages/cli/package.json b/packages/cli/package.json
index e19b7b53a5d..0ded0e009a1 100644
--- a/packages/cli/package.json
+++ b/packages/cli/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@botpress/cli",
-  "version": "4.15.2",
+  "version": "4.16.1",
   "description": "Botpress CLI",
   "scripts": {
     "build": "pnpm run bundle && pnpm run template:gen",
diff --git a/packages/cli/src/command-definitions.ts b/packages/cli/src/command-definitions.ts
index dfe60558843..be2a2729ca6 100644
--- a/packages/cli/src/command-definitions.ts
+++ b/packages/cli/src/command-definitions.ts
@@ -48,4 +48,18 @@ export default {
   dev: { description: 'Run your project in dev mode', schema: config.schemas.dev },
   lint: { description: 'EXPERIMENTAL: Lint an integration definition', schema: config.schemas.lint },
   chat: { description: 'EXPERIMENTAL: Chat with a bot directly from the CLI', schema: config.schemas.chat },
+  profiles: {
+    description: 'Commands for using CLI profiles',
+    subcommands: {
+      list: { description: 'List all available profiles', schema: config.schemas.listProfiles, alias: 'ls' },
+      active: {
+        description: 'Get the profile properties you are currently using',
+        schema: config.schemas.activeProfile,
+      },
+      use: {
+        description: 'Set the current profile',
+        schema: config.schemas.useProfile,
+      },
+    },
+  },
 } satisfies DefinitionTree
diff --git a/packages/cli/src/command-implementations/global-command.ts b/packages/cli/src/command-implementations/global-command.ts
index ab5a47b926a..39a966856a8 100644
--- a/packages/cli/src/command-implementations/global-command.ts
+++ b/packages/cli/src/command-implementations/global-command.ts
@@ -14,7 +14,7 @@ import * as utils from '../utils'
 import { BaseCommand } from './base-command'
 
 export type GlobalCommandDefinition = CommandDefinition
-export type GlobalCache = { apiUrl: string; token: string; workspaceId: string }
+export type GlobalCache = { apiUrl: string; token: string; workspaceId: string; activeProfile: string }
 
 export type ConfigurableGlobalPaths = {
   botpressHomeDir: string
@@ -25,7 +25,7 @@ export type ConstantGlobalPaths = typeof consts.fromHomeDir & typeof consts.from
 export type AllGlobalPaths = ConfigurableGlobalPaths & ConstantGlobalPaths
 
 const profileCredentialSchema = z.object({ apiUrl: z.string(), workspaceId: z.string(), token: z.string() })
-type ProfileCredentials = z.infer<typeof profileCredentialSchema>
+export type ProfileCredentials = z.infer<typeof profileCredentialSchema>
 
 class GlobalPaths extends utils.path.PathStore {
   public constructor(argv: CommandArgv) {
@@ -93,7 +93,7 @@ export abstract class GlobalCommand extends B
          'You are currently using credential command line arguments or environment variables as well as a profile. Your profile has overwritten the variables'
        )
      }
-      ;({ token, workspaceId, apiUrl } = await this._readProfileFromFS(this.argv.profile))
+      ;({ token, workspaceId, apiUrl } = await this.readProfileFromFS(this.argv.profile))
      this.logger.log(`Using profile "${this.argv.profile}"`, { prefix: '👤' })
    } else {
      token = credentials.token ?? (await cache.get('token'))
@@ -112,26 +112,48 @@ export abstract class GlobalCommand extends B
     return this.api.newClient({ apiUrl, token, workspaceId }, this.logger)
   }
 
-  private async _readProfileFromFS(profile: string): Promise<ProfileCredentials> {
+  protected async readProfileFromFS(profile: string): Promise<ProfileCredentials> {
+    const parsedProfiles = await this.readProfilesFromFS()
+
+    const profileData = parsedProfiles[profile]
+    if (!profileData) {
+      throw new errors.BotpressCLIError(
+        `Profile "${profile}" not found in "${this.globalPaths.abs.profilesPath}". Found profiles '${Object.keys(parsedProfiles).join("', '")}'.`
+      )
+    }
+
+    return profileData
+  }
+
+  protected async readProfilesFromFS(): Promise<Record<string, ProfileCredentials>> {
     if (!fs.existsSync(this.globalPaths.abs.profilesPath)) {
       throw new errors.BotpressCLIError(`Profile file not found at "${this.globalPaths.abs.profilesPath}"`)
     }
 
     const fileContent = await fs.promises.readFile(this.globalPaths.abs.profilesPath, 'utf-8')
     const parsedProfiles = JSON.parse(fileContent)
-    const zodParseResult = z.record(profileCredentialSchema).safeParse(parsedProfiles, {})
+    const zodParseResult = z.record(profileCredentialSchema).safeParse(parsedProfiles)
     if (!zodParseResult.success) {
       throw errors.BotpressCLIError.wrap(zodParseResult.error, 'Error parsing profiles: ')
     }
 
-    const profileData = parsedProfiles[profile]
-    if (!profileData) {
-      throw new errors.BotpressCLIError(
-        `Profile "${profile}" not found in "${this.globalPaths.abs.profilesPath}". Found profiles '${Object.keys(parsedProfiles).join("', '")}'.`
-      )
+    return zodParseResult.data
+  }
+
+  protected async writeProfileToFS(profileName: string, profile: ProfileCredentials): Promise<void> {
+    let profiles: Record<string, ProfileCredentials>
+    if (fs.existsSync(this.globalPaths.abs.profilesPath)) {
+      profiles = await this.readProfilesFromFS()
+    } else {
+      profiles = {}
     }
+    profiles[profileName] = profile
 
-    return parsedProfiles[profile]
+    await fs.promises.writeFile(
+      this.globalPaths.abs.profilesPath,
+      JSON.stringify({ [consts.defaultProfileName]: profiles.defaultProfileName, ...profiles }, null, 2),
+      'utf-8'
+    )
   }
 
   protected async ensureLoginAndCreateClient(credentials: YargsConfig) {
diff --git a/packages/cli/src/command-implementations/index.ts b/packages/cli/src/command-implementations/index.ts
index bebc42a90a8..79150846e1f 100644
--- a/packages/cli/src/command-implementations/index.ts
+++ b/packages/cli/src/command-implementations/index.ts
@@ -21,6 +21,7 @@ import { LintCommand } from './lint-command'
 import { LoginCommand } from './login-command'
 import { LogoutCommand } from './logout-command'
 import * as plugins from './plugin-commands'
+import * as profiles from './profile-commands'
 import { ReadCommand } from './read-command'
 import { ServeCommand } from './serve-command'
 
@@ -79,4 +80,11 @@ export default {
   dev: getHandler(DevCommand),
   lint: getHandler(LintCommand),
   chat: getHandler(ChatCommand),
+  profiles: {
+    subcommands: {
+      list: getHandler(profiles.ListProfilesCommand),
+      active: getHandler(profiles.ActiveProfileCommand),
+      use: getHandler(profiles.UseProfileCommand),
+    },
+  },
 } satisfies ImplementationTree
diff --git a/packages/cli/src/command-implementations/init-command.ts b/packages/cli/src/command-implementations/init-command.ts
index ef8d10fa4b7..4d6a2e9c397 100644
--- a/packages/cli/src/command-implementations/init-command.ts
+++ b/packages/cli/src/command-implementations/init-command.ts
@@ -21,21 +21,29 @@ export class InitCommand extends GlobalCommand {
     const projectType = await this._promptProjectType()
     const workDir = utils.path.absoluteFrom(utils.path.cwd(), this.argv.workDir)
 
-    if (projectType === 'bot') {
-      await this._initBot({ workDir })
-      return
-    }
+    try {
+      if (projectType === 'bot') {
+        await this._initBot({ workDir })
+        return
+      }
 
-    if (projectType === 'integration') {
-      const workspaceHandle = await this._promptWorkspaceHandle()
-      await this._initIntegration({ workDir, workspaceHandle })
-      return
-    }
+      if (projectType === 'integration') {
+        const workspaceHandle = await this._promptWorkspaceHandle()
+        await this._initIntegration({ workDir, workspaceHandle })
+        return
+      }
 
-    if (projectType === 'plugin') {
-      const workspaceHandle = await this._promptWorkspaceHandle()
-      await this._initPlugin({ workDir, workspaceHandle })
-      return
+      if (projectType === 'plugin') {
+        const workspaceHandle = await this._promptWorkspaceHandle()
+        await this._initPlugin({ workDir, workspaceHandle })
+        return
+      }
+    } catch (error) {
+      if (error instanceof errors.AbortedOperationError) {
+        this.logger.log(error.message)
+        return
+      }
+      throw error
     }
 
     type _assertion = utils.types.AssertNever
@@ -80,22 +88,14 @@ export class InitCommand extends GlobalCommand {
     const name = await this._getName('plugin', template.defaultProjectName)
     const { fullName, shortName } = this._getFullNameAndShortName({ workspaceHandle, name })
 
-    try {
-      await this._copy({
-        srcDir: template.absolutePath,
-        destDir: workDir,
-        name: shortName,
-        pkgJson: {
-          pluginName: fullName,
-        },
-      })
-    } catch (error) {
-      if (error instanceof errors.AbortedOperationError) {
-        this.logger.log('Aborted')
-        return
-      }
-      throw error
-    }
+    await this._copy({
+      srcDir: template.absolutePath,
+      destDir: workDir,
+      name: shortName,
+      pkgJson: {
+        pluginName: fullName,
+      },
+    })
 
     this.logger.success(`Plugin project initialized in ${chalk.bold(pathlib.join(workDir, shortName))}`)
   }
diff --git a/packages/cli/src/command-implementations/login-command.ts b/packages/cli/src/command-implementations/login-command.ts
index f1d2e1cb39f..0e836c91931 100644
--- a/packages/cli/src/command-implementations/login-command.ts
+++ b/packages/cli/src/command-implementations/login-command.ts
@@ -1,12 +1,32 @@
 import * as client from '@botpress/client'
+import * as fs from 'fs'
 import * as paging from '../api/paging'
 import type commandDefinitions from '../command-definitions'
+import * as consts from '../consts'
 import * as errors from '../errors'
 import { GlobalCommand } from './global-command'
 
 export type LoginCommandDefinition = typeof commandDefinitions.login
 export class LoginCommand extends GlobalCommand<LoginCommandDefinition> {
   public async run(): Promise<void> {
+    let profileName = consts.defaultProfileName
+    if (this.argv.profile) {
+      let profileExists: boolean = false
+      if (fs.existsSync(this.globalPaths.abs.profilesPath)) {
+        const profiles = await this.readProfilesFromFS()
+        profileExists = profiles[this.argv.profile] !== undefined
+      }
+      if (profileExists) {
+        const overwrite = await this.prompt.confirm(
+          `This command will overwrite the existing profile '${this.argv.profile}'. Do you want to continue?`
+        )
+        if (!overwrite) throw new errors.AbortedOperationError()
+      } else {
+        this.logger.log(`This command will create new profile '${this.argv.profile}'`, { prefix: 'ℹ︎' })
+      }
+      profileName = this.argv.profile
+    }
+
     const promptedToken = await this.globalCache.sync('token', this.argv.token, async (previousToken) => {
       const prompted = await this.prompt.text('Enter your Personal Access Token', {
         initial: previousToken,
@@ -18,7 +38,9 @@ export class LoginCommand extends GlobalCommand<LoginCommandDefinition> {
       return prompted
     })
-
+    if (this.argv.apiUrl !== consts.defaultBotpressApiUrl) {
+      this.logger.log(`Using custom api url ${this.argv.apiUrl} to try fetching workspaces`, { prefix: '🔗' })
+    }
     const promptedWorkspaceId = await this.globalCache.sync('workspaceId', this.argv.workspaceId, async (defaultId) => {
       const tmpClient = new client.Client({ apiUrl: this.argv.apiUrl, token: promptedToken }) // no workspaceId yet
       const userWorkspaces = await paging
@@ -56,6 +78,12 @@ export class LoginCommand extends GlobalCommand<LoginCommandDefinition> {
         throw errors.BotpressCLIError.wrap(thrown, 'Login failed. Please check your credentials')
       })
 
+    await this.writeProfileToFS(profileName, {
+      apiUrl: this.argv.apiUrl,
+      token: promptedToken,
+      workspaceId: promptedWorkspaceId,
+    })
+
     this.logger.success('Logged In')
   }
 }
diff --git a/packages/cli/src/command-implementations/profile-commands.ts b/packages/cli/src/command-implementations/profile-commands.ts
new file mode 100644
index 00000000000..6ed09ae289d
--- /dev/null
+++ b/packages/cli/src/command-implementations/profile-commands.ts
@@ -0,0 +1,78 @@
+import chalk from 'chalk'
+import type commandDefinitions from '../command-definitions'
+import * as consts from '../consts'
+import * as errors from '../errors'
+import * as utils from '../utils'
+import { GlobalCache, GlobalCommand, ProfileCredentials } from './global-command'
+
+export type ActiveProfileCommandDefinition = typeof commandDefinitions.profiles.subcommands.active
+export class ActiveProfileCommand extends GlobalCommand<ActiveProfileCommandDefinition> {
+  public async run(): Promise<void> {
+    let activeProfileName = await this.globalCache.get('activeProfile')
+
+    if (!activeProfileName) {
+      this.logger.log(`No active profile set, defaulting to ${consts.defaultProfileName}`)
+      activeProfileName = consts.defaultProfileName
+      await this.globalCache.set('activeProfile', activeProfileName)
+    }
+
+    const profile = await this.readProfileFromFS(activeProfileName)
+    this.logger.log('Active profile:')
+    this.logger.json({ [activeProfileName]: profile })
+  }
+}
+
+export type ListProfilesCommandDefinition = typeof commandDefinitions.profiles.subcommands.list
+export class ListProfilesCommand extends GlobalCommand<ListProfilesCommandDefinition> {
+  public async run(): Promise<void> {
+    const profiles = await this.readProfilesFromFS()
+    if (Object.keys(profiles).length === 0) {
+      this.logger.log('No profiles found')
+      return
+    }
+    const activeProfileName = await this.globalCache.get('activeProfile')
+    const profileNames = Object.keys(profiles)
+    this.logger.log(`Active profile: '${chalk.bold(activeProfileName)}'`)
+    this.logger.json(profileNames)
+  }
+}
+
+export type UseProfileCommandDefinition = typeof commandDefinitions.profiles.subcommands.use
+export class UseProfileCommand extends GlobalCommand<UseProfileCommandDefinition> {
+  public async run(): Promise<void> {
+    if (this.argv.profileToUse) {
+      const profile = await this.readProfileFromFS(this.argv.profileToUse)
+      await this.globalCache.set('activeProfile', this.argv.profileToUse)
+      await _updateGlobalCache({ globalCache: this.globalCache, profileName: this.argv.profileToUse, profile })
+      return
+    }
+    const profiles = await this.readProfilesFromFS()
+    const choices = Object.entries(profiles).map(([profileName, _]) => ({
+      title: profileName,
+      description: '',
+      value: profileName,
+    }))
+    const selectedProfile = await this.prompt.select('Select the profile you want to use.', { choices })
+
+    if (!selectedProfile) {
+      this.logger.log('No profile selected, aborting.')
+      return
+    }
+
+    const profile = profiles[selectedProfile]
+    if (!profile) throw new errors.BotpressCLIError('The selected profile could not be read')
+    await this.globalCache.set('activeProfile', selectedProfile)
+    await _updateGlobalCache({ globalCache: this.globalCache, profileName: selectedProfile, profile })
+  }
+}
+
+const _updateGlobalCache = async (props: {
+  globalCache: utils.cache.FSKeyValueCache<GlobalCache>
+  profileName: string
+  profile: ProfileCredentials
+}): Promise<void> => {
+  await props.globalCache.set('activeProfile', props.profileName)
+  await props.globalCache.set('apiUrl', props.profile.apiUrl)
+  await props.globalCache.set('token', props.profile.token)
+  await props.globalCache.set('workspaceId', props.profile.workspaceId)
+}
diff --git a/packages/cli/src/config.ts b/packages/cli/src/config.ts
index bbe020674fb..433308ae327 100644
--- a/packages/cli/src/config.ts
+++ b/packages/cli/src/config.ts
@@ -127,7 +127,8 @@ const globalSchema = {
   },
   profile: {
     type: 'string',
-    description: 'The CLI profile defined in the $BP_BOTPRESS_HOME/profiles.json json format file',
+    description: 'The CLI profile defined in the $BP_BOTPRESS_HOME/profiles.json',
+    alias: 'p',
   },
 } satisfies CommandSchema
 
@@ -362,6 +363,24 @@ const chatSchema = {
   },
 } satisfies CommandSchema
 
+const listProfilesSchema = {
+  ...globalSchema,
+} satisfies CommandSchema
+
+const activeProfileSchema = {
+  ...globalSchema,
+} satisfies CommandSchema
+
+const useProfileSchema = {
+  ...globalSchema,
+  profileToUse: {
+    type: 'string',
+    description: 'The CLI profile defined in the $BP_BOTPRESS_HOME/profiles.json',
+    positional: true,
+    idx: 0,
+  },
+} satisfies CommandSchema
+
 // exports
 
 export const schemas = {
@@ -395,4 +414,7 @@ export const schemas = {
   dev: devSchema,
   lint: lintSchema,
   chat: chatSchema,
+  listProfiles: listProfilesSchema,
+  activeProfile: activeProfileSchema,
+  useProfile: useProfileSchema,
 } as const
diff --git a/packages/cli/src/consts.ts b/packages/cli/src/consts.ts
index bd6dbd31a1e..0b21003def5 100644
--- a/packages/cli/src/consts.ts
+++ b/packages/cli/src/consts.ts
@@ -19,6 +19,7 @@ export const installDirName = 'bp_modules'
 export const outDirName = '.botpress'
 export const distDirName = 'dist'
 export const profileFileName = 'profiles.json'
+export const defaultProfileName = 'default'
 
 export const fromCliRootDir = {}
diff --git a/packages/common/package.json b/packages/common/package.json
index 69226e310e6..edfd6c7e1c7 100644
--- a/packages/common/package.json
+++ b/packages/common/package.json
@@ -10,7 +10,7 @@
     "@botpress/sdk": "workspace:*",
     "dedent": "^1.6.0",
     "marked": "^15.0.1",
-    "openai": "^4.86.1",
+    "openai": "^5.12.1",
     "preact": "^10.26.6",
     "preact-render-to-string": "^6.5.13"
   }
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index d9094406d0e..ec655f145cd 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -1208,8 +1208,8 @@ importers:
         specifier: workspace:*
         version: link:../../packages/sdk
       openai:
-        specifier: ^4.86.1
-        version: 4.86.1(zod@3.22.4)
+        specifier: ^5.12.1
+        version: 5.12.1(zod@3.22.4)
     devDependencies:
       '@botpress/cli':
         specifier: workspace:*
@@ -2297,8 +2297,8 @@ importers:
         specifier: ^15.0.1
         version: 15.0.1
       openai:
-        specifier: ^4.86.1
-        version: 4.86.1(zod@3.22.4)
+        specifier: ^5.12.1
+        version: 5.12.1(zod@3.22.4)
       preact:
         specifier: ^10.26.6
         version: 10.26.6
@@ -15501,6 +15501,21 @@ packages:
       - encoding
     dev: false
 
+  /openai@5.12.1(zod@3.22.4):
+    resolution: {integrity: sha512-26s536j4Fi7P3iUma1S9H33WRrw0Qu8pJ2nYJHffrlKHPU0JK4d0r3NcMgqEcAeTdNLGYNyoFsqN4g4YE9vutg==}
+    hasBin: true
+    peerDependencies:
+      ws: ^8.18.0
+      zod: ^3.23.8
+    peerDependenciesMeta:
+      ws:
+        optional: true
+      zod:
+        optional: true
+    dependencies:
+      zod: 3.22.4
+    dev: false
+
   /openapi-types@12.1.3:
     resolution: {integrity: sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==}