Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion integrations/openai/integration.definition.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ export default new IntegrationDefinition({
title: 'OpenAI',
description:
'Gain access to OpenAI models for text generation, speech synthesis, audio transcription, and image generation.',
version: '15.0.4',
version: '16.0.0',
readme: 'hub.md',
icon: 'icon.svg',
entities: {
Expand Down
2 changes: 1 addition & 1 deletion integrations/openai/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
"@botpress/client": "workspace:*",
"@botpress/common": "workspace:*",
"@botpress/sdk": "workspace:*",
"openai": "^4.86.1"
"openai": "^5.12.1"
},
"devDependencies": {
"@botpress/cli": "workspace:*",
Expand Down
93 changes: 76 additions & 17 deletions integrations/openai/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,9 @@ import { llm, speechToText, textToImage } from '@botpress/common'
import crypto from 'crypto'
import { TextToSpeechPricePer1MCharacters } from 'integration.definition'
import OpenAI from 'openai'
import { ChatCompletionReasoningEffort, ImageGenerateParams, Images } from 'openai/resources'
import { ImageGenerateParams, Images } from 'openai/resources'
import { SpeechCreateParams } from 'openai/resources/audio/speech'
import { ChatCompletionReasoningEffort } from 'openai/resources/chat/completions'
import { LanguageModelId, ImageModelId, SpeechToTextModelId } from './schemas'
import * as bp from '.botpress'

Expand All @@ -20,8 +21,50 @@ const DEFAULT_IMAGE_MODEL_ID: ImageModelId = 'dall-e-3-standard-1024'
// https://openai.com/api/pricing/
const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
// IMPORTANT: Only full model names should be supported here, as the short model names can be pointed by OpenAI at any time to a newer model with different pricing.
'gpt-5-2025-08-07': {
name: 'GPT-5',
description:
"GPT-5 is OpenAI's latest and most advanced AI model. It is a reasoning model that chooses the best way to respond based on task complexity and user intent. GPT-5 delivers expert-level performance across coding, math, writing, health, and visual perception, with improved accuracy, speed, and reduced hallucinations. It excels in complex tasks, long-context understanding, multimodal inputs (text and images), and safe, nuanced responses.",
tags: ['recommended', 'reasoning', 'general-purpose'],
input: {
costPer1MTokens: 1.25,
maxTokens: 400_000,
},
output: {
costPer1MTokens: 10,
maxTokens: 128_000,
},
},
'gpt-5-mini-2025-08-07': {
name: 'GPT-5 Mini',
description:
'GPT-5 Mini is a lightweight and cost-effective version of GPT-5, optimized for applications where speed and efficiency matter more than full advanced capabilities. It is designed for cost-sensitive use cases such as chatbots, content generation, and high-volume usage, striking a balance between performance and affordability, making it suitable for simpler tasks that do not require deep multi-step reasoning or the full reasoning power of GPT-5.',
tags: ['recommended', 'reasoning', 'general-purpose'],
input: {
costPer1MTokens: 0.25,
maxTokens: 400_000,
},
output: {
costPer1MTokens: 2,
maxTokens: 128_000,
},
},
'gpt-5-nano-2025-08-07': {
name: 'GPT-5 Nano',
description:
'GPT-5 Nano is an ultra-lightweight version of GPT-5 optimized for speed and very low latency, making it ideal for use cases like simple chatbots, basic content generation, summarization, and classification tasks.',
tags: ['low-cost', 'reasoning', 'general-purpose'],
input: {
costPer1MTokens: 0.05,
maxTokens: 400_000,
},
output: {
costPer1MTokens: 0.4,
maxTokens: 128_000,
},
},
'o4-mini-2025-04-16': {
name: 'GPT o4-mini',
name: 'o4-mini',
description:
"o4-mini is OpenAI's latest small o-series model. It's optimized for fast, effective reasoning with exceptionally efficient performance in coding and visual tasks.",
tags: ['reasoning', 'vision'],
Expand All @@ -35,7 +78,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
},
},
'o3-2025-04-16': {
name: 'GPT o3',
name: 'o3',
description:
'o3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following. Use it to think through multi-step problems that involve analysis across text, code, and images.',
tags: ['reasoning', 'vision'],
Expand All @@ -49,7 +92,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
},
},
'gpt-4.1-2025-04-14': {
name: 'GPT 4.1',
name: 'GPT-4.1',
description:
'GPT 4.1 is our flagship model for complex tasks. It is well suited for problem solving across domains. The knowledge cutoff is June 2024.',
tags: ['recommended', 'vision', 'general-purpose'],
Expand All @@ -63,7 +106,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
},
},
'gpt-4.1-mini-2025-04-14': {
name: 'GPT 4.1 Mini',
name: 'GPT-4.1 Mini',
description:
'GPT 4.1 mini provides a balance between intelligence, speed, and cost that makes it an attractive model for many use cases. The knowledge cutoff is June 2024.',
tags: ['recommended', 'vision', 'general-purpose'],
Expand All @@ -77,7 +120,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
},
},
'gpt-4.1-nano-2025-04-14': {
name: 'GPT 4.1 Nano',
name: 'GPT-4.1 Nano',
description: 'GPT-4.1 nano is the fastest, most cost-effective GPT 4.1 model. The knowledge cutoff is June 2024.',
tags: ['low-cost', 'vision', 'general-purpose'],
input: {
Expand All @@ -90,7 +133,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
},
},
'o3-mini-2025-01-31': {
name: 'GPT o3-mini',
name: 'o3-mini',
description:
'o3-mini is the most recent small reasoning model from OpenAI, providing high intelligence at the same cost and latency targets of o1-mini. o3-mini also supports key developer features, like Structured Outputs, function calling, Batch API, and more. Like other models in the o-series, it is designed to excel at science, math, and coding tasks. The knowledge cutoff for o3-mini models is October, 2023.',
tags: ['reasoning', 'general-purpose'],
Expand All @@ -104,7 +147,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
},
},
'o1-2024-12-17': {
name: 'GPT o1',
name: 'o1',
description:
'The o1 model is designed to solve hard problems across domains. The o1 series of models are trained with reinforcement learning to perform complex reasoning. o1 models think before they answer, producing a long internal chain of thought before responding to the user.',
tags: ['reasoning', 'vision', 'general-purpose'],
Expand All @@ -118,7 +161,7 @@ const languageModels: Record<LanguageModelId, llm.ModelDetails> = {
},
},
'o1-mini-2024-09-12': {
name: 'GPT o1-mini',
name: 'o1-mini',
description:
'The o1-mini model is a fast and affordable reasoning model for specialized tasks. The o1 series of models are trained with reinforcement learning to perform complex reasoning. o1 models think before they answer, producing a long internal chain of thought before responding to the user.',
tags: ['reasoning', 'vision', 'general-purpose'],
Expand Down Expand Up @@ -273,7 +316,7 @@ const SECONDS_IN_A_DAY = 24 * 60 * 60

const provider = 'OpenAI'

const SupportedReasoningEfforts = ['low', 'medium', 'high'] as ChatCompletionReasoningEffort[]
const SupportedReasoningEfforts = ['minimal', 'low', 'medium', 'high'] as ChatCompletionReasoningEffort[]

export default new bp.Integration({
register: async () => {},
Expand All @@ -289,17 +332,28 @@ export default new bp.Integration({
models: languageModels,
defaultModel: DEFAULT_LANGUAGE_MODEL_ID,
overrideRequest: (request) => {
if (input.model?.id.startsWith('o1-') || input.model?.id.startsWith('o3-')) {
if (input.reasoningEffort === 'none') {
const isGPT5 =
input.model?.id === 'gpt-5-2025-08-07' ||
input.model?.id === 'gpt-5-mini-2025-08-07' ||
input.model?.id === 'gpt-5-nano-2025-08-07'

const isOReasoningModel =
input.model?.id.startsWith('o1-') ||
input.model?.id.startsWith('o3-') ||
input.model?.id.startsWith('o4-')

if (isGPT5 || isOReasoningModel) {
if (input.reasoningEffort === undefined && isGPT5) {
// GPT-5 is a hybrid model but it doesn't support optional reasoning, so if reasoning effort isn't specified we assume the user wants to use the least amount of reasoning possible (to reduce cost/latency).
request.reasoning_effort = 'minimal'
} else if (input.reasoningEffort === 'none') {
const acceptedValues = SupportedReasoningEfforts.map((x) => `"${x}"`)
.map((x, i) => (i === SupportedReasoningEfforts.length - 1 ? `or ${x}` : x))
.join(', ')
throw new InvalidPayloadError(
`Using "none" to disable reasoning is not supported with OpenAI reasoning models, please use ${acceptedValues} instead or switch to a non-reasoning model`
)
}

if (SupportedReasoningEfforts.includes(input.reasoningEffort as any)) {
} else if (SupportedReasoningEfforts.includes(input.reasoningEffort as any)) {
request.reasoning_effort = input.reasoningEffort as ChatCompletionReasoningEffort
} else {
request.reasoning_effort = 'medium'
Expand All @@ -310,7 +364,12 @@ export default new bp.Integration({
)
}

// The o1 models don't allow setting temperature
if (isGPT5) {
// GPT-5 doesn't support stop sequences
request.stop = undefined
}

// Reasoning models don't allow setting temperature
delete request.temperature
}
return request
Expand Down Expand Up @@ -353,7 +412,7 @@ export default new bp.Integration({
response_format: 'url',
})

const temporaryImageUrl = result.data[0]?.url
const temporaryImageUrl = result.data?.[0]?.url
if (!temporaryImageUrl) {
throw new Error('No image was returned by OpenAI')
}
Expand Down
3 changes: 3 additions & 0 deletions integrations/openai/src/schemas.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@ import { z } from '@botpress/sdk'

export const languageModelId = z
.enum([
'gpt-5-2025-08-07',
'gpt-5-mini-2025-08-07',
'gpt-5-nano-2025-08-07',
'o4-mini-2025-04-16',
'o3-2025-04-16',
'gpt-4.1-2025-04-14',
Expand Down
6 changes: 0 additions & 6 deletions integrations/zendesk/hub.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,3 @@ Password: `API_TOKEN`
3. Enable the integration to complete the setup.

Once these steps are completed, your Zendesk articles will automatically sync to the specified knowledge base in Botpress. You can manually sync by using the "Sync KB" action.

### HITL

#### Via Channel Types

On the "Start HITL" card you can use only "via channel" types documented by [Zendesk Docs](https://developer.zendesk.com/documentation/ticketing/reference-guides/via-types)
9 changes: 1 addition & 8 deletions integrations/zendesk/integration.definition.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ import { actions, events, configuration, channels, states, user } from './src/de
export default new sdk.IntegrationDefinition({
name: 'zendesk',
title: 'Zendesk',
version: '2.8.0',
version: '2.8.1',
icon: 'icon.svg',
description:
'Optimize your support workflow. Trigger workflows from ticket updates as well as manage tickets, access conversations, and engage with customers.',
Expand All @@ -27,13 +27,6 @@ export default new sdk.IntegrationDefinition({
.title('Ticket Priority')
.describe('Priority of the ticket. Leave empty for default priority.')
.optional(),
viaChannel: sdk.z
.string()
.title('Via Channel')
.describe(
'Via Channel to use (example: "whatsapp", "instagram_dm" ), only use values documented by Zendesk, check the "Info" tab at the Zendesk integration configuration page for more details. Leave empty or use an invalid channel type and you will get "API".'
)
.optional(),
chatbotName: sdk.z
.string()
.title('Chatbot Name')
Expand Down
7 changes: 1 addition & 6 deletions integrations/zendesk/src/actions/hitl.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,6 @@ import * as bp from '.botpress'
export const startHitl: bp.IntegrationProps['actions']['startHitl'] = async (props) => {
const { ctx, input, client } = props

const { viaChannel, priority } = input.hitlSession || {}

const downstreamBotpressUser = await client.getUser({ id: ctx.botUserId })
const chatbotName = input.hitlSession?.chatbotName ?? downstreamBotpressUser.user.name ?? 'Botpress'
const chatbotPhotoUrl =
Expand All @@ -29,10 +27,7 @@ export const startHitl: bp.IntegrationProps['actions']['startHitl'] = async (pro
id: zendeskBotpressUser,
},
{
priority,
via: {
channel: viaChannel,
},
priority: input.hitlSession?.priority,
}
)

Expand Down
2 changes: 1 addition & 1 deletion packages/cli/package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@botpress/cli",
"version": "4.15.2",
"version": "4.16.1",
"description": "Botpress CLI",
"scripts": {
"build": "pnpm run bundle && pnpm run template:gen",
Expand Down
14 changes: 14 additions & 0 deletions packages/cli/src/command-definitions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -48,4 +48,18 @@ export default {
dev: { description: 'Run your project in dev mode', schema: config.schemas.dev },
lint: { description: 'EXPERIMENTAL: Lint an integration definition', schema: config.schemas.lint },
chat: { description: 'EXPERIMENTAL: Chat with a bot directly from the CLI', schema: config.schemas.chat },
profiles: {
description: 'Commands for using CLI profiles',
subcommands: {
list: { description: 'List all available profiles', schema: config.schemas.listProfiles, alias: 'ls' },
active: {
description: 'Get the profile properties you are currently using',
schema: config.schemas.activeProfile,
},
use: {
description: 'Set the current profile',
schema: config.schemas.useProfile,
},
},
},
} satisfies DefinitionTree
44 changes: 33 additions & 11 deletions packages/cli/src/command-implementations/global-command.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import * as utils from '../utils'
import { BaseCommand } from './base-command'

export type GlobalCommandDefinition = CommandDefinition<typeof config.schemas.global>
export type GlobalCache = { apiUrl: string; token: string; workspaceId: string }
export type GlobalCache = { apiUrl: string; token: string; workspaceId: string; activeProfile: string }

export type ConfigurableGlobalPaths = {
botpressHomeDir: string
Expand All @@ -25,7 +25,7 @@ export type ConstantGlobalPaths = typeof consts.fromHomeDir & typeof consts.from
export type AllGlobalPaths = ConfigurableGlobalPaths & ConstantGlobalPaths

const profileCredentialSchema = z.object({ apiUrl: z.string(), workspaceId: z.string(), token: z.string() })
type ProfileCredentials = z.infer<typeof profileCredentialSchema>
export type ProfileCredentials = z.infer<typeof profileCredentialSchema>

class GlobalPaths extends utils.path.PathStore<keyof AllGlobalPaths> {
public constructor(argv: CommandArgv<GlobalCommandDefinition>) {
Expand Down Expand Up @@ -93,7 +93,7 @@ export abstract class GlobalCommand<C extends GlobalCommandDefinition> extends B
'You are currently using credential command line arguments or environment variables as well as a profile. Your profile has overwritten the variables'
)
}
;({ token, workspaceId, apiUrl } = await this._readProfileFromFS(this.argv.profile))
;({ token, workspaceId, apiUrl } = await this.readProfileFromFS(this.argv.profile))
this.logger.log(`Using profile "${this.argv.profile}"`, { prefix: '👤' })
} else {
token = credentials.token ?? (await cache.get('token'))
Expand All @@ -112,26 +112,48 @@ export abstract class GlobalCommand<C extends GlobalCommandDefinition> extends B
return this.api.newClient({ apiUrl, token, workspaceId }, this.logger)
}

private async _readProfileFromFS(profile: string): Promise<ProfileCredentials> {
protected async readProfileFromFS(profile: string): Promise<ProfileCredentials> {
  // Load every profile stored on disk, then select the one requested.
  const allProfiles = await this.readProfilesFromFS()
  const credentials = allProfiles[profile]

  if (!credentials) {
    // Include the list of known profile names so the user can spot typos.
    const knownProfiles = Object.keys(allProfiles).join("', '")
    throw new errors.BotpressCLIError(
      `Profile "${profile}" not found in "${this.globalPaths.abs.profilesPath}". Found profiles '${knownProfiles}'.`
    )
  }

  return credentials
}

/**
 * Reads and validates the CLI profiles file from disk.
 *
 * @returns a mapping of profile name to its credentials (apiUrl, workspaceId, token)
 * @throws BotpressCLIError when the file is missing or does not match the expected schema
 */
protected async readProfilesFromFS(): Promise<Record<string, ProfileCredentials>> {
  if (!fs.existsSync(this.globalPaths.abs.profilesPath)) {
    throw new errors.BotpressCLIError(`Profile file not found at "${this.globalPaths.abs.profilesPath}"`)
  }
  const fileContent = await fs.promises.readFile(this.globalPaths.abs.profilesPath, 'utf-8')
  // JSON.parse returns untyped data; validate it with zod before trusting the shape.
  const parsedProfiles = JSON.parse(fileContent)

  const zodParseResult = z.record(profileCredentialSchema).safeParse(parsedProfiles)
  if (!zodParseResult.success) {
    throw errors.BotpressCLIError.wrap(zodParseResult.error, 'Error parsing profiles: ')
  }

  // Return the zod-validated data (not the raw parse) so the type is guaranteed.
  return zodParseResult.data
}

/**
 * Creates or updates a named profile in the profiles file on disk.
 *
 * Existing profiles are preserved; the entry for `profileName` is overwritten.
 * The default profile (consts.defaultProfileName) is serialized first so it
 * always appears at the top of the JSON file.
 *
 * @param profileName - key under which the credentials are stored
 * @param profile - credentials (apiUrl, workspaceId, token) to persist
 */
protected async writeProfileToFS(profileName: string, profile: ProfileCredentials): Promise<void> {
  let profiles: Record<string, ProfileCredentials>
  if (fs.existsSync(this.globalPaths.abs.profilesPath)) {
    profiles = await this.readProfilesFromFS()
  } else {
    profiles = {}
  }
  profiles[profileName] = profile

  await fs.promises.writeFile(
    this.globalPaths.abs.profilesPath,
    // BUG FIX: was `profiles.defaultProfileName`, which looks up the literal
    // key "defaultProfileName" instead of the configured default profile name.
    // Use a computed access so the default profile is actually hoisted first.
    // (JSON.stringify drops the key entirely when no default profile exists.)
    JSON.stringify({ [consts.defaultProfileName]: profiles[consts.defaultProfileName], ...profiles }, null, 2),
    'utf-8'
  )
}

protected async ensureLoginAndCreateClient(credentials: YargsConfig<typeof config.schemas.credentials>) {
Expand Down
Loading
Loading