diff --git a/integrations/charts/integration.definition.ts b/integrations/charts/integration.definition.ts
index e2610158708..ec596ade9cd 100644
--- a/integrations/charts/integration.definition.ts
+++ b/integrations/charts/integration.definition.ts
@@ -5,7 +5,7 @@ import { actionDefinitions } from 'src/definitions/actions'
export default new IntegrationDefinition({
name: 'charts',
description: 'Easily generate a variety of charts, including line, bar, pie, and scatter plots, etc.',
- version: '0.2.4',
+ version: '0.2.5',
readme: 'hub.md',
icon: 'icon.svg',
actions: actionDefinitions,
diff --git a/integrations/charts/src/actions/utils.ts b/integrations/charts/src/actions/utils.ts
index be4981381d5..c36ecc0106e 100644
--- a/integrations/charts/src/actions/utils.ts
+++ b/integrations/charts/src/actions/utils.ts
@@ -24,6 +24,7 @@ export const buildChart = async (props: BuildChartProps) => {
content: response.data,
index: false,
accessPolicies: ['public_content'],
+ publicContentImmediatelyAccessible: true,
})
return file.url!
diff --git a/integrations/mistral-ai/hub.md b/integrations/mistral-ai/hub.md
new file mode 100644
index 00000000000..e76775cbfce
--- /dev/null
+++ b/integrations/mistral-ai/hub.md
@@ -0,0 +1,5 @@
+# Mistral AI Integration
+
+This integration allows your bot to choose from a curated list of [Mistral AI Models](https://docs.mistral.ai/getting-started/models) as the LLM of your choice for a node, workflow, or skill in your bot.
+
+Usage is charged to the AI Spend of your workspace in Botpress Cloud at the [same pricing](https://mistral.ai/pricing#api-pricing) (at cost) as directly with Mistral.
diff --git a/integrations/mistral-ai/icon.svg b/integrations/mistral-ai/icon.svg
new file mode 100644
index 00000000000..84a67786554
--- /dev/null
+++ b/integrations/mistral-ai/icon.svg
@@ -0,0 +1,19 @@
+
diff --git a/integrations/mistral-ai/integration.definition.ts b/integrations/mistral-ai/integration.definition.ts
new file mode 100644
index 00000000000..56affb16901
--- /dev/null
+++ b/integrations/mistral-ai/integration.definition.ts
@@ -0,0 +1,26 @@
+import { IntegrationDefinition, z } from '@botpress/sdk'
+import { ModelId } from 'src/schemas'
+import llm from './bp_modules/llm'
+
+export default new IntegrationDefinition({
+ name: 'mistral-ai',
+ title: 'Mistral AI',
+ description: 'Access a curated list of Mistral AI models to set as your chosen LLM.',
+ version: '0.1.0',
+ readme: 'hub.md',
+ icon: 'icon.svg',
+ entities: {
+ modelRef: {
+ schema: z.object({
+ id: ModelId,
+ }),
+ },
+ },
+ secrets: {
+ MISTRAL_API_KEY: {
+ description: 'Mistral AI API key',
+ },
+ },
+}).extend(llm, ({ entities }) => ({
+ entities: { modelRef: entities.modelRef },
+}))
diff --git a/integrations/mistral-ai/package.json b/integrations/mistral-ai/package.json
new file mode 100644
index 00000000000..5d6985bddba
--- /dev/null
+++ b/integrations/mistral-ai/package.json
@@ -0,0 +1,23 @@
+{
+ "name": "@botpresshub/mistral-ai",
+ "scripts": {
+ "build": "bp add -y && bp build",
+ "check:type": "tsc --noEmit",
+ "check:bplint": "bp lint"
+ },
+ "private": true,
+ "dependencies": {
+ "@botpress/client": "workspace:*",
+ "@botpress/common": "workspace:*",
+ "@botpress/sdk": "workspace:*",
+ "@mistralai/mistralai": "^1.11.0"
+ },
+ "devDependencies": {
+ "@botpress/cli": "workspace:*",
+ "@botpress/sdk": "workspace:*",
+ "@botpresshub/llm": "workspace:*"
+ },
+ "bpDependencies": {
+ "llm": "../../interfaces/llm"
+ }
+}
diff --git a/integrations/mistral-ai/src/actions/generate-content.ts b/integrations/mistral-ai/src/actions/generate-content.ts
new file mode 100644
index 00000000000..d93ddebe324
--- /dev/null
+++ b/integrations/mistral-ai/src/actions/generate-content.ts
@@ -0,0 +1,458 @@
+import { InvalidPayloadError } from '@botpress/client'
+import { llm } from '@botpress/common'
+import { IntegrationLogger, z } from '@botpress/sdk'
+import { Mistral } from '@mistralai/mistralai'
+import type {
+ Messages,
+ ChatCompletionRequest,
+ ChatCompletionResponse,
+ Tool,
+ ToolChoice,
+ ToolChoiceEnum,
+ ContentChunk,
+ ToolCall,
+ FinishReason,
+} from '@mistralai/mistralai/models/components'
+import {
+ SDKError,
+ HTTPValidationError,
+ ResponseValidationError,
+ HTTPClientError,
+} from '@mistralai/mistralai/models/errors'
+import { ModelId } from 'src/schemas'
+
+const MistralAPIErrorSchema = z.object({
+ error: z
+ .object({
+ message: z.string(),
+ type: z.string().optional(),
+ code: z.string().optional(),
+ })
+ .optional(),
+ message: z.string().optional(), // Some errors might have message at root
+ detail: z
+ .array(
+ z.object({
+ loc: z.array(z.union([z.string(), z.number()])),
+ msg: z.string(),
+ type: z.string(),
+ })
+ )
+ .optional(), // For 422 validation errors
+})
+
+export async function generateContent(
+ input: llm.GenerateContentInput,
+ mistral: Mistral,
+ logger: IntegrationLogger,
+ params: {
+    models: Record<ModelId, llm.ModelDetails>
+ defaultModel: ModelId
+ }
+): Promise<llm.GenerateContentOutput> {
+ const modelId = (input.model?.id || params.defaultModel) as ModelId
+
+ const model = params.models[modelId]
+
+ if (!model) {
+ throw new InvalidPayloadError(
+ `Model ID "${modelId}" is not allowed, supported model IDs are: ${Object.keys(params.models).join(', ')}`
+ )
+ }
+
+ if (input.messages.length === 0 && !input.systemPrompt) {
+ throw new InvalidPayloadError('At least one message or a system prompt is required')
+ }
+
+ if (input.maxTokens && input.maxTokens > model.output.maxTokens) {
+ throw new InvalidPayloadError(
+ `maxTokens must be less than or equal to ${model.output.maxTokens} for model ID "${modelId}`
+ )
+ }
+
+ if (input.responseFormat === 'json_object') {
+ input.systemPrompt =
+ (input.systemPrompt || '') +
+ '\n\nYour response must always be in valid JSON format and expressed as a JSON object.'
+ }
+
+ const messages: Messages[] = []
+
+ // Add system prompt
+ if (input.systemPrompt) {
+ messages.unshift({
+ role: 'system',
+ content: input.systemPrompt,
+ })
+ }
+
+ for (const message of input.messages) {
+ messages.push(mapToMistralMessage(message))
+ }
+
+ const request: ChatCompletionRequest = {
+ model: modelId,
+ maxTokens: input.maxTokens ?? model.output.maxTokens,
+ temperature: input.temperature,
+ topP: input.topP,
+ stop: input.stopSequences,
+ metadata: {
+ user_id: input.userId,
+ },
+ tools: mapToMistralTools(input),
+ toolChoice: mapToMistralToolChoice(input.toolChoice),
+ messages,
+ }
+
+ if (input.debug) {
+ logger.forBot().info('Request being sent to Mistral: ' + JSON.stringify(request, null, 2))
+ }
+
+ let response: ChatCompletionResponse | undefined
+
+ try {
+ response = await mistral.chat.complete(request)
+ } catch (thrown: unknown) {
+ // Validation errors (422)
+ if (thrown instanceof HTTPValidationError) {
+ // err has: statusCode, body, detail[]
+ if (thrown.detail && thrown.detail.length > 0) {
+ const validationMessages = thrown.detail.map((d) => `${d.loc.join('.')}: ${d.msg}`).join('; ')
+
+ if (input.debug) {
+ logger.forBot().error(`Mistral validation errors: ${JSON.stringify(thrown.detail, null, 2)}`)
+ }
+
+ throw llm.createUpstreamProviderFailedError(
+ thrown,
+ `Mistral validation error (${thrown.statusCode}): ${validationMessages}`
+ )
+ }
+ }
+
+ // General SDK/API errors
+ if (thrown instanceof SDKError) {
+ let errorMessage = thrown.message
+
+ // parse body for more details
+ try {
+ const parsedBody = JSON.parse(thrown.body)
+ const parsedError = MistralAPIErrorSchema.safeParse(parsedBody)
+
+ if (parsedError.success && parsedError.data.error) {
+ errorMessage = parsedError.data.error.message
+
+ input.debug && logger.forBot().error(`Mistral API error: ${JSON.stringify(parsedError.data, null, 2)}`)
+
+ const errorType = parsedError.data.error.type ? ` (${parsedError.data.error.type})` : ''
+ throw llm.createUpstreamProviderFailedError(
+ thrown,
+ `Mistral error ${thrown.statusCode}${errorType}: ${errorMessage}`
+ )
+ }
+ } catch (parseErr) {
+ const parseErrorMessage = parseErr instanceof Error ? parseErr.message : String(parseErr)
+ // use basic info
+ if (input.debug) {
+ logger.forBot().warn(`Could not parse Mistral error body: ${thrown.body}, parse error: ${parseErrorMessage}`)
+ }
+ }
+
+ throw llm.createUpstreamProviderFailedError(thrown, `Mistral error ${thrown.statusCode}: ${errorMessage}`)
+ }
+
+ // Response validation errors
+ if (thrown instanceof ResponseValidationError) {
+ // Response from Mistral was invalid/unexpected format
+ if (input.debug) {
+ logger.forBot().error(`Mistral response validation error: ${thrown.message}`)
+ }
+
+ throw llm.createUpstreamProviderFailedError(thrown, `Mistral response validation error: ${thrown.message}`)
+ }
+
+ // Network/client errors
+ if (thrown instanceof HTTPClientError) {
+ if (input.debug) {
+ logger.forBot().error(`Mistral client error (${thrown.name}): ${thrown.message}`)
+ }
+
+ throw llm.createUpstreamProviderFailedError(thrown, `Mistral client error (${thrown.name}): ${thrown.message}`)
+ }
+
+ // unknown errors
+ if (input.debug) {
+ logger.forBot().error(`Unexpected error calling Mistral: ${JSON.stringify(thrown, null, 2)}`)
+ }
+
+ const error = thrown instanceof Error ? thrown : Error(String(thrown))
+ throw llm.createUpstreamProviderFailedError(error, `Mistral error: ${error.message}`)
+ } finally {
+ if (input.debug && response) {
+ logger.forBot().info('Response received from Mistral: ' + JSON.stringify(response, null, 2))
+ }
+ }
+
+ // fallback to zero, as it's done in the OpenAI integration
+ const inputTokens = response.usage?.promptTokens ?? 0
+ const outputTokens = response.usage?.completionTokens ?? 0
+
+ const inputCost = calculateTokenCost(model.input.costPer1MTokens, inputTokens)
+ const outputCost = calculateTokenCost(model.output.costPer1MTokens, outputTokens)
+ const cost = inputCost + outputCost
+
+ return {
+ id: response.id,
+ provider: 'mistral-ai',
+ model: response.model,
+ choices: response.choices.map((choice) => ({
+ role: 'assistant',
+ // TODO: Investigate showing images, for now it's not supported by any other provider
+ type: 'text', // Mistral can return multimodal content, but we extract text only,
+ content: extractTextContent(choice.message.content),
+ index: choice.index,
+ stopReason: mapToStopReason(choice.finishReason),
+ toolCalls: mapFromMistralToolCalls(choice.message.toolCalls, logger),
+ })),
+ usage: {
+ inputTokens,
+ inputCost,
+ outputTokens,
+ outputCost,
+ },
+ botpress: {
+ cost, // DEPRECATED
+ },
+ }
+}
+
+function mapToMistralMessage(message: llm.Message): Messages {
+ // Handle special messages where the role is overridden (tool calls)
+ if (message.type === 'tool_result') {
+ if (!message.toolResultCallId) {
+ throw new InvalidPayloadError('`toolResultCallId` is required when message type is "tool_result"')
+ }
+
+ return {
+ role: 'tool',
+ toolCallId: message.toolResultCallId,
+ content: message.content as string,
+ }
+ } else if (message.type === 'tool_calls') {
+ if (!message.toolCalls || message.toolCalls.length === 0) {
+ throw new InvalidPayloadError('`toolCalls` must contain at least one tool call when type is "tool_calls"')
+ }
+
+ return {
+ role: 'assistant',
+ toolCalls: message.toolCalls.map(mapToMistralToolCall),
+ // content can be omitted or null for tool call messages
+ }
+ }
+
+ // Handle regular messages by role
+ switch (message.role) {
+ case 'user':
+ case 'assistant':
+ return mapStandardMessage(message)
+ default:
+ throw new InvalidPayloadError(`Message role "${message.role}" is not supported`)
+ }
+}
+
+function mapStandardMessage(message: llm.Message): Messages {
+ if (message.type === 'text') {
+ if (typeof message.content !== 'string') {
+ throw new InvalidPayloadError('`content` must be a string when message type is "text"')
+ }
+
+ return {
+ role: message.role,
+ content: message.content,
+ }
+ }
+
+ if (message.type === 'multipart') {
+ if (!Array.isArray(message.content)) {
+ throw new InvalidPayloadError('`content` must be an array when message type is "multipart"')
+ }
+
+ return {
+ role: message.role,
+ content: mapMultipartContent(message.content),
+ }
+ }
+
+ throw new InvalidPayloadError(`Message type "${message.type}" is not supported for ${message.role} messages`)
+}
+
+/** Map multipart content into Mistral (ContentChunk) format */
+function mapMultipartContent(content: NonNullable<llm.Message['content']>): ContentChunk[] {
+ if (typeof content === 'string') {
+ throw new InvalidPayloadError('Content must be an array for multipart messages')
+ }
+
+ const mistralContent: ContentChunk[] = []
+
+ for (const part of content) {
+ if (part.type === 'text') {
+ if (!part.text) {
+ throw new InvalidPayloadError('`text` is required when part type is "text"')
+ }
+
+ mistralContent.push({
+ type: 'text',
+ text: part.text,
+ })
+ } else if (part.type === 'image') {
+ if (!part.url) {
+ throw new InvalidPayloadError('`url` is required when part type is "image"')
+ }
+ mistralContent.push({
+ type: 'image_url',
+ imageUrl: part.url,
+ })
+ }
+ }
+
+ return mistralContent
+}
+
+function mapToMistralTools(input: llm.GenerateContentInput): Tool[] | undefined {
+ if (input.toolChoice?.type === 'none') {
+ // Don't return any tools if tool choice was to not use any tools
+ return []
+ }
+
+ const mistralTools = input.tools as Tool[] | undefined
+
+ // note: don't send an empty tools array
+ return mistralTools?.length ? mistralTools : undefined
+}
+
+function mapToMistralToolCall(toolCall: llm.ToolCall): ToolCall {
+ return {
+ id: toolCall.id,
+ type: 'function',
+ function: {
+ name: toolCall.function.name,
+ // Mistral expects a JSON string, not an object
+ arguments: JSON.stringify(toolCall.function.arguments),
+ },
+ }
+}
+
+function mapToMistralToolChoice(
+ toolChoice: llm.GenerateContentInput['toolChoice']
+): ToolChoice | ToolChoiceEnum | undefined {
+ if (!toolChoice) {
+ return undefined
+ }
+
+ switch (toolChoice.type) {
+ case 'any':
+ case 'auto':
+ case 'none':
+ return toolChoice.type
+ case 'specific':
+ return {
+ type: 'function',
+ function: {
+ name: toolChoice.functionName,
+ },
+ }
+ default:
+ return undefined
+ }
+}
+
+function calculateTokenCost(costPer1MTokens: number, tokenCount: number) {
+ return (costPer1MTokens / 1_000_000) * tokenCount
+}
+
+function mapToStopReason(mistralFinishReason: FinishReason): llm.GenerateContentOutput['choices'][0]['stopReason'] {
+ switch (mistralFinishReason) {
+ case 'stop':
+ return 'stop'
+ case 'length':
+ case 'model_length':
+ return 'max_tokens'
+ case 'tool_calls':
+ return 'tool_calls'
+ case 'error':
+ return 'other'
+ default:
+ return 'other'
+ }
+}
+
+function mapFromMistralToolCalls(
+ mistralToolCalls: ToolCall[] | null | undefined,
+ logger: IntegrationLogger
+): llm.ToolCall[] | undefined {
+ if (!mistralToolCalls || mistralToolCalls.length === 0) {
+ return undefined
+ }
+ return mistralToolCalls.reduce((toolCalls, mistralToolCall) => {
+ if (!mistralToolCall.id) {
+ logger.forBot().warn('Mistral returned tool call without ID, skipping')
+ return toolCalls
+ }
+ const toolType = mistralToolCall.type || 'function' // Default to 'function' if not provided
+ if (toolType !== 'function') {
+ logger.forBot().warn(`Unsupported tool call type "${toolType}" from Mistral, skipping`)
+ return toolCalls
+ }
+
+ let toolCallArguments: llm.ToolCall['function']['arguments']
+ const rawArguments = mistralToolCall.function.arguments
+ // arguments can be either string or json
+ if (typeof rawArguments === 'string') {
+ try {
+ toolCallArguments = JSON.parse(rawArguments)
+ } catch (err) {
+ logger
+ .forBot()
+ .warn(
+ `Mistral returned invalid JSON for tool call "${mistralToolCall.function.name}" arguments. ` +
+ `Using null instead. Error: ${err}`
+ )
+ toolCallArguments = null
+ }
+ } else if (typeof rawArguments === 'object' && rawArguments !== null) {
+ toolCallArguments = rawArguments
+ } else {
+ logger
+ .forBot()
+ .warn(
+ `Mistral returned unexpected type for tool call "${mistralToolCall.function.name}" arguments: ${typeof rawArguments}. Using null instead.`
+ )
+ toolCallArguments = null
+ }
+ toolCalls.push({
+ id: mistralToolCall.id,
+ type: 'function',
+ function: {
+ name: mistralToolCall.function.name,
+ arguments: toolCallArguments,
+ },
+ })
+ return toolCalls
+ }, [] as llm.ToolCall[])
+}
+
+function extractTextContent(content: string | ContentChunk[] | null | undefined): string | null {
+ if (!content) {
+ return null
+ }
+ if (typeof content === 'string') {
+ return content
+ }
+ // content is ContentChunk[] - extract only text chunks
+ return (
+ content
+      .filter((chunk): chunk is Extract<ContentChunk, { type: 'text' }> => chunk.type === 'text')
+ .map((chunk) => chunk.text)
+ .join('\n\n') || null
+ )
+}
diff --git a/integrations/mistral-ai/src/index.ts b/integrations/mistral-ai/src/index.ts
new file mode 100644
index 00000000000..8dd35ec4d3e
--- /dev/null
+++ b/integrations/mistral-ai/src/index.ts
@@ -0,0 +1,157 @@
+import { llm } from '@botpress/common'
+import { Mistral } from '@mistralai/mistralai'
+import { generateContent } from './actions/generate-content'
+import { DefaultModel, ModelId } from './schemas'
+import * as bp from '.botpress'
+
+const mistral = new Mistral({ apiKey: bp.secrets.MISTRAL_API_KEY })
+
+const LanguageModels: Record<ModelId, llm.ModelDetails> = {
+ // Reference: https://docs.mistral.ai/getting-started/models
+ 'mistral-large-2512': {
+ name: 'Mistral Large 3',
+ description:
+ 'Mistral Large 3, is a state-of-the-art, open-weight, general-purpose multimodal model with a granular Mixture-of-Experts architecture. It features 41B active parameters and 675B total parameters.',
+ tags: [
+ /* TODO: Add tags */
+ ],
+ input: {
+ costPer1MTokens: 0.5,
+ maxTokens: 256_000,
+ },
+ output: {
+ costPer1MTokens: 1.5,
+ maxTokens: 4096,
+ },
+ },
+ 'mistral-medium-2508': {
+ name: 'Mistral Medium 3.1',
+ description: 'Frontier-class multimodal model released August 2025. Improving tone and performance.',
+ tags: [
+ /* TODO: Add tags */
+ ],
+ input: {
+ costPer1MTokens: 0.4,
+ maxTokens: 128_000,
+ },
+ output: {
+ costPer1MTokens: 2,
+ maxTokens: 4096,
+ },
+ },
+ 'mistral-small-2506': {
+ name: 'Mistral Small 3.2',
+ description: 'An update to the previous small model, released June 2025.',
+ tags: [
+ /* TODO: Add tags */
+ ],
+ input: {
+ costPer1MTokens: 0.1,
+ maxTokens: 128_000,
+ },
+ output: {
+ costPer1MTokens: 0.3,
+ maxTokens: 4096,
+ },
+ },
+ 'ministral-14b-2512': {
+ name: 'Ministral 3 14B',
+ description:
+ 'Ministral 3 14B is the largest model in the Ministral 3 family, offering state-of-the-art capabilities and performance comparable to its larger Mistral Small 3.2 24B counterpart.',
+ tags: [
+ /* TODO: Add tags */
+ ],
+ input: {
+ costPer1MTokens: 0.2,
+ maxTokens: 256_000,
+ },
+ output: {
+ costPer1MTokens: 0.2,
+ maxTokens: 4096,
+ },
+ },
+ 'ministral-8b-2512': {
+ name: 'Ministral 3 8B',
+ description:
+ 'Ministral 3 8B is a powerful and efficient model in the Ministral 3 family, offering best-in-class text and vision capabilities.',
+ tags: [
+ /* TODO: Add tags */
+ ],
+ input: {
+ costPer1MTokens: 0.15,
+ maxTokens: 256_000,
+ },
+ output: {
+ costPer1MTokens: 0.15,
+ maxTokens: 4096,
+ },
+ },
+ 'ministral-3b-2512': {
+ name: 'Ministral 3 3B',
+ description:
+ 'Ministral 3 3B is the smallest and most efficient model in the Ministral 3 family, offering robust language and vision capabilities in a compact package.',
+ tags: [
+ /* TODO: Add tags */
+ ],
+ input: {
+ costPer1MTokens: 0.1,
+ maxTokens: 256_000,
+ },
+ output: {
+ costPer1MTokens: 0.1,
+ maxTokens: 4096,
+ },
+ },
+ 'magistral-medium-2509': {
+ name: 'Magistral Medium 1.2',
+ description: 'Frontier-class multimodal reasoning model update of September 2025.',
+ tags: [
+ /* TODO: Add tags */
+ ],
+ input: {
+ costPer1MTokens: 2,
+ maxTokens: 128_000,
+ },
+ output: {
+ costPer1MTokens: 5,
+ maxTokens: 4096,
+ },
+ },
+ 'magistral-small-2509': {
+ name: 'Magistral Small 1.2',
+ description: 'Small multimodal reasoning model update of September 2025.',
+ tags: [
+ /* TODO: Add tags */
+ ],
+ input: {
+ costPer1MTokens: 0.5,
+ maxTokens: 128_000,
+ },
+ output: {
+ costPer1MTokens: 1.5,
+ maxTokens: 4096,
+ },
+ },
+}
+
+export default new bp.Integration({
+ register: async () => {},
+ unregister: async () => {},
+ actions: {
+ generateContent: async ({ input, logger, metadata }) => {
+ const output = await generateContent(input, mistral, logger, {
+ models: LanguageModels,
+ defaultModel: DefaultModel,
+ })
+ metadata.setCost(output.botpress.cost)
+ return output
+ },
+ listLanguageModels: async ({}) => {
+ return {
+ models: Object.entries(LanguageModels).map(([id, model]) => ({ id: id, ...model })),
+ }
+ },
+ },
+ channels: {},
+ handler: async () => {},
+})
diff --git a/integrations/mistral-ai/src/schemas.ts b/integrations/mistral-ai/src/schemas.ts
new file mode 100644
index 00000000000..cd0b3e57b45
--- /dev/null
+++ b/integrations/mistral-ai/src/schemas.ts
@@ -0,0 +1,19 @@
+import { z } from '@botpress/sdk'
+
+export type ModelId = z.infer<typeof ModelId>
+
+export const DefaultModel: ModelId = 'mistral-large-2512'
+
+export const ModelId = z
+ .enum([
+ 'mistral-large-2512',
+ 'mistral-medium-2508',
+ 'mistral-small-2506',
+ 'ministral-14b-2512',
+ 'ministral-8b-2512',
+ 'ministral-3b-2512',
+ 'magistral-medium-2509',
+ 'magistral-small-2509',
+ ])
+ .describe('Model to use for content generation')
+ .placeholder(DefaultModel)
diff --git a/integrations/mistral-ai/tsconfig.json b/integrations/mistral-ai/tsconfig.json
new file mode 100644
index 00000000000..0c1062fd8ce
--- /dev/null
+++ b/integrations/mistral-ai/tsconfig.json
@@ -0,0 +1,8 @@
+{
+ "extends": "../../tsconfig.json",
+ "compilerOptions": {
+ "baseUrl": ".",
+ "outDir": "dist"
+ },
+ "include": [".botpress/**/*", "definitions/**/*", "src/**/*", "*.ts"]
+}
diff --git a/integrations/pdf-generator/integration.definition.ts b/integrations/pdf-generator/integration.definition.ts
index a909b9d0fda..e29f904112e 100644
--- a/integrations/pdf-generator/integration.definition.ts
+++ b/integrations/pdf-generator/integration.definition.ts
@@ -3,7 +3,7 @@ import { IntegrationDefinition, z } from '@botpress/sdk'
export default new IntegrationDefinition({
name: 'pdf-generator',
- version: '0.0.3',
+ version: '0.0.4',
readme: 'hub.md',
icon: 'icon.svg',
description: 'Converts markdown content to PDF using PDFShift',
diff --git a/integrations/pdf-generator/src/util.ts b/integrations/pdf-generator/src/util.ts
index 8215fae232c..ba8077ba7e4 100644
--- a/integrations/pdf-generator/src/util.ts
+++ b/integrations/pdf-generator/src/util.ts
@@ -46,6 +46,7 @@ export async function uploadPdf(client: any, buffer: Buffer, filename: string) {
content: buffer,
contentType: 'application/pdf',
index: false,
+ publicContentImmediatelyAccessible: true,
})
return {
diff --git a/packages/llmz/package.json b/packages/llmz/package.json
index 9a79534ec12..bfccc408834 100644
--- a/packages/llmz/package.json
+++ b/packages/llmz/package.json
@@ -2,7 +2,7 @@
"name": "llmz",
"type": "module",
"description": "LLMz - An LLM-native Typescript VM built on top of Zui",
- "version": "0.0.39",
+ "version": "0.0.40",
"types": "./dist/index.d.ts",
"main": "./dist/index.cjs",
"module": "./dist/index.js",
diff --git a/packages/llmz/src/tool.test.ts b/packages/llmz/src/tool.test.ts
index 1f361eaae9c..46d7ecbfeda 100644
--- a/packages/llmz/src/tool.test.ts
+++ b/packages/llmz/src/tool.test.ts
@@ -132,7 +132,7 @@ describe('tools with unique names', () => {
expect(tools.map((t) => t.name)).toMatchInlineSnapshot(`
[
"add1",
- "add1",
+ "add2",
"sub1",
"add",
"sub",
diff --git a/packages/llmz/src/tool.ts b/packages/llmz/src/tool.ts
index c32e322e890..bb59af09a96 100644
--- a/packages/llmz/src/tool.ts
+++ b/packages/llmz/src/tool.ts
@@ -808,6 +808,8 @@ export class Tool impl
toolName = `${tool.name}${++counter}`
}
+ names.add(toolName)
+
return tool.rename(toolName)
})
}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 4259266f437..781d1567307 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -1488,6 +1488,28 @@ importers:
specifier: ^5.6.3
version: 5.8.3
+ integrations/mistral-ai:
+ dependencies:
+ '@botpress/client':
+ specifier: workspace:*
+ version: link:../../packages/client
+ '@botpress/common':
+ specifier: workspace:*
+ version: link:../../packages/common
+ '@botpress/sdk':
+ specifier: workspace:*
+ version: link:../../packages/sdk
+ '@mistralai/mistralai':
+ specifier: ^1.11.0
+ version: 1.11.0
+ devDependencies:
+ '@botpress/cli':
+ specifier: workspace:*
+ version: link:../../packages/cli
+ '@botpresshub/llm':
+ specifier: workspace:*
+ version: link:../../interfaces/llm
+
integrations/monday:
dependencies:
'@botpress/client':
@@ -4977,6 +4999,9 @@ packages:
'@microsoft/tsdoc@0.15.1':
resolution: {integrity: sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==}
+ '@mistralai/mistralai@1.11.0':
+ resolution: {integrity: sha512-6/BVj2mcaggYbpMzNSxtqtM2Tv/Jb5845XFd2CMYFO+O5VBkX70iLjtkBBTI4JFhh1l9vTCIMYXBVOjLoBVHGQ==}
+
'@mixmark-io/domino@2.2.0':
resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==}
@@ -10396,6 +10421,7 @@ packages:
scmp@2.1.0:
resolution: {integrity: sha512-o/mRQGk9Rcer/jEEw/yw4mwo3EU/NvYvp577/Btqrym9Qy5/MdWGBqipbALgd2lrdWTJ5/gqDusxfnQBxOxT2Q==}
+ deprecated: Just use Node.js's crypto.timingSafeEqual()
seek-bzip@1.0.6:
resolution: {integrity: sha512-e1QtP3YL5tWww8uKaOCQ18UxIT2laNBXHjV/S2WYCiK4udiv8lkG89KRIoCjUagnAmCBurjF4zEVX2ByBbnCjQ==}
@@ -13937,6 +13963,11 @@ snapshots:
'@microsoft/tsdoc@0.15.1': {}
+ '@mistralai/mistralai@1.11.0':
+ dependencies:
+ zod: 3.24.2
+ zod-to-json-schema: 3.24.6(zod@3.24.2)
+
'@mixmark-io/domino@2.2.0': {}
'@mswjs/interceptors@0.40.0':