Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion packages/botonic-core/src/models/ai-agents.ts
Original file line number Diff line number Diff line change
Expand Up @@ -118,9 +118,13 @@ export interface HubtypeUserMessage {
/** Discriminant for the kinds of AI agent the plugin can execute. */
export enum AiAgentType {
  Worker = 'worker',
  Router = 'router',
  Manager = 'manager',
}

export type AiAgentArgs = AiAgentWorkerArgs | AIAgentRouterArgs
/** Union of all agent argument variants; narrow on the `type` discriminant. */
export type AiAgentArgs =
  | AiAgentWorkerArgs
  | AIAgentRouterArgs
  | AIAgentManagerArgs

export type AiAgentBaseArgs = {
type: AiAgentType
Expand All @@ -147,3 +151,9 @@ export interface AIAgentRouterArgs extends AiAgentBaseArgs {
type: AiAgentType.Router
agents: AIAgentDataWithDescription[]
}

/**
 * Configuration for a "manager" agent: a single top-level agent that calls
 * worker agents (exposed to it as tools) alongside its own active tools.
 */
export interface AIAgentManagerArgs extends AiAgentBaseArgs {
  type: AiAgentType.Manager
  /** Worker agents the manager may invoke (wrapped via `agent.asTool`). */
  agents: AIAgentDataWithDescription[]
  /** Names of the plugin tools enabled for this manager. */
  activeTools: { name: string }[]
}
99 changes: 99 additions & 0 deletions packages/botonic-plugin-ai-agents/src/agent-manager-builder.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
import type { CampaignV2, ContactInfo, ResolvedPlugins } from '@botonic/core'
import {
Agent,
type AgentOutputType,
type Handoff,
type ModelSettings,
} from '@openai/agents'
import type { z } from 'zod'

import { createInputGuardrails } from './guardrails'
import type { GuardrailTrackingContext } from './guardrails/input'
import type { LLMConfig } from './llm-config'
import {
getOutputInstructions,
getOutputSchema,
type OutputSchema,
} from './structured-output'
import type { AIAgent, Context, GuardrailRule, Tool } from './types'

/** Constructor options for AIAgentManagerBuilder. */
interface AIAgentManagerBuilderOptions<
  TPlugins extends ResolvedPlugins = ResolvedPlugins,
  TExtraData = unknown,
> {
  /** Display name of the manager agent. */
  name: string
  /** System prompt; output-format instructions are appended at build time. */
  instructions: string
  /** Tools available to the manager (includes sub-agents wrapped as tools). */
  tools: Tool<TPlugins, TExtraData>[]
  // NOTE(review): campaignsContext and contactInfo are stored by the builder
  // but not read by build() in this file — presumably reserved for prompt
  // construction; confirm before relying on them.
  campaignsContext?: CampaignV2[]
  contactInfo: ContactInfo[]
  /** Provider for the resolved model and its settings. */
  llmConfig: LLMConfig
  /** Rules compiled into input guardrails before the agent is created. */
  inputGuardrailRules: GuardrailRule[]
  /** Optional zod schemas constraining structured output messages. */
  outputMessagesSchemas?: z.ZodObject[]
  /** Identifiers attached to guardrail tracking events. */
  guardrailTrackingContext: GuardrailTrackingContext
}

export class AIAgentManagerBuilder<
TPlugins extends ResolvedPlugins = ResolvedPlugins,
TExtraData = unknown,
> {
private name: string
private instructions: string
private tools: Tool<TPlugins, TExtraData>[]
private campaignsContext?: CampaignV2[]
private contactInfo: ContactInfo[]
private llmConfig: LLMConfig
private handoffs: Handoff<
Context<TPlugins, TExtraData>,
AgentOutputType<typeof OutputSchema>
>[]
private inputGuardrailRules: GuardrailRule[]
private outputMessagesSchemas: z.ZodObject[]
private guardrailTrackingContext: GuardrailTrackingContext

constructor(options: AIAgentManagerBuilderOptions<TPlugins, TExtraData>) {
this.name = options.name
this.instructions = options.instructions
this.tools = options.tools
this.campaignsContext = options.campaignsContext
this.contactInfo = options.contactInfo
this.llmConfig = options.llmConfig
this.inputGuardrailRules = options.inputGuardrailRules
this.outputMessagesSchemas = options.outputMessagesSchemas || []
this.guardrailTrackingContext = options.guardrailTrackingContext
}

async build(): Promise<AIAgent<TPlugins, TExtraData>> {
const inputGuardrails = await createInputGuardrails(
this.inputGuardrailRules,
this.llmConfig,
this.guardrailTrackingContext
)
const modelSettings = this.getAgentModelSettings()
const resolvedModel = await this.llmConfig.getModel()

return new Agent<
Context<TPlugins, TExtraData>,
AgentOutputType<typeof OutputSchema>
>({
name: this.name,
model: resolvedModel,
modelSettings,
instructions: `${this.instructions}\n\n${getOutputInstructions()}`,
tools: this.tools,
outputType: getOutputSchema(this.outputMessagesSchemas),
inputGuardrails,
outputGuardrails: [],
})
}

private getAgentModelSettings(): ModelSettings {
const modelSettings: ModelSettings = { ...this.llmConfig.modelSettings }
if (this.llmConfig.modelSettings.reasoning) {
modelSettings.reasoning = { ...this.llmConfig.modelSettings.reasoning }
}
if (this.llmConfig.modelSettings.text) {
modelSettings.text = { ...this.llmConfig.modelSettings.text }
}
return modelSettings
}
}
4 changes: 2 additions & 2 deletions packages/botonic-plugin-ai-agents/src/agent-router-builder.ts
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ export class AIAgentRouterBuilder<
this.llmConfig,
this.guardrailTrackingContext
)
const modelSettings = this.getRouterModelSettings()
const modelSettings = this.getAgentModelSettings()

// Agent.create is typed as Agent<UnknownContext>; we run with Context<TPlugins, TExtraData>.
const agent = Agent.create({
Expand All @@ -81,7 +81,7 @@ export class AIAgentRouterBuilder<
return agent
}

private getRouterModelSettings(): ModelSettings {
private getAgentModelSettings(): ModelSettings {
const modelSettings: ModelSettings = { ...this.llmConfig.modelSettings }
if (this.llmConfig.modelSettings.reasoning) {
modelSettings.reasoning = { ...this.llmConfig.modelSettings.reasoning }
Expand Down
94 changes: 93 additions & 1 deletion packages/botonic-plugin-ai-agents/src/index.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import {
type AIAgentManagerArgs,
type AIAgentRouterArgs,
type AiAgentArgs,
AiAgentType,
Expand All @@ -13,6 +14,7 @@ import { v7 as uuidv7 } from 'uuid'
import type { ZodObject } from 'zod'

import { AIAgentBuilder } from './agent-builder'
import { AIAgentManagerBuilder } from './agent-manager-builder'
import { AIAgentRouterBuilder } from './agent-router-builder'
import {
DEFAULT_MAX_RETRIES,
Expand All @@ -23,6 +25,7 @@ import {
import { createDebugLogger, type DebugLogger } from './debug-logger'
import { LLMConfig } from './llm-config'
import { AIAgentRunner } from './runner'
import { AIAgentManagerRunner } from './runner-manager'
import { AIAgentRouterRunner } from './runner-router'
import { HubtypeApiClient } from './services/hubtype-api-client'
import type {
Expand Down Expand Up @@ -110,6 +113,15 @@ export default class BotonicPluginAiAgents<
)
}

if (aiAgentArgs.type === AiAgentType.Manager) {
return await this.executeManagerAIAgent(
botContext,
aiAgentArgs,
authToken,
inferenceId
)
}

throw new Error('Invalid agent type')
} catch (error) {
console.error('error plugin returns undefined', error)
Expand Down Expand Up @@ -270,6 +282,86 @@ export default class BotonicPluginAiAgents<
return await runner.run(messages, context)
}

/**
 * Executes a "manager" AI agent: every configured worker agent is built and
 * wrapped as a tool (`agent.asTool`), combined with the manager's own plugin
 * tools, and the resulting single agent is run over the conversation history.
 *
 * @returns the RunResult produced by AIAgentManagerRunner.run
 */
private async executeManagerAIAgent(
  botContext: BotContext<TPlugins, TExtraData>,
  aiAgentArgs: AIAgentManagerArgs,
  authToken: string,
  inferenceId: string
) {
  const { agents, name, instructions } = aiAgentArgs
  // Hoisted: used both for each worker build and for the manager itself.
  const outputMessagesSchemas = aiAgentArgs.outputMessagesSchemas ?? []

  const llmConfig = new LLMConfig(
    this.maxRetries,
    this.timeout,
    aiAgentArgs.model,
    aiAgentArgs.verbosity
  )

  // Build all worker agents in parallel and expose each one as a tool the
  // manager can call.
  const agentsAsTools = await Promise.all(
    agents.map(async aiAgentData => {
      const { agent } = await this.getAIAgentWorkerAndTools(
        botContext,
        aiAgentData,
        outputMessagesSchemas,
        authToken,
        inferenceId,
        llmConfig
      )
      return agent.asTool({
        toolName: aiAgentData.name,
        toolDescription: aiAgentData.description,
      })
    })
  )

  // Sub-agents-as-tools joined with the manager's own active plugin tools.
  const tools = [...agentsAsTools, ...this.buildTools(aiAgentArgs)]

  const agentManager = await new AIAgentManagerBuilder<TPlugins, TExtraData>({
    name,
    instructions,
    tools,
    contactInfo: botContext.session.user.contact_info ?? [],
    inputGuardrailRules: aiAgentArgs.inputGuardrailRules ?? [],
    guardrailTrackingContext: {
      botId: botContext.session.bot.id,
      isTest: botContext.session.is_test_integration,
      authToken,
      inferenceId,
    },
    outputMessagesSchemas,
    llmConfig,
  }).build()

  const messages = await this.getMessages(
    botContext,
    authToken,
    aiAgentArgs.previousHubtypeMessages ?? []
  )

  const context: Context<TPlugins, TExtraData> = {
    authToken,
    // Knowledge tracking starts empty; tools fill it in during the run.
    knowledgeUsed: {
      query: '',
      sourceIds: [],
      chunksIds: [],
      chunkTexts: [],
    },
    request: botContext,
  }

  const runner = new AIAgentManagerRunner<TPlugins, TExtraData>(
    agentManager,
    llmConfig,
    inferenceId,
    this.logger
  )

  return await runner.run(messages, context)
}

private async getAIAgentWorkerAndTools(
botContext: BotContext,
aiAgentArgs: AiAgentArgs,
Expand Down Expand Up @@ -332,7 +424,7 @@ export default class BotonicPluginAiAgents<

private buildTools(aiAgentArgs: AiAgentArgs): Tool<TPlugins, TExtraData>[] {
const activeTools =
aiAgentArgs.type === AiAgentType.Worker ? aiAgentArgs.activeTools : []
aiAgentArgs.type === AiAgentType.Router ? [] : aiAgentArgs.activeTools
const activeToolNames = activeTools.map(tool => tool.name)
const availableTools = this.toolDefinitions.filter(tool =>
activeToolNames.includes(tool.name)
Expand Down
96 changes: 96 additions & 0 deletions packages/botonic-plugin-ai-agents/src/runner-manager.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
import type { AgenticOutputMessage, ResolvedPlugins } from '@botonic/core'
import { InputGuardrailTripwireTriggered, Runner } from '@openai/agents'
import type { DebugLogger } from './debug-logger'
import type { LLMConfig } from './llm-config'
import type { AIAgentRunnerResult } from './runner'
import type { AgenticInputMessage, AIAgent, Context, RunResult } from './types'

/**
 * Runs a manager agent built by AIAgentManagerBuilder and normalizes the SDK
 * result into the plugin's RunResult shape.
 */
export class AIAgentManagerRunner<
  TPlugins extends ResolvedPlugins = ResolvedPlugins,
  // `unknown` instead of `any`: consistent with AIAgentManagerBuilder and
  // avoids silently disabling type checking for extra data.
  TExtraData = unknown,
> {
  private agent: AIAgent<TPlugins, TExtraData>
  private llmConfig: LLMConfig
  private inferenceId: string
  private logger: DebugLogger

  constructor(
    agent: AIAgent<TPlugins, TExtraData>,
    llmConfig: LLMConfig,
    inferenceId: string, // TODO: use it for LLM run tracking
    logger: DebugLogger
  ) {
    this.agent = agent
    this.llmConfig = llmConfig
    this.inferenceId = inferenceId
    this.logger = logger
  }

  /**
   * Executes the agent over the conversation history.
   *
   * A run "exits" when the agent produces no messages or emits an explicit
   * 'exit' message; exit messages are never forwarded to the user. A tripped
   * input guardrail is treated as a controlled exit (error: false), every
   * other error is rethrown.
   *
   * @param messages conversation history fed to the agent
   * @param context  per-run context shared with tools and guardrails
   * @returns the normalized RunResult
   */
  async run(
    messages: AgenticInputMessage[],
    context: Context<TPlugins, TExtraData>
  ): Promise<RunResult> {
    const startTime = Date.now()

    this.logger.logRunnerStart(
      this.llmConfig.modelName,
      this.llmConfig.modelSettings
    )

    try {
      const runner = new Runner({
        tracingDisabled: true,
      })
      const result = (await runner.run(this.agent, messages, {
        context,
      })) as AIAgentRunnerResult

      // TODO: send LLM run tracking once available:
      // const endTime = Date.now()
      // await this.sendLlmRunTracking(result, context, startTime, endTime)

      // TODO(review): debug log — remove before release?
      console.log('AIAgentManagerRunner result', result)

      const outputMessages = result.finalOutput?.messages ?? []
      const hasExit =
        outputMessages.length === 0 ||
        outputMessages.some(message => message.type === 'exit')

      const runResult: RunResult = {
        // On exit nothing is forwarded; otherwise strip any 'exit' messages.
        messages: hasExit
          ? []
          : (outputMessages.filter(
              message => message.type !== 'exit'
            ) as AgenticOutputMessage[]),
        toolsExecuted: [],
        exit: hasExit,
        memoryLength: messages.length,
        error: false,
        inputGuardrailsTriggered: [],
        outputGuardrailsTriggered: [],
      }

      this.logger.logRunResult(runResult, startTime)

      return runResult
    } catch (error) {
      console.error('AIAgentManagerRunner error', error)
      if (error instanceof InputGuardrailTripwireTriggered) {
        // A tripwire is a controlled stop, not a failure: exit with no output
        // and surface which guardrails fired.
        const runResult: RunResult = {
          messages: [],
          // NOTE(review): success path reports messages.length here — confirm
          // that 0 (rather than the input length) is intended on tripwire.
          memoryLength: 0,
          toolsExecuted: [],
          exit: true,
          error: false,
          inputGuardrailsTriggered: error.result.output.outputInfo,
          outputGuardrailsTriggered: [],
        }

        this.logger.logGuardrailTriggered()

        return runResult
      }

      throw error
    }
  }
}
2 changes: 1 addition & 1 deletion packages/botonic-plugin-ai-agents/src/runner-router.ts
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ export class AIAgentRouterRunner<
// await this.sendLlmRunTracking(result, context, startTime, endTime)

console.log('AIAgentRouterRunner result', result)
console.log('currentAgent: ', result.state?._currentAgent?.name)
console.log('CURRENT_AGENT: ', result.state?._currentAgent?.name)
const outputMessages = result.finalOutput?.messages || []
const hasExit =
outputMessages.length === 0 ||
Expand Down
Loading
Loading