Skip to content
Draft
37 changes: 34 additions & 3 deletions packages/botonic-core/src/models/ai-agents.ts
Original file line number Diff line number Diff line change
Expand Up @@ -115,14 +115,45 @@ export interface HubtypeUserMessage {
content: string
}

export interface AiAgentArgs {
/**
 * Kinds of AI agent supported by the plugin. Used as the discriminant
 * (`type` field) of the `AiAgentArgs` union.
 */
export enum AiAgentType {
  Worker = 'worker',
  Router = 'router',
  Manager = 'manager',
}

/**
 * Union of every agent configuration accepted by the plugin, discriminated
 * by the `type` field (see `AiAgentType`).
 * NOTE(review): member casing is inconsistent (`AiAgentWorkerArgs` vs
 * `AIAgentRouterArgs`/`AIAgentManagerArgs`); unifying would be a breaking
 * rename of exported types, so only flagging it here.
 */
export type AiAgentArgs =
  | AiAgentWorkerArgs
  | AIAgentRouterArgs
  | AIAgentManagerArgs

/**
 * Fields shared by all agent configurations. Worker/router/manager variants
 * narrow or extend these.
 */
export type AiAgentBaseArgs = {
  /** Discriminant: which kind of agent this configuration describes. */
  type: AiAgentType
  /** Agent display name. */
  name: string
  /** System instructions for the agent. */
  instructions: string
  /** Model identifier — or deployment name, depending on the provider. */
  model: string
  verbosity: VerbosityLevel
  /** Tools enabled for this agent, referenced by name. */
  activeTools?: { name: string }[]
  /** Guardrail rules applied to user input before the agent runs. */
  inputGuardrailRules?: GuardrailRule[]
  /** Knowledge-source ids used for retrieval. */
  sourceIds?: string[]
  /** Presumably prior assistant turns for conversational context — confirm. */
  previousHubtypeMessages?: HubtypeAssistantMessage[]
  /** Extra zod schemas allowed as structured output messages. */
  outputMessagesSchemas?: z.ZodObject<any>[]
}

/**
 * Configuration for a worker agent. Narrows the base type's optional
 * `activeTools` and `sourceIds` to required fields.
 */
export interface AiAgentWorkerArgs extends AiAgentBaseArgs {
  type: AiAgentType.Worker
  activeTools: { name: string }[]
  sourceIds: string[]
}

/**
 * Worker-agent configuration plus a `description` used to characterize the
 * sub-agent inside router/manager configurations.
 *
 * Exported (previously file-private) because it appears in the public
 * `AIAgentRouterArgs.agents` and `AIAgentManagerArgs.agents` fields —
 * without the export, consumers could not name the element type of those
 * arrays. Adding `export` is purely additive and backward-compatible.
 */
export interface AIAgentDataWithDescription extends AiAgentWorkerArgs {
  description: string
}

/**
 * Configuration for a router agent. Carries the described worker agents it
 * can route between (routing semantics inferred from naming — confirm).
 */
export interface AIAgentRouterArgs extends AiAgentBaseArgs {
  type: AiAgentType.Router
  agents: AIAgentDataWithDescription[]
}

/**
 * Configuration for a manager agent. Like the router it carries described
 * sub-agents, and it additionally requires `activeTools` (optional on the
 * base type).
 */
export interface AIAgentManagerArgs extends AiAgentBaseArgs {
  type: AiAgentType.Manager
  agents: AIAgentDataWithDescription[]
  activeTools: { name: string }[]
}
95 changes: 54 additions & 41 deletions packages/botonic-plugin-ai-agents/src/agent-builder.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,16 +3,25 @@ import {
Agent,
type AgentOutputType,
type InputGuardrail,
type ModelSettings,
} from '@openai/agents'
import type { z } from 'zod'

import { OPENAI_PROVIDER } from './constants'
import type { DebugLogger } from './debug-logger'
import { createInputGuardrail } from './guardrails'
import { createInputGuardrails } from './guardrails'
import type { GuardrailTrackingContext } from './guardrails/input'
import type { LLMConfig } from './llm-config'
import { getOutputSchema, type OutputSchema } from './structured-output'
import { mandatoryTools, retrieveKnowledge } from './tools'
import {
getOutputInstructions,
getOutputSchema,
type OutputSchema,
} from './structured-output'
import {
createRetrieveKnowledge,
mandatoryTools,
RETRIEVE_KNOWLEDGE_TOOL_NAME,
} from './tools'
import type { AIAgent, Context, GuardrailRule, Tool } from './types'

interface AIAgentBuilderOptions<
Expand Down Expand Up @@ -43,6 +52,8 @@ export class AIAgentBuilder<
private inputGuardrails: InputGuardrail[]
public llmConfig: LLMConfig
private logger: DebugLogger
private inputGuardrailRules: GuardrailRule[]
private guardrailTrackingContext: GuardrailTrackingContext

constructor(options: AIAgentBuilderOptions<TPlugins, TExtraData>) {
this.name = options.name
Expand All @@ -56,33 +67,32 @@ export class AIAgentBuilder<
this.inputGuardrails = []
this.llmConfig = options.llmConfig
this.logger = options.logger
if (options.inputGuardrailRules.length > 0) {
const inputGuardrail = createInputGuardrail(
options.inputGuardrailRules,
options.llmConfig,
options.guardrailTrackingContext
)
this.inputGuardrails.push(inputGuardrail)
}
this.inputGuardrailRules = options.inputGuardrailRules
this.guardrailTrackingContext = options.guardrailTrackingContext
}

build(): AIAgent<TPlugins, TExtraData> {
// When using standard OpenAI API, we need to specify the model
// Azure OpenAI uses deployment name instead

async build(): Promise<AIAgent<TPlugins, TExtraData>> {
// When using standard OpenAI API, we need to specify the model.
// Azure OpenAI uses deployment name instead.
const model = this.llmConfig.modelName
const hasRetrieveKnowledge = this.tools.includes(retrieveKnowledge)
const resolvedModel = await this.llmConfig.getModel()
const hasRetrieveKnowledge = this.tools.some(
tool => tool.name === RETRIEVE_KNOWLEDGE_TOOL_NAME
)
const modelSettings = this.getAgentModelSettings(hasRetrieveKnowledge)

this.inputGuardrails = await createInputGuardrails(
this.inputGuardrailRules,
this.llmConfig,
this.guardrailTrackingContext
)

this.logger.logModelSettings({
provider: OPENAI_PROVIDER,
model,
reasoning: this.llmConfig.modelSettings.reasoning as
| { effort: string }
| undefined,
text: this.llmConfig.modelSettings.text as
| { verbosity: string }
| undefined,
toolChoice: this.llmConfig.modelSettings.toolChoice as string | undefined,
reasoning: modelSettings.reasoning as { effort: string } | undefined,
text: modelSettings.text as { verbosity: string } | undefined,
toolChoice: modelSettings.toolChoice as string | undefined,
hasRetrieveKnowledge,
})

Expand All @@ -91,7 +101,8 @@ export class AIAgentBuilder<
AgentOutputType<typeof OutputSchema>
>({
name: this.name,
model,
model: resolvedModel,
modelSettings,
instructions: this.instructions,
tools: this.tools,
outputType: getOutputSchema(this.externalOutputMessagesSchemas),
Expand All @@ -100,6 +111,23 @@ export class AIAgentBuilder<
})
}

/**
 * Builds the per-agent `ModelSettings` from the shared LLM configuration.
 *
 * Starts from a shallow copy of `llmConfig.modelSettings` and additionally
 * copies the nested `reasoning`/`text` objects, so mutating the per-agent
 * settings (e.g. `toolChoice` below) never leaks into the shared config.
 *
 * @param hasRetrieveKnowledge whether this agent was given the
 *   knowledge-retrieval tool; if so, `toolChoice` is forced to that tool so
 *   the model's first call retrieves knowledge.
 */
private getAgentModelSettings(hasRetrieveKnowledge: boolean): ModelSettings {
  const modelSettings: ModelSettings = { ...this.llmConfig.modelSettings }
  if (this.llmConfig.modelSettings.reasoning) {
    modelSettings.reasoning = { ...this.llmConfig.modelSettings.reasoning }
  }
  if (this.llmConfig.modelSettings.text) {
    modelSettings.text = { ...this.llmConfig.modelSettings.text }
  }

  if (hasRetrieveKnowledge) {
    // Removed leftover commented-out fragment that previously restricted
    // this to models whose name includes 'gpt-4' — dead debug residue with
    // unbalanced delimiters inside the comment. Restore as a real condition
    // if the model restriction is actually wanted.
    modelSettings.toolChoice = RETRIEVE_KNOWLEDGE_TOOL_NAME
  }

  return modelSettings
}

private addExtraInstructions(
initialInstructions: string,
contactInfo: ContactInfo[],
Expand All @@ -109,7 +137,7 @@ export class AIAgentBuilder<
const metadataInstructions = this.getMetadataInstructions()
const contactInfoInstructions = this.getContactInfoInstructions(contactInfo)
const campaignInstructions = this.getCampaignInstructions(campaignsContext)
const outputInstructions = this.getOutputInstructions()
const outputInstructions = getOutputInstructions()
return `${instructions}\n\n${metadataInstructions}\n\n${contactInfoInstructions}\n\n${campaignInstructions}\n\n${outputInstructions}`
}

Expand Down Expand Up @@ -155,28 +183,13 @@ export class AIAgentBuilder<
.join('\n')
}

private getOutputInstructions(): string {
const example = {
messages: [
{
type: 'text',
content: {
text: 'Hello, how can I help you today?',
},
},
],
}
const output = `Return a JSON that follows the output schema provided. Never return multiple output schemas concatenated by a line break.\n<example>\n${JSON.stringify(example)}\n</example>`
return `<output>\n${output}\n</output>`
}

private addHubtypeTools(
tools: Tool<TPlugins, TExtraData>[],
sourceIds: string[]
): Tool<TPlugins, TExtraData>[] {
const hubtypeTools: Tool[] = [...mandatoryTools]
if (sourceIds.length > 0) {
hubtypeTools.push(retrieveKnowledge)
hubtypeTools.push(createRetrieveKnowledge(sourceIds))
}
return [...hubtypeTools, ...tools]
}
Expand Down
99 changes: 99 additions & 0 deletions packages/botonic-plugin-ai-agents/src/agent-manager-builder.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
import type { CampaignV2, ContactInfo, ResolvedPlugins } from '@botonic/core'
import {
Agent,
type AgentOutputType,
type Handoff,
type ModelSettings,
} from '@openai/agents'
import type { z } from 'zod'

import { createInputGuardrails } from './guardrails'
import type { GuardrailTrackingContext } from './guardrails/input'
import type { LLMConfig } from './llm-config'
import {
getOutputInstructions,
getOutputSchema,
type OutputSchema,
} from './structured-output'
import type { AIAgent, Context, GuardrailRule, Tool } from './types'

/**
 * Constructor options for `AIAgentManagerBuilder`.
 */
interface AIAgentManagerBuilderOptions<
  TPlugins extends ResolvedPlugins = ResolvedPlugins,
  TExtraData = unknown,
> {
  /** Display name of the manager agent. */
  name: string
  /** Base system instructions; output-format instructions are appended. */
  instructions: string
  /** Tools exposed to the manager agent. */
  tools: Tool<TPlugins, TExtraData>[]
  campaignsContext?: CampaignV2[]
  contactInfo: ContactInfo[]
  llmConfig: LLMConfig
  /** Guardrail rules evaluated against user input before the agent runs. */
  inputGuardrailRules: GuardrailRule[]
  // `<any>` matches @botonic/core's `z.ZodObject<any>[]` declaration; bare
  // `z.ZodObject` only type-checks on zod versions where the shape type
  // parameter has a default (it does not in zod v3).
  outputMessagesSchemas?: z.ZodObject<any>[]
  guardrailTrackingContext: GuardrailTrackingContext
}

/**
 * Builds a "manager" agent: an `Agent` wired with the shared model settings,
 * input guardrails and the plugin's structured-output schema.
 */
export class AIAgentManagerBuilder<
  TPlugins extends ResolvedPlugins = ResolvedPlugins,
  TExtraData = unknown,
> {
  private name: string
  private instructions: string
  private tools: Tool<TPlugins, TExtraData>[]
  // NOTE(review): campaignsContext/contactInfo are stored but never read in
  // this class — AIAgentBuilder folds equivalent data into its instructions;
  // confirm whether the manager should do the same or these can be dropped.
  private campaignsContext?: CampaignV2[]
  private contactInfo: ContactInfo[]
  private llmConfig: LLMConfig
  // Initialized to [] so the field is definitely assigned under strict TS
  // (it was previously declared but never assigned anywhere).
  // NOTE(review): currently unused — either pass `handoffs: this.handoffs`
  // to the Agent in build() or remove the field.
  private handoffs: Handoff<
    Context<TPlugins, TExtraData>,
    AgentOutputType<typeof OutputSchema>
  >[] = []
  private inputGuardrailRules: GuardrailRule[]
  // `<any>` for consistency with @botonic/core's `z.ZodObject<any>[]`.
  private outputMessagesSchemas: z.ZodObject<any>[]
  private guardrailTrackingContext: GuardrailTrackingContext

  constructor(options: AIAgentManagerBuilderOptions<TPlugins, TExtraData>) {
    this.name = options.name
    this.instructions = options.instructions
    this.tools = options.tools
    this.campaignsContext = options.campaignsContext
    this.contactInfo = options.contactInfo
    this.llmConfig = options.llmConfig
    this.inputGuardrailRules = options.inputGuardrailRules
    // `??` (not `||`): only substitute the default when the option is absent.
    this.outputMessagesSchemas = options.outputMessagesSchemas ?? []
    this.guardrailTrackingContext = options.guardrailTrackingContext
  }

  /**
   * Resolves the model and input guardrails asynchronously and returns the
   * configured Agent. Output-format instructions are appended to the base
   * instructions so the model emits the expected structured-output JSON.
   */
  async build(): Promise<AIAgent<TPlugins, TExtraData>> {
    const inputGuardrails = await createInputGuardrails(
      this.inputGuardrailRules,
      this.llmConfig,
      this.guardrailTrackingContext
    )
    const modelSettings = this.getAgentModelSettings()
    const resolvedModel = await this.llmConfig.getModel()

    return new Agent<
      Context<TPlugins, TExtraData>,
      AgentOutputType<typeof OutputSchema>
    >({
      name: this.name,
      model: resolvedModel,
      modelSettings,
      instructions: `${this.instructions}\n\n${getOutputInstructions()}`,
      tools: this.tools,
      outputType: getOutputSchema(this.outputMessagesSchemas),
      inputGuardrails,
      outputGuardrails: [],
    })
  }

  /**
   * Shallow-copies the shared model settings — including the nested
   * `reasoning`/`text` objects — so per-agent changes never mutate the
   * shared `llmConfig.modelSettings`.
   * NOTE(review): duplicated in AIAgentBuilder's getAgentModelSettings;
   * consider extracting a shared helper.
   */
  private getAgentModelSettings(): ModelSettings {
    const settings: ModelSettings = { ...this.llmConfig.modelSettings }
    if (this.llmConfig.modelSettings.reasoning) {
      settings.reasoning = { ...this.llmConfig.modelSettings.reasoning }
    }
    if (this.llmConfig.modelSettings.text) {
      settings.text = { ...this.llmConfig.modelSettings.text }
    }
    return settings
  }
}
Loading
Loading