diff --git a/plugins/conversation-insights/plugin.definition.ts b/plugins/conversation-insights/plugin.definition.ts
index 7bec41af788..d8dd7002f1d 100644
--- a/plugins/conversation-insights/plugin.definition.ts
+++ b/plugins/conversation-insights/plugin.definition.ts
@@ -3,7 +3,7 @@ import llm from './bp_modules/llm'
 
 export default new PluginDefinition({
   name: 'conversation-insights',
-  version: '0.2.1',
+  version: '0.2.2',
   configuration: {
     schema: z.object({ modelId: z.string() }),
   },
@@ -22,6 +22,10 @@ export default new PluginDefinition({
         title: 'Participant count',
        description: 'The count of users having participated in the conversation, including the bot. Type: int',
       },
+      sentiment: {
+        title: 'Sentiment',
+        description: 'The sentiment that best describes the conversation. Type: enum Sentiments',
+      },
     },
   },
   events: {
diff --git a/plugins/conversation-insights/src/index.ts b/plugins/conversation-insights/src/index.ts
index 172804b2661..e7fdcf99616 100644
--- a/plugins/conversation-insights/src/index.ts
+++ b/plugins/conversation-insights/src/index.ts
@@ -1,6 +1,6 @@
 import * as sdk from '@botpress/sdk'
-import * as summaryUpdater from './summaryUpdater'
 import * as updateScheduler from './summaryUpdateScheduler'
+import * as summaryUpdater from './tagsUpdater'
 import * as types from './types'
 import * as bp from '.botpress'
 
@@ -15,7 +15,7 @@ plugin.on.afterIncomingMessage('*', async (props) => {
   const { message_count } = await _onNewMessage({ ...props, conversation })
 
   if (updateScheduler.isTimeToUpdate(message_count)) {
-    props.client.createEvent({ payload: {}, type: 'updateSummary', conversationId: props.data.conversationId })
+    await props.events.updateSummary.withConversationId(props.data.conversationId).emit({})
   }
 
   return undefined
@@ -52,8 +52,10 @@ const _onNewMessage = async (
 }
 
 plugin.on.event('updateSummary', async (props) => {
-  const messages = await props.client.listMessages({ conversationId: props.event.conversationId })
-  const newMessages: string[] = messages.messages.map((message) => message.payload.text)
+  const firstMessagePage = await props.client
+    .listMessages({ conversationId: props.event.conversationId })
+    .then((res) => res.messages)
+
   if (!props.event.conversationId) {
     throw new sdk.RuntimeError(`The conversationId cannot be null when calling the event '${props.event.type}'`)
   }
@@ -62,7 +64,7 @@ plugin.on.event('updateSummary', async (props) => {
 
   await summaryUpdater.updateTitleAndSummary({
     ...props,
     conversation: conversation.conversation,
-    messages: newMessages,
+    messages: firstMessagePage,
   })
 })
diff --git a/plugins/conversation-insights/src/generate-content.ts b/plugins/conversation-insights/src/prompt/parse-content.ts
similarity index 83%
rename from plugins/conversation-insights/src/generate-content.ts
rename to plugins/conversation-insights/src/prompt/parse-content.ts
index 91a2d1eae66..bef79cea140 100644
--- a/plugins/conversation-insights/src/generate-content.ts
+++ b/plugins/conversation-insights/src/prompt/parse-content.ts
@@ -1,7 +1,6 @@
 import * as sdk from '@botpress/sdk'
 import JSON5 from 'json5'
 import { jsonrepair } from 'jsonrepair'
-import { OutputFormat } from './summary-prompt'
 import * as bp from '.botpress'
 
 export type LLMInput = bp.interfaces.llm.actions.generateContent.input.Input
@@ -10,9 +9,9 @@ export type LLMOutput = bp.interfaces.llm.actions.generateContent.output.Output
 export type LLMMessage = LLMInput['messages'][number]
 export type LLMChoice = LLMOutput['choices'][number]
 
-type PredictResponse = {
+export type PredictResponse<T> = {
   success: boolean
-  json: OutputFormat
+  json: T
 }
 
 const tryParseJson = (str: string) => {
@@ -23,7 +22,7 @@ const tryParseJson = (str: string) => {
   }
 }
 
-export const parseLLMOutput = (output: LLMOutput): PredictResponse => {
+export const parseLLMOutput = <T>(output: LLMOutput): PredictResponse<T> => {
   const mappedChoices: LLMChoice['content'][] = output.choices.map((choice) => choice.content)
   if (!mappedChoices[0]) throw new sdk.RuntimeError('Could not parse LLM output')
   const firstChoice = mappedChoices[0]
diff --git a/plugins/conversation-insights/src/prompt/prompt.ts b/plugins/conversation-insights/src/prompt/prompt.ts
new file mode 100644
index 00000000000..6f4c39c525c
--- /dev/null
+++ b/plugins/conversation-insights/src/prompt/prompt.ts
@@ -0,0 +1,47 @@
+import { z } from '@botpress/sdk'
+import { LLMInput } from './parse-content'
+import * as bp from '.botpress'
+
+export type SentimentAnalysisOutput = z.infer<typeof SentimentAnalysisOutput>
+export const SentimentAnalysisOutput = z.object({
+  sentiment: z.string().describe('The sentiment that best describes the conversation'),
+})
+
+export type InputFormat = z.infer<typeof InputFormat>
+export const InputFormat = z.array(z.string())
+
+const formatMessages = (
+  messages: PromptArgs['messages'],
+  context: PromptArgs['context'],
+  botId: string
+): LLMInput['messages'] => {
+  const contextMessage: LLMInput['messages'][0] = {
+    role: 'assistant',
+    content: `Context: ${JSON.stringify(context)}`,
+  }
+
+  const messagesWithUser: LLMInput['messages'] = []
+  for (const message of messages) {
+    if (message.type !== 'text') continue // only text is supported to analyse messages
+    messagesWithUser.push({
+      role: message.userId === botId ? 'assistant' : 'user',
+      content: message.payload.text,
+    })
+  }
+  return [contextMessage, ...messagesWithUser.reverse()]
+}
+
+export type PromptArgs = {
+  systemPrompt: string
+  messages: bp.MessageHandlerProps['message'][]
+  model: { id: string }
+  context: object
+  botId: string
+}
+export const createPrompt = (args: PromptArgs): LLMInput => ({
+  responseFormat: 'json_object',
+  temperature: 0,
+  systemPrompt: args.systemPrompt.trim(),
+  messages: formatMessages(args.messages, args.context, args.botId),
+  model: args.model,
+})
diff --git a/plugins/conversation-insights/src/prompt/sentiment-prompt.ts b/plugins/conversation-insights/src/prompt/sentiment-prompt.ts
new file mode 100644
index 00000000000..67c69469401
--- /dev/null
+++ b/plugins/conversation-insights/src/prompt/sentiment-prompt.ts
@@ -0,0 +1,89 @@
+import { z } from '@botpress/sdk'
+import { LLMInput } from './parse-content'
+import * as prompt from './prompt'
+
+export type SentimentAnalysisOutput = z.infer<typeof SentimentAnalysisOutput>
+export const SentimentAnalysisOutput = z.object({
+  sentiment: z
+    .enum(['very_negative', 'negative', 'neutral', 'positive', 'very_positive'])
+    .describe('The sentiment that best describes the conversation'),
+})
+
+export const SENTIMENT_OPTIONS = SentimentAnalysisOutput.shape.sentiment.options.map((opt) => ` "${opt}" `).join('|')
+
+export type PromptArgs = Omit<prompt.PromptArgs, 'systemPrompt'>
+export const createPrompt = (args: PromptArgs): LLMInput =>
+  prompt.createPrompt({
+    ...args,
+    systemPrompt: `
+You are a conversation analyser.
+You will be given:
+- A previous sentiment
+- An array of messages
+
+Your task is to reply the sentiment that best describes the overall conversation.
+
+Return your response only in valid JSON using the following type:
+
+\`\`\`json
+{
+  "sentiment": ${SENTIMENT_OPTIONS}, // The latest sentiment of the conversation
+}
+\`\`\`
+
+Instructions:
+
+- Consider the previous sentiment when choosing the new one — keep it if still relevant, or update it if needed.
+- Focus on the most recent sentiment of the conversation.
+- Only use the available sentiments
+- Do not include extra commentary, formatting, or explanation outside the JSON output.
+- The messages are in order, which means the most recent ones are at the end of the list.
+- Keep in mind that your own messages are included in the messages, but have the 'assistant' role
+
+The available sentiments are: ${SENTIMENT_OPTIONS}
+
+Examples:
+
+Input:
+
+\`\`\`json
+{
+  "messages": [
+    "Context: {'previousSentiment': 'negative'}",
+    "User: I hate your service. I want to unsubscribe right now!",
+    "Bot: I understand your frustation, but there is nothing we can do",
+    "User: I want a refund.",
+  ]
+}
+\`\`\`
+
+Output:
+
+\`\`\`json
+{
+  "sentiment": "very_negative"
+}
+\`\`\`
+
+Input:
+
+\`\`\`json
+{
+  "messages": [
+    "previousSentiment: neutral",
+    "User: Hi, how could I get a premium subscription?",
+    "Bot: You can get it by clicking on the link I just sent you.",
+    "User: Thank you so much, your help has changed my life",
+  ]
+}
+\`\`\`
+
+Output:
+
+\`\`\`json
+{
+  "sentiment": "very_positive"
+}
+\`\`\`
+`,
+  })
diff --git a/plugins/conversation-insights/src/summary-prompt.ts b/plugins/conversation-insights/src/prompt/summary-prompt.ts
similarity index 51%
rename from plugins/conversation-insights/src/summary-prompt.ts
rename to plugins/conversation-insights/src/prompt/summary-prompt.ts
index 2f60caa3c02..d72d91bfe72 100644
--- a/plugins/conversation-insights/src/summary-prompt.ts
+++ b/plugins/conversation-insights/src/prompt/summary-prompt.ts
@@ -1,5 +1,6 @@
 import { z } from '@botpress/sdk'
-import { LLMInput } from './generate-content'
+import { LLMInput } from './parse-content'
+import * as prompt from './prompt'
 
 export type OutputFormat = z.infer<typeof OutputFormat>
 export const OutputFormat = z.object({
@@ -7,36 +8,11 @@ export const OutputFormat = z.object({
   summary: z.string().describe('A short summary of the conversation'),
 })
 
-export type InputFormat = z.infer<typeof InputFormat>
-export const InputFormat = z.array(z.string())
-
-const formatMessages = (
-  messages: string[],
-  context: PromptArgs['context']
-): { role: 'user' | 'assistant'; content: string } => {
-  return {
-    role: 'user',
-    content: JSON.stringify(
-      {
-        previousTitle: context.previousTitle,
-        previousSummary: context.previousSummary,
-        messages: messages.reverse(),
-      },
-      null,
-      2
-    ),
-  }
-}
-
-export type PromptArgs = {
-  messages: string[]
-  model: { id: string }
-  context: { previousSummary?: string; previousTitle?: string }
-}
-export const createPrompt = (args: PromptArgs): LLMInput => ({
-  responseFormat: 'json_object',
-  temperature: 0,
-  systemPrompt: `
+export type PromptArgs = Omit<prompt.PromptArgs, 'systemPrompt'>
+export const createPrompt = (args: PromptArgs): LLMInput =>
+  prompt.createPrompt({
+    ...args,
+    systemPrompt: `
 You are a conversation summarizer.
 You will be given:
 - A previous title and summary
@@ -68,13 +44,12 @@ Input:
 
 \`\`\`json
 {
-  "previousTitle": "Used cars",
-  "previousSummary": "The user is talking abous a used Toyota Matrix",
   "messages": [
-    "What mileage should I expect from a car that was made two years ago?",
-    "What price should I expect from a car manufactured in 2011?",
-    "What should I look out for when buying a secondhand Toyota Matrix?",
-    "I am looking to buy a used car, what would you recommend?",
+    "Context: {'previousTitle': 'Used cars', 'previousSummary': 'The user is talking abous a used Toyota Matrix'}",
+    "User: What mileage should I expect from a car that was made two years ago?",
+    "User: What price should I expect from a car manufactured in 2011?",
+    "User: What should I look out for when buying a secondhand Toyota Matrix?",
+    "User: I am looking to buy a used car, what would you recommend?",
   ]
 }
 \`\`\`
@@ -87,7 +62,5 @@ Output:
   "summary": "The user is seeking advice on purchasing a used car."
 }
 \`\`\`
-`.trim(),
-  messages: [formatMessages(args.messages, args.context)],
-  model: args.model,
-})
+`,
+  })
diff --git a/plugins/conversation-insights/src/summaryUpdater.ts b/plugins/conversation-insights/src/summaryUpdater.ts
deleted file mode 100644
index 84963078a19..00000000000
--- a/plugins/conversation-insights/src/summaryUpdater.ts
+++ /dev/null
@@ -1,44 +0,0 @@
-import * as gen from './generate-content'
-import * as summarizer from './summary-prompt'
-import * as types from './types'
-import * as bp from '.botpress'
-
-type CommonProps = types.CommonProps
-
-type UpdateTitleAndSummaryProps = CommonProps & {
-  conversation: bp.MessageHandlerProps['conversation']
-  messages: string[]
-}
-export const updateTitleAndSummary = async (props: UpdateTitleAndSummaryProps) => {
-  const prompt = summarizer.createPrompt({
-    messages: props.messages,
-    model: { id: props.configuration.modelId },
-    context: { previousTitle: props.conversation.tags.title, previousSummary: props.conversation.tags.summary },
-  })
-
-  let attemptCount = 0
-  const maxRetries = 3
-
-  let llmOutput = await props.actions.llm.generateContent(prompt)
-  let parsed = gen.parseLLMOutput(llmOutput)
-
-  while (!parsed.success && attemptCount < maxRetries) {
-    props.logger.debug(`Attempt ${attemptCount + 1}: The LLM output did not respect the schema.`, parsed.json)
-    llmOutput = await props.actions.llm.generateContent(prompt)
-    parsed = gen.parseLLMOutput(llmOutput)
-    attemptCount++
-  }
-
-  if (!parsed.success) {
-    props.logger.debug(`The LLM output did not respect the schema after ${attemptCount} retries.`, parsed.json)
-    return
-  }
-
-  await props.client.updateConversation({
-    id: props.conversation.id,
-    tags: {
-      title: parsed.json.title,
-      summary: parsed.json.summary,
-    },
-  })
-}
diff --git a/plugins/conversation-insights/src/tagsUpdater.ts b/plugins/conversation-insights/src/tagsUpdater.ts
new file mode 100644
index 00000000000..55a65360382
--- /dev/null
+++ b/plugins/conversation-insights/src/tagsUpdater.ts
@@ -0,0 +1,73 @@
+import * as gen from './prompt/parse-content'
+import * as sentiment from './prompt/sentiment-prompt'
+import * as summarizer from './prompt/summary-prompt'
+import * as types from './types'
+import * as bp from '.botpress'
+
+type CommonProps = types.CommonProps
+
+type UpdateTitleAndSummaryProps = CommonProps & {
+  conversation: bp.MessageHandlerProps['conversation']
+  messages: bp.MessageHandlerProps['message'][]
+}
+export const updateTitleAndSummary = async (props: UpdateTitleAndSummaryProps) => {
+  const summaryPrompt = summarizer.createPrompt({
+    messages: props.messages,
+    botId: props.ctx.botId,
+    model: { id: props.configuration.modelId },
+    context: { previousTitle: props.conversation.tags.title, previousSummary: props.conversation.tags.summary },
+  })
+
+  const parsedSummary = await _generateContentWithRetries<summarizer.OutputFormat>({
+    actions: props.actions,
+    logger: props.logger,
+    prompt: summaryPrompt,
+  })
+
+  const sentimentPrompt = sentiment.createPrompt({
+    messages: props.messages,
+    botId: props.ctx.botId,
+    context: { previousSentiment: props.conversation.tags.sentiment },
+    model: { id: props.configuration.modelId },
+  })
+
+  const parsedSentiment = await _generateContentWithRetries<sentiment.SentimentAnalysisOutput>({
+    actions: props.actions,
+    logger: props.logger,
+    prompt: sentimentPrompt,
+  })
+
+  await props.client.updateConversation({
+    id: props.conversation.id,
+    tags: {
+      title: parsedSummary.json.title,
+      summary: parsedSummary.json.summary,
+      sentiment: parsedSentiment.json.sentiment,
+    },
+  })
+}
+
+type ParsePromptProps = {
+  actions: UpdateTitleAndSummaryProps['actions']
+  logger: UpdateTitleAndSummaryProps['logger']
+  prompt: gen.LLMInput
+}
+const _generateContentWithRetries = async <T>(props: ParsePromptProps): Promise<gen.PredictResponse<T>> => {
+  let attemptCount = 0
+  const maxRetries = 3
+
+  let llmOutput = await props.actions.llm.generateContent(props.prompt)
+  let parsed = gen.parseLLMOutput<T>(llmOutput)
+
+  while (!parsed.success && attemptCount < maxRetries) {
+    props.logger.debug(`Attempt ${attemptCount + 1}: The LLM output did not respect the schema.`, parsed.json)
+    llmOutput = await props.actions.llm.generateContent(props.prompt)
+    parsed = gen.parseLLMOutput<T>(llmOutput)
+    attemptCount++
+  }
+
+  if (!parsed.success) {
+    props.logger.debug(`The LLM output did not respect the schema after ${attemptCount} retries.`, parsed.json)
+  }
+  return parsed
+}