diff --git a/fern/apis/api/openapi.json b/fern/apis/api/openapi.json
index 7052ab8ac..7334311b2 100644
--- a/fern/apis/api/openapi.json
+++ b/fern/apis/api/openapi.json
@@ -854,6 +854,619 @@
               "type": "string"
             }
           },
+          {
+            "required": false,
+            "description": "These are the options for the assistant's transcriber.",
+            "oneOf": [
+              {
+                "$ref": "#/components/schemas/AssemblyAITranscriber",
+                "title": "AssemblyAITranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/AzureSpeechTranscriber",
+                "title": "AzureSpeechTranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/CustomTranscriber",
+                "title": "CustomTranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/DeepgramTranscriber",
+                "title": "DeepgramTranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/ElevenLabsTranscriber",
+                "title": "ElevenLabsTranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/GladiaTranscriber",
+                "title": "GladiaTranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/GoogleTranscriber",
+                "title": "GoogleTranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+                "title": "SpeechmaticsTranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/TalkscriberTranscriber",
+                "title": "TalkscriberTranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/OpenAITranscriber",
+                "title": "OpenAITranscriber"
+              },
+              {
+                "$ref": "#/components/schemas/CartesiaTranscriber",
+                "title": "CartesiaTranscriber"
+              }
+            ],
+            "name": "transcriber",
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "required": false,
+            "description": "These are the options for the assistant's LLM.",
+            "oneOf": [
+              {
+                "$ref": "#/components/schemas/AnthropicModel",
+                "title": "Anthropic"
+              },
+              {
+                "$ref": "#/components/schemas/AnyscaleModel",
+                "title": "Anyscale"
+              },
+              {
+                "$ref": "#/components/schemas/CerebrasModel",
+                "title": "Cerebras"
+              },
+              {
+                "$ref": "#/components/schemas/CustomLLMModel",
+                "title": "CustomLLM"
+              },
+              {
+                "$ref": "#/components/schemas/DeepInfraModel",
+                "title": "DeepInfra"
+              },
+              {
+                "$ref": "#/components/schemas/DeepSeekModel",
+                "title": "DeepSeek"
+              },
+              {
+                "$ref": "#/components/schemas/GoogleModel",
+                "title": "Google"
+              },
+              {
+                "$ref": "#/components/schemas/GroqModel",
+                "title": "Groq"
+              },
+              {
+                "$ref": "#/components/schemas/InflectionAIModel",
+                "title": "InflectionAI"
+              },
+              {
+                "$ref": "#/components/schemas/OpenAIModel",
+                "title": "OpenAI"
+              },
+              {
+                "$ref": "#/components/schemas/OpenRouterModel",
+                "title": "OpenRouter"
+              },
+              {
+                "$ref": "#/components/schemas/PerplexityAIModel",
+                "title": "PerplexityAI"
+              },
+              {
+                "$ref": "#/components/schemas/TogetherAIModel",
+                "title": "Together"
+              },
+              {
+                "$ref": "#/components/schemas/XaiModel",
+                "title": "XAI"
+              }
+            ],
+            "name": "model",
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "required": false,
+            "description": "These are the options for the assistant's voice.",
+            "oneOf": [
+              {
+                "$ref": "#/components/schemas/AzureVoice",
+                "title": "AzureVoice"
+              },
+              {
+                "$ref": "#/components/schemas/CartesiaVoice",
+                "title": "CartesiaVoice"
+              },
+              {
+                "$ref": "#/components/schemas/CustomVoice",
+                "title": "CustomVoice"
+              },
+              {
+                "$ref": "#/components/schemas/DeepgramVoice",
+                "title": "DeepgramVoice"
+              },
+              {
+                "$ref": "#/components/schemas/ElevenLabsVoice",
+                "title": "ElevenLabsVoice"
+              },
+              {
+                "$ref": "#/components/schemas/HumeVoice",
+                "title": "HumeVoice"
+              },
+              {
+                "$ref": "#/components/schemas/LMNTVoice",
+                "title": "LMNTVoice"
+              },
+              {
+                "$ref": "#/components/schemas/NeuphonicVoice",
+                "title": "NeuphonicVoice"
+              },
+              {
+                "$ref": "#/components/schemas/OpenAIVoice",
+                "title": "OpenAIVoice"
"OpenAIVoice" + }, + { + "$ref": "#/components/schemas/PlayHTVoice", + "title": "PlayHTVoice" + }, + { + "$ref": "#/components/schemas/RimeAIVoice", + "title": "RimeAIVoice" + }, + { + "$ref": "#/components/schemas/SmallestAIVoice", + "title": "SmallestAIVoice" + }, + { + "$ref": "#/components/schemas/TavusVoice", + "title": "TavusVoice" + }, + { + "$ref": "#/components/schemas/VapiVoice", + "title": "VapiVoice" + }, + { + "$ref": "#/components/schemas/SesameVoice", + "title": "SesameVoice" + }, + { + "$ref": "#/components/schemas/InworldVoice", + "title": "InworldVoice" + }, + { + "$ref": "#/components/schemas/MinimaxVoice", + "title": "MinimaxVoice" + } + ], + "name": "voice", + "in": "query", + "schema": {} + }, + { + "required": false, + "description": "This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.", + "name": "firstMessage", + "in": "query", + "schema": { + "example": "Hello! How can I help you today?", + "type": "string" + } + }, + { + "required": false, + "name": "firstMessageInterruptionsEnabled", + "in": "query", + "schema": { + "default": false, + "type": "boolean" + } + }, + { + "required": false, + "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).\n\n@default 'assistant-speaks-first'", + "name": "firstMessageMode", + "in": "query", + "schema": { + "example": "assistant-speaks-first", + "enum": [ + "assistant-speaks-first", + "assistant-speaks-first-with-model-generated-message", + "assistant-waits-for-user" + ], + "type": "string" + } + }, + { + "required": false, + "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nThis uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached.\nYou can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.", + "oneOf": [ + { + "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan", + "title": "Google" + }, + { + "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan", + "title": "OpenAI" + }, + { + "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan", + "title": "Twilio" + }, + { + "$ref": "#/components/schemas/VapiVoicemailDetectionPlan", + "title": "Vapi" + } + ], + "name": "voicemailDetection", + "in": "query", + "schema": {} + }, + { + "required": false, + "description": "These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started. 
+            "name": "clientMessages",
+            "in": "query",
+            "schema": {
+              "example": [
+                "conversation-update",
+                "function-call",
+                "hang",
+                "model-output",
+                "speech-update",
+                "status-update",
+                "transfer-update",
+                "transcript",
+                "tool-calls",
+                "user-interrupted",
+                "voice-input",
+                "workflow.node.started"
+              ],
+              "type": "array",
+              "items": {
+                "type": "string",
+                "enum": [
+                  "conversation-update",
+                  "function-call",
+                  "function-call-result",
+                  "hang",
+                  "language-changed",
+                  "metadata",
+                  "model-output",
+                  "speech-update",
+                  "status-update",
+                  "transcript",
+                  "tool-calls",
+                  "tool-calls-result",
+                  "tool.completed",
+                  "transfer-update",
+                  "user-interrupted",
+                  "voice-input",
+                  "workflow.node.started"
+                ]
+              }
+            }
+          },
+          {
+            "required": false,
+            "description": "These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted. You can check the shape of the messages in ServerMessage schema.",
+            "name": "serverMessages",
+            "in": "query",
+            "schema": {
+              "example": [
+                "conversation-update",
+                "end-of-call-report",
+                "function-call",
+                "hang",
+                "speech-update",
+                "status-update",
+                "tool-calls",
+                "transfer-destination-request",
+                "handoff-destination-request",
+                "user-interrupted"
+              ],
+              "type": "array",
+              "items": {
+                "type": "string",
+                "enum": [
+                  "conversation-update",
+                  "end-of-call-report",
+                  "function-call",
+                  "hang",
+                  "language-changed",
+                  "language-change-detected",
+                  "model-output",
+                  "phone-call-control",
+                  "speech-update",
+                  "status-update",
+                  "transcript",
+                  "transcript[transcriptType=\"final\"]",
+                  "tool-calls",
+                  "transfer-destination-request",
+                  "handoff-destination-request",
+                  "transfer-update",
+                  "user-interrupted",
+                  "voice-input",
+                  "chat.created",
+                  "chat.deleted",
+                  "session.created",
+                  "session.updated",
+                  "session.deleted",
+                  "call.deleted",
+                  "call.delete.failed"
+                ]
+              }
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.\n\n@default 600 (10 minutes)",
+            "name": "maxDurationSeconds",
+            "in": "query",
+            "schema": {
+              "minimum": 10,
+              "maximum": 43200,
+              "example": 600,
+              "type": "number"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+            "oneOf": [
+              {
+                "type": "string",
+                "enum": [
+                  "off",
+                  "office"
+                ],
+                "example": "office"
+              },
+              {
+                "type": "string",
+                "format": "uri",
+                "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+              }
+            ],
+            "name": "backgroundSound",
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "required": false,
+            "description": "This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.\n\nDefault `false` while in beta.\n\n@default false",
+            "name": "modelOutputInMessagesEnabled",
+            "in": "query",
+            "schema": {
+              "example": false,
+              "type": "boolean"
+            }
+          },
+          {
+            "required": false,
+            "description": "These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.",
+            "name": "transportConfigurations",
+            "in": "query",
+            "schema": {
+              "type": "array"
+            }
+          },
+          {
+            "name": "observabilityPlan",
+            "required": false,
+            "description": "This is the plan for observability of assistant's calls.\n\nCurrently, only Langfuse is supported.",
+            "oneOf": [
+              {
+                "$ref": "#/components/schemas/LangfuseObservabilityPlan",
+                "title": "Langfuse"
+              }
+            ],
+            "allOf": [
+              {
+                "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+              }
+            ],
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "required": false,
+            "description": "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement additional credentials using this. Dynamic credentials override existing credentials.",
+            "name": "credentials",
+            "in": "query",
+            "schema": {
+              "type": "array"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is a set of actions that will be performed on certain events.",
+            "name": "hooks",
+            "in": "query",
+            "schema": {
+              "type": "array"
+            }
+          },
+          {
+            "required": false,
+            "description": "These are values that will be used to replace the template variables in the assistant messages and other text-based fields.\nThis uses LiquidJS syntax. https://liquidjs.com/tutorials/intro-to-liquid.html\n\nSo for example, `{{ name }}` will be replaced with the value of `name` in `variableValues`.\n`{{\"now\" | date: \"%b %d, %Y, %I:%M %p\", \"America/New_York\"}}` will be replaced with the current date and time in New York.\n Some VAPI reserved defaults:\n - *customer* - the customer object",
+            "name": "variableValues",
+            "in": "query",
+            "schema": {
+              "type": "object"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the name of the assistant.\n\nThis is required when you want to transfer between assistants in a call.",
+            "name": "name",
+            "in": "query",
+            "schema": {
+              "maxLength": 40,
+              "type": "string"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
+            "name": "voicemailMessage",
+            "in": "query",
+            "schema": {
+              "maxLength": 1000,
+              "type": "string"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the message that the assistant will say if it ends the call.\n\nIf unspecified, it will hang up without saying anything.",
+            "name": "endCallMessage",
+            "in": "query",
+            "schema": {
+              "maxLength": 1000,
+              "type": "string"
+            }
+          },
+          {
+            "required": false,
+            "description": "This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
+            "name": "endCallPhrases",
+            "in": "query",
+            "schema": {
+              "type": "array",
+              "items": {
+                "type": "string",
+                "maxLength": 140,
+                "minLength": 2
+              }
+            }
+          },
+          {
+            "name": "compliancePlan",
+            "required": false,
+            "in": "query",
+            "schema": {
+              "$ref": "#/components/schemas/CompliancePlan"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is for metadata you want to store on the assistant.",
+            "name": "metadata",
+            "in": "query",
+            "schema": {
+              "type": "object"
+            }
+          },
+          {
+            "name": "backgroundSpeechDenoisingPlan",
+            "required": false,
+            "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
+            "allOf": [
+              {
+                "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+              }
+            ],
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "name": "analysisPlan",
+            "required": false,
+            "description": "This is the plan for analysis of assistant's calls. Stored in `call.analysis`.",
+            "allOf": [
+              {
+                "$ref": "#/components/schemas/AnalysisPlan"
+              }
+            ],
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "name": "artifactPlan",
+            "required": false,
+            "description": "This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.",
+            "allOf": [
+              {
+                "$ref": "#/components/schemas/ArtifactPlan"
+              }
+            ],
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "name": "startSpeakingPlan",
+            "required": false,
+            "description": "This is the plan for when the assistant should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+            "allOf": [
+              {
+                "$ref": "#/components/schemas/StartSpeakingPlan"
+              }
+            ],
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "name": "stopSpeakingPlan",
+            "required": false,
+            "description": "This is the plan for when the assistant should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize the customer's interruption.\n- The assistant is too fast to recognize the customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+            "allOf": [
+              {
+                "$ref": "#/components/schemas/StopSpeakingPlan"
+              }
+            ],
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "name": "monitorPlan",
+            "required": false,
+            "description": "This is the plan for real-time monitoring of the assistant's calls.\n\nUsage:\n- To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.",
+            "allOf": [
+              {
+                "$ref": "#/components/schemas/MonitorPlan"
+              }
+            ],
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "required": false,
+            "description": "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+            "name": "credentialIds",
+            "in": "query",
+            "schema": {
+              "type": "array",
+              "items": {
+                "type": "string"
+              }
+            }
+          },
+          {
+            "name": "server",
+            "required": false,
+            "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server.url\n2. phoneNumber.serverUrl\n3. org.serverUrl",
+            "allOf": [
+              {
+                "$ref": "#/components/schemas/Server"
+              }
+            ],
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "name": "keypadInputPlan",
+            "required": false,
+            "in": "query",
+            "schema": {
+              "$ref": "#/components/schemas/KeypadInputPlan"
+            }
+          },
           {
             "name": "phoneNumberId",
             "required": false,
@@ -863,6 +1476,89 @@
               "type": "string"
             }
           },
+          {
+            "required": false,
+            "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
+            "name": "numberE164CheckEnabled",
+            "in": "query",
+            "schema": {
+              "default": true,
+              "type": "boolean"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the extension that will be dialed after the call is answered.",
+            "name": "extension",
+            "in": "query",
+            "schema": {
+              "maxLength": 10,
+              "example": null,
+              "type": "string"
+            }
+          },
+          {
+            "name": "assistantOverrides",
+            "required": false,
+            "description": "These are the overrides for the assistant's settings and template variables specific to this customer.\nThis allows customization of the assistant's behavior for individual customers in batch calls.",
+            "allOf": [
+              {
+                "$ref": "#/components/schemas/AssistantOverrides"
+              }
+            ],
+            "in": "query",
+            "schema": {}
+          },
+          {
+            "required": false,
+            "description": "This is the number of the customer.",
+            "name": "number",
+            "in": "query",
+            "schema": {
+              "minLength": 3,
+              "maxLength": 40,
+              "type": "string"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the SIP URI of the customer.",
+            "name": "sipUri",
+            "in": "query",
+            "schema": {
+              "type": "string"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the name of the customer. This is just for your own reference.\n\nFor SIP inbound calls, this is extracted from the `From` SIP header with format `\"Display Name\" <sip:username@domain>`.",
+            "name": "name",
+            "in": "query",
+            "schema": {
+              "maxLength": 40,
+              "type": "string"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the email of the customer.",
+            "name": "email",
+            "in": "query",
+            "schema": {
+              "maxLength": 40,
+              "type": "string"
+            }
+          },
+          {
+            "required": false,
+            "description": "This is the external ID of the customer.",
+            "name": "externalId",
+            "in": "query",
+            "schema": {
+              "maxLength": 40,
+              "type": "string"
+            }
+          },
           {
             "required": false,
             "description": "Filter calls by structured output values. Use structured output ID as key and filter operators as values.",