From d229cbfae29c40d0125dffbe5769419de0078448 Mon Sep 17 00:00:00 2001 From: github-actions Date: Thu, 20 Nov 2025 00:27:24 +0000 Subject: [PATCH] Update API specifications with fern api update --- fern/apis/api/openapi.json | 1440 ++++++++++++++++++------------------ 1 file changed, 724 insertions(+), 716 deletions(-) diff --git a/fern/apis/api/openapi.json b/fern/apis/api/openapi.json index f7f812482..a9207d5a9 100644 --- a/fern/apis/api/openapi.json +++ b/fern/apis/api/openapi.json @@ -11470,6 +11470,310 @@ "model" ] }, + "RegexOption": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "This is the type of the regex option. Options are:\n- `ignore-case`: Ignores the case of the text being matched.\n- `whole-word`: Matches whole words only.\n- `multi-line`: Matches across multiple lines.", + "enum": [ + "ignore-case", + "whole-word", + "multi-line" + ] + }, + "enabled": { + "type": "boolean", + "description": "This is whether to enable the option.\n\n@default false" + } + }, + "required": [ + "type", + "enabled" + ] + }, + "AssistantCustomEndpointingRule": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "This endpointing rule is based on the last assistant message before customer started speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the last assistant message\n- If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you have yes/no questions in your use case like \"are you interested in a loan?\", you can set a shorter timeout.\n- If you have questions where the customer may pause to look up information like \"what's my account number?\", you can set a longer timeout.", + "enum": [ + "assistant" + ] + }, + "regex": { + "type": "string", + "description": "This is the regex pattern to match.\n\nNote:\n- This works by using the `RegExp.test` method in Node.js. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"." + }, + "regexOptions": { + "description": "These are the options for the regex match. 
Defaults to all disabled.\n\n@default []", + "type": "array", + "items": { + "$ref": "#/components/schemas/RegexOption" + } + }, + "timeoutSeconds": { + "type": "number", + "description": "This is the endpointing timeout in seconds, if the rule is matched.", + "minimum": 0, + "maximum": 15 + } + }, + "required": [ + "type", + "regex", + "timeoutSeconds" + ] + }, + "CustomerCustomEndpointingRule": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "This endpointing rule is based on current customer message as they are speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the current customer transcription\n- If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you want to wait longer while customer is speaking numbers, you can set a longer timeout.", + "enum": [ + "customer" + ] + }, + "regex": { + "type": "string", + "description": "This is the regex pattern to match.\n\nNote:\n- This works by using the `RegExp.test` method in Node.js. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"." + }, + "regexOptions": { + "description": "These are the options for the regex match. Defaults to all disabled.\n\n@default []", + "type": "array", + "items": { + "$ref": "#/components/schemas/RegexOption" + } + }, + "timeoutSeconds": { + "type": "number", + "description": "This is the endpointing timeout in seconds, if the rule is matched.", + "minimum": 0, + "maximum": 15 + } + }, + "required": [ + "type", + "regex", + "timeoutSeconds" + ] + }, + "BothCustomEndpointingRule": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "This endpointing rule is based on both the last assistant message and the current customer message as they are speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the last assistant message and the current customer transcription\n- If assistant message matches `assistantRegex` AND customer message matches `customerRegex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you want to wait longer while customer is speaking numbers, you can set a longer timeout.", + "enum": [ + "both" + ] + }, + "assistantRegex": { + "type": "string", + "description": "This is the regex pattern to match the assistant's message.\n\nNote:\n- This works by using the `RegExp.test` method in Node.js. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"." + }, + "assistantRegexOptions": { + "description": "These are the options for the assistant's message regex match. 
Defaults to all disabled.\n\n@default []", + "type": "array", + "items": { + "$ref": "#/components/schemas/RegexOption" + } + }, + "customerRegex": { + "type": "string" + }, + "customerRegexOptions": { + "description": "These are the options for the customer's message regex match. Defaults to all disabled.\n\n@default []", + "type": "array", + "items": { + "$ref": "#/components/schemas/RegexOption" + } + }, + "timeoutSeconds": { + "type": "number", + "description": "This is the endpointing timeout in seconds, if the rule is matched.", + "minimum": 0, + "maximum": 15 + } + }, + "required": [ + "type", + "assistantRegex", + "customerRegex", + "timeoutSeconds" + ] + }, + "VapiSmartEndpointingPlan": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "description": "This is the provider for the smart endpointing plan.", + "enum": [ + "vapi", + "livekit", + "custom-endpointing-model" + ], + "example": "vapi" + } + }, + "required": [ + "provider" + ] + }, + "LivekitSmartEndpointingPlan": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "description": "This is the provider for the smart endpointing plan.", + "enum": [ + "vapi", + "livekit", + "custom-endpointing-model" + ], + "example": "livekit" + }, + "waitFunction": { + "type": "string", + "description": "This expression describes how long the bot will wait to start speaking based on the likelihood that the user has reached an endpoint.\n\nThis is a millisecond-valued function. It maps probabilities (real numbers on [0,1]) to milliseconds that the bot should wait before speaking ([0, \\infty]). Any negative values that are returned are set to zero (the bot can't start talking in the past).\n\nA probability of zero represents very high confidence that the caller has stopped speaking, and would like the bot to speak to them. A probability of one represents very high confidence that the caller is still speaking.\n\nUnder the hood, this is parsed into a mathjs expression. Whatever you use to write your expression needs to be valid with respect to mathjs.\n\n@default \"20 + 500 * sqrt(x) + 2500 * x^3\"", + "examples": [ + "70 + 4000 * x", + "200 + 8000 * x", + "4000 * (1 - cos(pi * x))" + ] + } + }, + "required": [ + "provider" + ] + }, + "CustomEndpointingModelSmartEndpointingPlan": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "description": "This is the provider for the smart endpointing plan. Use `custom-endpointing-model` for custom endpointing providers that are not natively supported.", + "enum": [ + "vapi", + "livekit", + "custom-endpointing-model" + ], + "example": "custom-endpointing-model" + }, + "server": { + "description": "This is where the endpointing request will be sent. If not provided, will be sent to `assistant.server`. If that does not exist either, will be sent to `org.server`.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"call.endpointing.request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"message\": \"Hello, how are you?\",\n \"time\": 1234567890,\n \"secondsFromStart\": 0\n }\n ],\n ...other metadata about the call...\n }\n}\n\nResponse Expected:\n{\n \"timeoutSeconds\": 0.5\n}\n\nThe timeout is the number of seconds to wait before considering the user's speech as finished. 
The endpointing timeout is automatically reset each time a new transcript is received (and another `call.endpointing.request` is sent).", + "allOf": [ + { + "$ref": "#/components/schemas/Server" + } + ] + } + }, + "required": [ + "provider" + ] + }, + "TranscriptionEndpointingPlan": { + "type": "object", + "properties": { + "onPunctuationSeconds": { + "type": "number", + "description": "The minimum number of seconds to wait after transcription ending with punctuation before sending a request to the model. Defaults to 0.1.\n\nThis setting exists because the transcriber punctuates the transcription when it's more confident that customer has completed a thought.\n\n@default 0.1", + "minimum": 0, + "maximum": 3, + "example": 0.1 + }, + "onNoPunctuationSeconds": { + "type": "number", + "description": "The minimum number of seconds to wait after transcription ending without punctuation before sending a request to the model. Defaults to 1.5.\n\nThis setting exists to catch the cases where the transcriber was not confident enough to punctuate the transcription, but the customer is done and has been silent for a long time.\n\n@default 1.5", + "minimum": 0, + "maximum": 3, + "example": 1.5 + }, + "onNumberSeconds": { + "type": "number", + "description": "The minimum number of seconds to wait after transcription ending with a number before sending a request to the model. Defaults to 0.5.\n\nThis setting exists because the transcriber will sometimes punctuate the transcription ending with a number, even though the customer hasn't uttered the full number. This happens commonly for long numbers when the customer reads the number in chunks.\n\n@default 0.5", + "minimum": 0, + "maximum": 3, + "example": 0.5 + } + } + }, + "StartSpeakingPlan": { + "type": "object", + "properties": { + "waitSeconds": { + "type": "number", + "description": "This is how long assistant waits before speaking. Defaults to 0.4.\n\nThis is the minimum it will wait but if there is latency in the pipeline, this minimum will be exceeded. This is intended as a stopgap in case the pipeline is moving too fast.\n\nExample:\n- If model generates tokens and voice generates bytes within 100ms, the pipeline still waits 300ms before outputting speech.\n\nUsage:\n- If the customer is taking long pauses, set this to a higher value.\n- If the assistant is accidentally jumping in too much, set this to a higher value.\n\n@default 0.4", + "minimum": 0, + "maximum": 5, + "example": 0.4 + }, + "smartEndpointingEnabled": { + "example": false, + "deprecated": true, + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "string", + "enum": [ + "livekit" + ] + } + ] + }, + "smartEndpointingPlan": { + "description": "This is the plan for smart endpointing. Pick between Vapi smart endpointing, LiveKit, or custom endpointing model (or nothing). We strongly recommend using LiveKit endpointing when working in English. 
LiveKit endpointing is not yet supported in other languages.\n\nIf this is set, it will override and take precedence over `transcriptionEndpointingPlan`.\nThis plan will still be overridden by any matching `customEndpointingRules`.\n\nIf this is not set, the system will automatically use the transcriber's built-in endpointing capabilities if available.", + "oneOf": [ + { + "$ref": "#/components/schemas/VapiSmartEndpointingPlan", + "title": "Vapi" + }, + { + "$ref": "#/components/schemas/LivekitSmartEndpointingPlan", + "title": "Livekit" + }, + { + "$ref": "#/components/schemas/CustomEndpointingModelSmartEndpointingPlan", + "title": "Custom Endpointing Model" + } + ] + }, + "customEndpointingRules": { + "type": "array", + "description": "These are the custom endpointing rules to set an endpointing timeout based on a regex on the customer's speech or the assistant's last message.\n\nUsage:\n- If you have yes/no questions like \"are you interested in a loan?\", you can set a shorter timeout.\n- If you have questions where the customer may pause to look up information like \"what's my account number?\", you can set a longer timeout.\n- If you want to wait longer while customer is enumerating a list of numbers, you can set a longer timeout.\n\nThese rules have the highest precedence and will override both `smartEndpointingPlan` and `transcriptionEndpointingPlan` when a rule is matched.\n\nThe rules are evaluated in order and the first one that matches will be used.\n\nOrder of precedence for endpointing:\n1. customEndpointingRules (if any match)\n2. smartEndpointingPlan (if set)\n3. transcriptionEndpointingPlan\n\n@default []", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/AssistantCustomEndpointingRule", + "title": "Assistant" + }, + { + "$ref": "#/components/schemas/CustomerCustomEndpointingRule", + "title": "Customer" + }, + { + "$ref": "#/components/schemas/BothCustomEndpointingRule", + "title": "Both" + } + ] + } + }, + "transcriptionEndpointingPlan": { + "description": "This determines how customer speech is considered done (endpointing) using the transcription of the customer's speech.\n\nOnce an endpoint is triggered, the request is sent to `assistant.model`.\n\nNote: This plan is only used if `smartEndpointingPlan` is not set and transcriber does not have built-in endpointing capabilities. If both are provided, `smartEndpointingPlan` takes precedence.\nThis plan will also be overridden by any matching `customEndpointingRules`.", + "allOf": [ + { + "$ref": "#/components/schemas/TranscriptionEndpointingPlan" + } + ] + } + } + }, "TransferAssistant": { "type": "object", "properties": { @@ -11633,6 +11937,14 @@ } ] }, + "startSpeakingPlan": { + "description": "This is the plan for when the transfer assistant should start talking.\n\nYou should configure this if the transfer assistant needs different endpointing behavior than the base assistant.\n\nIf this is not set, the transfer assistant will inherit the start speaking plan from the base assistant.", + "allOf": [ + { + "$ref": "#/components/schemas/StartSpeakingPlan" + } + ] + }, "firstMessageMode": { "type": "string", "description": "This is the mode for the first message. 
Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state.\n\n@default 'assistant-speaks-first'", @@ -16879,126 +17191,19 @@ }, "to": { "type": "string", - "maxLength": 80 - }, - "metadata": { - "type": "object", - "description": "This is for metadata you want to store on the edge." - } - }, - "required": [ - "from", - "to" - ] - }, - "RecordingConsentPlanStayOnLine": { - "type": "object", - "properties": { - "message": { - "type": "string", - "description": "This is the message asking for consent to record the call.\nIf the type is `stay-on-line`, the message should ask the user to hang up if they do not consent.\nIf the type is `verbal`, the message should ask the user to verbally consent or decline.", - "maxLength": 1000, - "examples": [ - "For quality purposes, this call may be recorded. Please stay on the line if you agree or end the call if you do not consent.", - "This call may be recorded for quality and training purposes. Say \"I agree\" if you consent to being recorded, or \"I disagree\" if you do not consent." - ] - }, - "voice": { - "description": "This is the voice to use for the consent message. If not specified, inherits from the assistant's voice.\nUse a different voice for the consent message for a better user experience.", - "oneOf": [ - { - "$ref": "#/components/schemas/AzureVoice", - "title": "AzureVoice" - }, - { - "$ref": "#/components/schemas/CartesiaVoice", - "title": "CartesiaVoice" - }, - { - "$ref": "#/components/schemas/CustomVoice", - "title": "CustomVoice" - }, - { - "$ref": "#/components/schemas/DeepgramVoice", - "title": "DeepgramVoice" - }, - { - "$ref": "#/components/schemas/ElevenLabsVoice", - "title": "ElevenLabsVoice" - }, - { - "$ref": "#/components/schemas/HumeVoice", - "title": "HumeVoice" - }, - { - "$ref": "#/components/schemas/LMNTVoice", - "title": "LMNTVoice" - }, - { - "$ref": "#/components/schemas/NeuphonicVoice", - "title": "NeuphonicVoice" - }, - { - "$ref": "#/components/schemas/OpenAIVoice", - "title": "OpenAIVoice" - }, - { - "$ref": "#/components/schemas/PlayHTVoice", - "title": "PlayHTVoice" - }, - { - "$ref": "#/components/schemas/RimeAIVoice", - "title": "RimeAIVoice" - }, - { - "$ref": "#/components/schemas/SmallestAIVoice", - "title": "SmallestAIVoice" - }, - { - "$ref": "#/components/schemas/TavusVoice", - "title": "TavusVoice" - }, - { - "$ref": "#/components/schemas/VapiVoice", - "title": "VapiVoice" - }, - { - "$ref": "#/components/schemas/SesameVoice", - "title": "SesameVoice" - }, - { - "$ref": "#/components/schemas/InworldVoice", - "title": "InworldVoice" - }, - { - "$ref": "#/components/schemas/MinimaxVoice", - "title": "MinimaxVoice" - } - ] - }, - "type": { - "type": "string", - "description": "This is the type of recording consent plan. This type assumes consent is granted if the user stays on the line.", - "enum": [ - "stay-on-line" - ], - "example": "stay-on-line" + "maxLength": 80 }, - "waitSeconds": { - "type": "number", - "description": "Number of seconds to wait before transferring to the assistant if user stays on the call", - "minimum": 1, - "maximum": 6, - "default": 3, - "example": 3 + "metadata": { + "type": "object", + "description": "This is for metadata you want to store on the edge." 
} }, "required": [ - "message", - "type" + "from", + "to" ] }, - "RecordingConsentPlanVerbal": { + "RecordingConsentPlanStayOnLine": { "type": "object", "properties": { "message": { @@ -17070,725 +17275,528 @@ "title": "VapiVoice" }, { - "$ref": "#/components/schemas/SesameVoice", - "title": "SesameVoice" - }, - { - "$ref": "#/components/schemas/InworldVoice", - "title": "InworldVoice" - }, - { - "$ref": "#/components/schemas/MinimaxVoice", - "title": "MinimaxVoice" - } - ] - }, - "type": { - "type": "string", - "description": "This is the type of recording consent plan. This type assumes consent is granted if the user verbally consents or declines.", - "enum": [ - "verbal" - ], - "example": "verbal" - }, - "declineTool": { - "type": "object", - "description": "Tool to execute if user verbally declines recording consent" - }, - "declineToolId": { - "type": "string", - "description": "ID of existing tool to execute if user verbally declines recording consent" - } - }, - "required": [ - "message", - "type" - ] - }, - "SecurityFilterBase": { - "type": "object", - "properties": {} - }, - "SecurityFilterPlan": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "description": "Whether the security filter is enabled.\n@default false", - "default": false - }, - "filters": { - "description": "Array of security filter types to apply.\nIf array is not empty, only those security filters are run.", - "example": "[{ type: \"sql-injection\" }, { type: \"xss\" }]", - "type": "array", - "items": { - "$ref": "#/components/schemas/SecurityFilterBase" - } - }, - "mode": { - "type": "string", - "description": "Mode of operation when a security threat is detected.\n- 'sanitize': Remove or replace the threatening content\n- 'reject': Replace the entire transcript with replacement text\n- 'replace': Replace threatening patterns with replacement text\n@default 'sanitize'", - "enum": [ - "sanitize", - "reject", - "replace" - ], - "default": "sanitize" - }, - "replacementText": { - "type": "string", - "description": "Text to use when replacing filtered content.\n@default '[FILTERED]'", - "default": "[FILTERED]" - } - } - }, - "CompliancePlan": { - "type": "object", - "properties": { - "hipaaEnabled": { - "type": "boolean", - "description": "When this is enabled, no logs, recordings, or transcriptions will be stored.\nAt the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false.", - "example": { - "hipaaEnabled": false - } - }, - "pciEnabled": { - "type": "boolean", - "description": "When this is enabled, the user will be restricted to use PCI-compliant providers, and no logs or transcripts are stored.\nAt the end of the call, you will receive an end-of-call-report message to store on your server. Defaults to false.", - "example": { - "pciEnabled": false - } - }, - "securityFilterPlan": { - "description": "This is the security filter plan for the assistant. 
It allows filtering of transcripts for security threats before sending to LLM.", - "allOf": [ - { - "$ref": "#/components/schemas/SecurityFilterPlan" - } - ] - }, - "recordingConsentPlan": { - "oneOf": [ - { - "$ref": "#/components/schemas/RecordingConsentPlanStayOnLine", - "title": "RecordingConsentStayOnLinePlan" - }, - { - "$ref": "#/components/schemas/RecordingConsentPlanVerbal", - "title": "RecordingConsentPlanVerbal" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "stay-on-line": "#/components/schemas/RecordingConsentPlanStayOnLine", - "verbal": "#/components/schemas/RecordingConsentPlanVerbal" - } - } - } - } - }, - "StructuredDataPlan": { - "type": "object", - "properties": { - "messages": { - "description": "These are the messages used to generate the structured data.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert data extractor. You will be given a transcript of a call. Extract structured data per the JSON Schema. DO NOT return anything except the structured data.\\n\\nJson Schema:\\\\n{{schema}}\\n\\nOnly respond with the JSON.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{schema}}: the schema of the structured data from `structuredDataPlan.schema`- {{endedReason}}: the ended reason of the call from `call.endedReason`", - "type": "array", - "items": { - "type": "object" - } - }, - "enabled": { - "type": "boolean", - "description": "This determines whether structured data is generated and stored in `call.analysis.structuredData`. Defaults to false.\n\nUsage:\n- If you want to extract structured data, set this to true and provide a `schema`.\n\n@default false" - }, - "schema": { - "description": "This is the schema of the structured data. The output is stored in `call.analysis.structuredData`.\n\nComplete guide on JSON Schema can be found [here](https://ajv.js.org/json-schema.html#json-data-type).", - "allOf": [ - { - "$ref": "#/components/schemas/JsonSchema" - } - ] - }, - "timeoutSeconds": { - "type": "number", - "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.structuredData` will be empty.\n\nUsage:\n- To guarantee the structured data is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds", - "minimum": 1, - "maximum": 60 - } - } - }, - "StructuredDataMultiPlan": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "This is the key of the structured data plan in the catalog." 
- }, - "plan": { - "description": "This is an individual structured data plan in the catalog.", - "allOf": [ - { - "$ref": "#/components/schemas/StructuredDataPlan" - } - ] - } - }, - "required": [ - "key", - "plan" - ] - }, - "SuccessEvaluationPlan": { - "type": "object", - "properties": { - "rubric": { - "type": "string", - "enum": [ - "NumericScale", - "DescriptiveScale", - "Checklist", - "Matrix", - "PercentageScale", - "LikertScale", - "AutomaticRubric", - "PassFail" - ], - "description": "This enforces the rubric of the evaluation. The output is stored in `call.analysis.successEvaluation`.\n\nOptions include:\n- 'NumericScale': A scale of 1 to 10.\n- 'DescriptiveScale': A scale of Excellent, Good, Fair, Poor.\n- 'Checklist': A checklist of criteria and their status.\n- 'Matrix': A grid that evaluates multiple criteria across different performance levels.\n- 'PercentageScale': A scale of 0% to 100%.\n- 'LikertScale': A scale of Strongly Agree, Agree, Neutral, Disagree, Strongly Disagree.\n- 'AutomaticRubric': Automatically break down evaluation into several criteria, each with its own score.\n- 'PassFail': A simple 'true' if call passed, 'false' if not.\n\nDefault is 'PassFail'." - }, - "messages": { - "description": "These are the messages used to generate the success evaluation.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert call evaluator. You will be given a transcript of a call and the system prompt of the AI participant. Determine if the call was successful based on the objectives inferred from the system prompt. DO NOT return anything except the result.\\n\\nRubric:\\\\n{{rubric}}\\n\\nOnly respond with the result.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here was the system prompt of the call:\\n\\n{{systemPrompt}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{rubric}}: the rubric of the success evaluation from `successEvaluationPlan.rubric`- {{endedReason}}: the ended reason of the call from `call.endedReason`", - "type": "array", - "items": { - "type": "object" - } - }, - "enabled": { - "type": "boolean", - "description": "This determines whether a success evaluation is generated and stored in `call.analysis.successEvaluation`. Defaults to true.\n\nUsage:\n- If you want to disable the success evaluation, set this to false.\n\n@default true" - }, - "timeoutSeconds": { - "type": "number", - "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.successEvaluation` will be empty.\n\nUsage:\n- To guarantee the success evaluation is generated, set this value high. 
Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds", - "minimum": 1, - "maximum": 60 - } - } - }, - "AnalysisPlan": { - "type": "object", - "properties": { - "minMessagesThreshold": { - "type": "number", - "description": "The minimum number of messages required to run the analysis plan.\nIf the number of messages is less than this, analysis will be skipped.\n@default 2", - "minimum": 0 - }, - "summaryPlan": { - "description": "This is the plan for generating the summary of the call. This outputs to `call.analysis.summary`.", - "allOf": [ - { - "$ref": "#/components/schemas/SummaryPlan" - } - ] - }, - "structuredDataPlan": { - "description": "This is the plan for generating the structured data from the call. This outputs to `call.analysis.structuredData`.", - "allOf": [ - { - "$ref": "#/components/schemas/StructuredDataPlan" - } - ] - }, - "structuredDataMultiPlan": { - "description": "This is an array of structured data plan catalogs. Each entry includes a `key` and a `plan` for generating the structured data from the call. This outputs to `call.analysis.structuredDataMulti`.", - "type": "array", - "items": { - "$ref": "#/components/schemas/StructuredDataMultiPlan" - } - }, - "successEvaluationPlan": { - "description": "This is the plan for generating the success evaluation of the call. This outputs to `call.analysis.successEvaluation`.", - "allOf": [ + "$ref": "#/components/schemas/SesameVoice", + "title": "SesameVoice" + }, { - "$ref": "#/components/schemas/SuccessEvaluationPlan" + "$ref": "#/components/schemas/InworldVoice", + "title": "InworldVoice" + }, + { + "$ref": "#/components/schemas/MinimaxVoice", + "title": "MinimaxVoice" } ] }, - "outcomeIds": { - "description": "This is an array of outcome UUIDs to be calculated during analysis.\nThe outcomes will be calculated and stored in `call.analysis.outcomes`.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "TranscriptPlan": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "description": "This determines whether the transcript is stored in `call.artifact.transcript`. Defaults to true.\n\n@default true", - "example": true - }, - "assistantName": { - "type": "string", - "description": "This is the name of the assistant in the transcript. Defaults to 'AI'.\n\nUsage:\n- If you want to change the name of the assistant in the transcript, set this. Example, here is what the transcript would look like with `assistantName` set to 'Buyer':\n```\nUser: Hello, how are you?\nBuyer: I'm fine.\nUser: Do you want to buy a car?\nBuyer: No.\n```\n\n@default 'AI'" - }, - "userName": { - "type": "string", - "description": "This is the name of the user in the transcript. Defaults to 'User'.\n\nUsage:\n- If you want to change the name of the user in the transcript, set this. Example, here is what the transcript would look like with `userName` set to 'Seller':\n```\nSeller: Hello, how are you?\nAI: I'm fine.\nSeller: Do you want to buy a car?\nAI: No.\n```\n\n@default 'User'" - } - } - }, - "ScorecardMetric": { - "type": "object", - "properties": { - "structuredOutputId": { + "type": { "type": "string", - "description": "This is the unique identifier for the structured output that will be used to evaluate the scorecard.\nThe structured output must be of type number or boolean only for now." + "description": "This is the type of recording consent plan. 
This type assumes consent is granted if the user stays on the line.", + "enum": [ + "stay-on-line" + ], + "example": "stay-on-line" }, - "conditions": { - "description": "These are the conditions that will be used to evaluate the scorecard.\nEach condition will have a comparator, value, and points that will be used to calculate the final score.\nThe points will be added to the overall score if the condition is met.\nThe overall score will be normalized to a 100 point scale to ensure uniformity across different scorecards.", - "type": "array", - "items": { - "type": "object" - } + "waitSeconds": { + "type": "number", + "description": "Number of seconds to wait before transferring to the assistant if user stays on the call", + "minimum": 1, + "maximum": 6, + "default": 3, + "example": 3 } }, "required": [ - "structuredOutputId", - "conditions" + "message", + "type" ] }, - "CreateScorecardDTO": { + "RecordingConsentPlanVerbal": { "type": "object", "properties": { - "name": { + "message": { "type": "string", - "description": "This is the name of the scorecard. It is only for user reference and will not be used for any evaluation.", - "maxLength": 80 + "description": "This is the message asking for consent to record the call.\nIf the type is `stay-on-line`, the message should ask the user to hang up if they do not consent.\nIf the type is `verbal`, the message should ask the user to verbally consent or decline.", + "maxLength": 1000, + "examples": [ + "For quality purposes, this call may be recorded. Please stay on the line if you agree or end the call if you do not consent.", + "This call may be recorded for quality and training purposes. Say \"I agree\" if you consent to being recorded, or \"I disagree\" if you do not consent." + ] }, - "description": { + "voice": { + "description": "This is the voice to use for the consent message. If not specified, inherits from the assistant's voice.\nUse a different voice for the consent message for a better user experience.", + "oneOf": [ + { + "$ref": "#/components/schemas/AzureVoice", + "title": "AzureVoice" + }, + { + "$ref": "#/components/schemas/CartesiaVoice", + "title": "CartesiaVoice" + }, + { + "$ref": "#/components/schemas/CustomVoice", + "title": "CustomVoice" + }, + { + "$ref": "#/components/schemas/DeepgramVoice", + "title": "DeepgramVoice" + }, + { + "$ref": "#/components/schemas/ElevenLabsVoice", + "title": "ElevenLabsVoice" + }, + { + "$ref": "#/components/schemas/HumeVoice", + "title": "HumeVoice" + }, + { + "$ref": "#/components/schemas/LMNTVoice", + "title": "LMNTVoice" + }, + { + "$ref": "#/components/schemas/NeuphonicVoice", + "title": "NeuphonicVoice" + }, + { + "$ref": "#/components/schemas/OpenAIVoice", + "title": "OpenAIVoice" + }, + { + "$ref": "#/components/schemas/PlayHTVoice", + "title": "PlayHTVoice" + }, + { + "$ref": "#/components/schemas/RimeAIVoice", + "title": "RimeAIVoice" + }, + { + "$ref": "#/components/schemas/SmallestAIVoice", + "title": "SmallestAIVoice" + }, + { + "$ref": "#/components/schemas/TavusVoice", + "title": "TavusVoice" + }, + { + "$ref": "#/components/schemas/VapiVoice", + "title": "VapiVoice" + }, + { + "$ref": "#/components/schemas/SesameVoice", + "title": "SesameVoice" + }, + { + "$ref": "#/components/schemas/InworldVoice", + "title": "InworldVoice" + }, + { + "$ref": "#/components/schemas/MinimaxVoice", + "title": "MinimaxVoice" + } + ] + }, + "type": { "type": "string", - "description": "This is the description of the scorecard. 
It is only for user reference and will not be used for any evaluation.", - "maxLength": 500 + "description": "This is the type of recording consent plan. This type assumes consent is granted if the user verbally consents or declines.", + "enum": [ + "verbal" + ], + "example": "verbal" }, - "metrics": { - "description": "These are the metrics that will be used to evaluate the scorecard.\nEach metric will have a set of conditions and points that will be used to generate the score.", - "type": "array", - "items": { - "$ref": "#/components/schemas/ScorecardMetric" - } + "declineTool": { + "type": "object", + "description": "Tool to execute if user verbally declines recording consent" }, - "assistantIds": { - "description": "These are the assistant IDs that this scorecard is linked to.\nWhen linked to assistants, this scorecard will be available for evaluation during those assistants' calls.", - "type": "array", - "items": { - "type": "string" - } + "declineToolId": { + "type": "string", + "description": "ID of existing tool to execute if user verbally declines recording consent" } }, "required": [ - "metrics" + "message", + "type" ] }, - "ArtifactPlan": { + "SecurityFilterBase": { "type": "object", - "properties": { - "recordingEnabled": { - "type": "boolean", - "description": "This determines whether assistant's calls are recorded. Defaults to true.\n\nUsage:\n- If you don't want to record the calls, set this to false.\n- If you want to record the calls when `assistant.hipaaEnabled` (deprecated) or `assistant.compliancePlan.hipaaEnabled` explicity set this to true and make sure to provide S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nYou can find the recording at `call.artifact.recordingUrl` and `call.artifact.stereoRecordingUrl` after the call is ended.\n\n@default true", - "example": true - }, - "recordingFormat": { - "type": "string", - "description": "This determines the format of the recording. Defaults to `wav;l16`.\n\n@default 'wav;l16'", - "enum": [ - "wav;l16", - "mp3" - ] - }, - "recordingUseCustomStorageEnabled": { - "type": "boolean", - "description": "This determines whether to use custom storage (S3 or GCP) for call recordings when storage credentials are configured.\n\nWhen set to false, recordings will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured.\n\nUsage:\n- Set to false if you have custom storage configured but want to store recordings on Vapi's storage for this assistant.\n- Set to true (or leave unset) to use your custom storage for recordings when available.\n\n@default true", - "example": true - }, - "videoRecordingEnabled": { - "type": "boolean", - "description": "This determines whether the video is recorded during the call. Defaults to false. Only relevant for `webCall` type.\n\nYou can find the video recording at `call.artifact.videoRecordingUrl` after the call is ended.\n\n@default false", - "example": false - }, - "fullMessageHistoryEnabled": { - "type": "boolean", - "description": "This determines whether the artifact contains the full message history, even after handoff context engineering. Defaults to false.", - "example": false - }, - "pcapEnabled": { - "type": "boolean", - "description": "This determines whether the SIP packet capture is enabled. Defaults to true. 
Only relevant for `phone` type calls where phone number's provider is `vapi` or `byo-phone-number`.\n\nYou can find the packet capture at `call.artifact.pcapUrl` after the call is ended.\n\n@default true", - "example": true - }, - "pcapS3PathPrefix": { - "type": "string", - "description": "This is the path where the SIP packet capture will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the packet capture to a specific path, set this to the path. Example: `/my-assistant-captures`.\n- If you want to upload the packet capture to the root of the bucket, set this to `/`.\n\n@default '/'", - "example": "/pcaps" - }, - "pcapUseCustomStorageEnabled": { - "type": "boolean", - "description": "This determines whether to use custom storage (S3 or GCP) for SIP packet captures when storage credentials are configured.\n\nWhen set to false, packet captures will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured.\n\nUsage:\n- Set to false if you have custom storage configured but want to store packet captures on Vapi's storage for this assistant.\n- Set to true (or leave unset) to use your custom storage for packet captures when available.\n\n@default true", - "example": true - }, - "loggingEnabled": { - "type": "boolean", - "description": "This determines whether the call logs are enabled. Defaults to true.\n\n@default true", - "example": true - }, - "loggingUseCustomStorageEnabled": { - "type": "boolean", - "description": "This determines whether to use custom storage (S3 or GCP) for call logs when storage credentials are configured.\n\nWhen set to false, logs will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured.\n\nUsage:\n- Set to false if you have custom storage configured but want to store logs on Vapi's storage for this assistant.\n- Set to true (or leave unset) to use your custom storage for logs when available.\n\n@default true", - "example": true - }, - "transcriptPlan": { - "description": "This is the plan for `call.artifact.transcript`. To disable, set `transcriptPlan.enabled` to false.", - "allOf": [ - { - "$ref": "#/components/schemas/TranscriptPlan" - } - ] - }, - "recordingPath": { - "type": "string", - "description": "This is the path where the recording will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the recording to a specific path, set this to the path. 
Example: `/my-assistant-recordings`.\n- If you want to upload the recording to the root of the bucket, set this to `/`.\n\n@default '/'" - }, - "structuredOutputIds": { - "description": "This is an array of structured output IDs to be calculated during the call.\nThe outputs will be extracted and stored in `call.artifact.structuredOutputs` after the call is ended.", - "type": "array", - "items": { - "type": "string" - } + "properties": {} + }, + "SecurityFilterPlan": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Whether the security filter is enabled.\n@default false", + "default": false }, - "scorecardIds": { - "description": "This is an array of scorecard IDs that will be evaluated based on the structured outputs extracted during the call.\nThe scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended.", + "filters": { + "description": "Array of security filter types to apply.\nIf array is not empty, only those security filters are run.", + "example": "[{ type: \"sql-injection\" }, { type: \"xss\" }]", "type": "array", "items": { - "type": "string" + "$ref": "#/components/schemas/SecurityFilterBase" } }, - "scorecards": { - "description": "This is the array of scorecards that will be evaluated based on the structured outputs extracted during the call.\nThe scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended.", - "type": "array", - "items": { - "$ref": "#/components/schemas/CreateScorecardDTO" - } + "mode": { + "type": "string", + "description": "Mode of operation when a security threat is detected.\n- 'sanitize': Remove or replace the threatening content\n- 'reject': Replace the entire transcript with replacement text\n- 'replace': Replace threatening patterns with replacement text\n@default 'sanitize'", + "enum": [ + "sanitize", + "reject", + "replace" + ], + "default": "sanitize" }, - "loggingPath": { + "replacementText": { "type": "string", - "description": "This is the path where the call logs will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the call logs to a specific path, set this to the path. Example: `/my-assistant-logs`.\n- If you want to upload the call logs to the root of the bucket, set this to `/`.\n\n@default '/'" + "description": "Text to use when replacing filtered content.\n@default '[FILTERED]'", + "default": "[FILTERED]" } } }, - "RegexOption": { + "CompliancePlan": { "type": "object", "properties": { - "type": { - "type": "string", - "description": "This is the type of the regex option. Options are:\n- `ignore-case`: Ignores the case of the text being matched. Add\n- `whole-word`: Matches whole words only.\n- `multi-line`: Matches across multiple lines.", - "enum": [ - "ignore-case", - "whole-word", - "multi-line" - ] + "hipaaEnabled": { + "type": "boolean", + "description": "When this is enabled, no logs, recordings, or transcriptions will be stored.\nAt the end of the call, you will still receive an end-of-call-report message to store on your server. 
Defaults to false.", + "example": { + "hipaaEnabled": false + } }, - "enabled": { + "pciEnabled": { "type": "boolean", - "description": "This is whether to enable the option.\n\n@default false" + "description": "When this is enabled, the user will be restricted to use PCI-compliant providers, and no logs or transcripts are stored.\nAt the end of the call, you will receive an end-of-call-report message to store on your server. Defaults to false.", + "example": { + "pciEnabled": false + } + }, + "securityFilterPlan": { + "description": "This is the security filter plan for the assistant. It allows filtering of transcripts for security threats before sending to LLM.", + "allOf": [ + { + "$ref": "#/components/schemas/SecurityFilterPlan" + } + ] + }, + "recordingConsentPlan": { + "oneOf": [ + { + "$ref": "#/components/schemas/RecordingConsentPlanStayOnLine", + "title": "RecordingConsentStayOnLinePlan" + }, + { + "$ref": "#/components/schemas/RecordingConsentPlanVerbal", + "title": "RecordingConsentPlanVerbal" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "stay-on-line": "#/components/schemas/RecordingConsentPlanStayOnLine", + "verbal": "#/components/schemas/RecordingConsentPlanVerbal" + } + } } - }, - "required": [ - "type", - "enabled" - ] + } }, - "AssistantCustomEndpointingRule": { + "StructuredDataPlan": { "type": "object", "properties": { - "type": { - "type": "string", - "description": "This endpointing rule is based on the last assistant message before customer started speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the last assistant message\n- If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you have yes/no questions in your use case like \"are you interested in a loan?\", you can set a shorter timeout.\n- If you have questions where the customer may pause to look up information like \"what's my account number?\", you can set a longer timeout.", - "enum": [ - "assistant" - ] - }, - "regex": { - "type": "string", - "description": "This is the regex pattern to match.\n\nNote:\n- This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"." - }, - "regexOptions": { - "description": "These are the options for the regex match. Defaults to all disabled.\n\n@default []", + "messages": { + "description": "These are the messages used to generate the structured data.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert data extractor. You will be given a transcript of a call. Extract structured data per the JSON Schema. DO NOT return anything except the structured data.\\n\\nJson Schema:\\\\n{{schema}}\\n\\nOnly respond with the JSON.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n. 
Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{schema}}: the schema of the structured data from `structuredDataPlan.schema`- {{endedReason}}: the ended reason of the call from `call.endedReason`", "type": "array", "items": { - "$ref": "#/components/schemas/RegexOption" + "type": "object" } }, + "enabled": { + "type": "boolean", + "description": "This determines whether structured data is generated and stored in `call.analysis.structuredData`. Defaults to false.\n\nUsage:\n- If you want to extract structured data, set this to true and provide a `schema`.\n\n@default false" + }, + "schema": { + "description": "This is the schema of the structured data. The output is stored in `call.analysis.structuredData`.\n\nComplete guide on JSON Schema can be found [here](https://ajv.js.org/json-schema.html#json-data-type).", + "allOf": [ + { + "$ref": "#/components/schemas/JsonSchema" + } + ] + }, "timeoutSeconds": { "type": "number", - "description": "This is the endpointing timeout in seconds, if the rule is matched.", - "minimum": 0, - "maximum": 15 + "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.structuredData` will be empty.\n\nUsage:\n- To guarantee the structured data is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds", + "minimum": 1, + "maximum": 60 + } + } + }, + "StructuredDataMultiPlan": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "This is the key of the structured data plan in the catalog." + }, + "plan": { + "description": "This is an individual structured data plan in the catalog.", + "allOf": [ + { + "$ref": "#/components/schemas/StructuredDataPlan" + } + ] } }, "required": [ - "type", - "regex", - "timeoutSeconds" + "key", + "plan" ] }, - "CustomerCustomEndpointingRule": { + "SuccessEvaluationPlan": { "type": "object", "properties": { - "type": { + "rubric": { "type": "string", - "description": "This endpointing rule is based on current customer message as they are speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the current customer transcription\n- If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you want to wait longer while customer is speaking numbers, you can set a longer timeout.", "enum": [ - "customer" - ] - }, - "regex": { - "type": "string", - "description": "This is the regex pattern to match.\n\nNote:\n- This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"." 
+ "NumericScale", + "DescriptiveScale", + "Checklist", + "Matrix", + "PercentageScale", + "LikertScale", + "AutomaticRubric", + "PassFail" + ], + "description": "This enforces the rubric of the evaluation. The output is stored in `call.analysis.successEvaluation`.\n\nOptions include:\n- 'NumericScale': A scale of 1 to 10.\n- 'DescriptiveScale': A scale of Excellent, Good, Fair, Poor.\n- 'Checklist': A checklist of criteria and their status.\n- 'Matrix': A grid that evaluates multiple criteria across different performance levels.\n- 'PercentageScale': A scale of 0% to 100%.\n- 'LikertScale': A scale of Strongly Agree, Agree, Neutral, Disagree, Strongly Disagree.\n- 'AutomaticRubric': Automatically break down evaluation into several criteria, each with its own score.\n- 'PassFail': A simple 'true' if call passed, 'false' if not.\n\nDefault is 'PassFail'." }, - "regexOptions": { - "description": "These are the options for the regex match. Defaults to all disabled.\n\n@default []", + "messages": { + "description": "These are the messages used to generate the success evaluation.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert call evaluator. You will be given a transcript of a call and the system prompt of the AI participant. Determine if the call was successful based on the objectives inferred from the system prompt. DO NOT return anything except the result.\\n\\nRubric:\\\\n{{rubric}}\\n\\nOnly respond with the result.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here was the system prompt of the call:\\n\\n{{systemPrompt}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{rubric}}: the rubric of the success evaluation from `successEvaluationPlan.rubric`- {{endedReason}}: the ended reason of the call from `call.endedReason`", "type": "array", "items": { - "$ref": "#/components/schemas/RegexOption" + "type": "object" } }, + "enabled": { + "type": "boolean", + "description": "This determines whether a success evaluation is generated and stored in `call.analysis.successEvaluation`. Defaults to true.\n\nUsage:\n- If you want to disable the success evaluation, set this to false.\n\n@default true" + }, "timeoutSeconds": { "type": "number", - "description": "This is the endpointing timeout in seconds, if the rule is matched.", - "minimum": 0, - "maximum": 15 + "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.successEvaluation` will be empty.\n\nUsage:\n- To guarantee the success evaluation is generated, set this value high. 
Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds", + "minimum": 1, + "maximum": 60 } - }, - "required": [ - "type", - "regex", - "timeoutSeconds" - ] + } }, - "BothCustomEndpointingRule": { + "AnalysisPlan": { "type": "object", "properties": { - "type": { - "type": "string", - "description": "This endpointing rule is based on both the last assistant message and the current customer message as they are speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the last assistant message and the current customer transcription\n- If assistant message matches `assistantRegex` AND customer message matches `customerRegex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you want to wait longer while customer is speaking numbers, you can set a longer timeout.", - "enum": [ - "both" + "minMessagesThreshold": { + "type": "number", + "description": "The minimum number of messages required to run the analysis plan.\nIf the number of messages is less than this, analysis will be skipped.\n@default 2", + "minimum": 0 + }, + "summaryPlan": { + "description": "This is the plan for generating the summary of the call. This outputs to `call.analysis.summary`.", + "allOf": [ + { + "$ref": "#/components/schemas/SummaryPlan" + } ] }, - "assistantRegex": { - "type": "string", - "description": "This is the regex pattern to match the assistant's message.\n\nNote:\n- This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"." + "structuredDataPlan": { + "description": "This is the plan for generating the structured data from the call. This outputs to `call.analysis.structuredData`.", + "allOf": [ + { + "$ref": "#/components/schemas/StructuredDataPlan" + } + ] }, - "assistantRegexOptions": { - "description": "These are the options for the assistant's message regex match. Defaults to all disabled.\n\n@default []", + "structuredDataMultiPlan": { + "description": "This is an array of structured data plan catalogs. Each entry includes a `key` and a `plan` for generating the structured data from the call. This outputs to `call.analysis.structuredDataMulti`.", "type": "array", "items": { - "$ref": "#/components/schemas/RegexOption" + "$ref": "#/components/schemas/StructuredDataMultiPlan" } }, - "customerRegex": { - "type": "string" + "successEvaluationPlan": { + "description": "This is the plan for generating the success evaluation of the call. This outputs to `call.analysis.successEvaluation`.", + "allOf": [ + { + "$ref": "#/components/schemas/SuccessEvaluationPlan" + } + ] }, - "customerRegexOptions": { - "description": "These are the options for the customer's message regex match. 
-    "VapiSmartEndpointingPlan": {
+    "TranscriptPlan": {
       "type": "object",
       "properties": {
-        "provider": {
+        "enabled": {
+          "type": "boolean",
+          "description": "This determines whether the transcript is stored in `call.artifact.transcript`. Defaults to true.\n\n@default true",
+          "example": true
+        },
+        "assistantName": {
           "type": "string",
-          "description": "This is the provider for the smart endpointing plan.",
-          "enum": [
-            "vapi",
-            "livekit",
-            "custom-endpointing-model"
-          ],
-          "example": "vapi"
+          "description": "This is the name of the assistant in the transcript. Defaults to 'AI'.\n\nUsage:\n- If you want to change the name of the assistant in the transcript, set this. Example, here is what the transcript would look like with `assistantName` set to 'Buyer':\n```\nUser: Hello, how are you?\nBuyer: I'm fine.\nUser: Do you want to buy a car?\nBuyer: No.\n```\n\n@default 'AI'"
+        },
+        "userName": {
+          "type": "string",
+          "description": "This is the name of the user in the transcript. Defaults to 'User'.\n\nUsage:\n- If you want to change the name of the user in the transcript, set this. Example, here is what the transcript would look like with `userName` set to 'Seller':\n```\nSeller: Hello, how are you?\nAI: I'm fine.\nSeller: Do you want to buy a car?\nAI: No.\n```\n\n@default 'User'"
         }
-      },
-      "required": [
-        "provider"
-      ]
+      }
     },
-    "LivekitSmartEndpointingPlan": {
+    "ScorecardMetric": {
       "type": "object",
       "properties": {
-        "provider": {
+        "structuredOutputId": {
           "type": "string",
-          "description": "This is the provider for the smart endpointing plan.",
-          "enum": [
-            "vapi",
-            "livekit",
-            "custom-endpointing-model"
-          ],
-          "example": "livekit"
+          "description": "This is the unique identifier for the structured output that will be used to evaluate the scorecard.\nThe structured output must be of type number or boolean only for now."
         },
-        "waitFunction": {
-          "type": "string",
-          "description": "This expression describes how long the bot will wait to start speaking based on the likelihood that the user has reached an endpoint.\n\nThis is a millisecond valued function. It maps probabilities (real numbers on [0,1]) to milliseconds that the bot should wait before speaking ([0, \\infty]). Any negative values that are returned are set to zero (the bot can't start talking in the past).\n\nA probability of zero represents very high confidence that the caller has stopped speaking, and would like the bot to speak to them. A probability of one represents very high confidence that the caller is still speaking.\n\nUnder the hood, this is parsed into a mathjs expression. Whatever you use to write your expression needs to be valid with respect to mathjs\n\n@default \"20 + 500 * sqrt(x) + 2500 * x^3\"",
-          "examples": [
-            "70 + 4000 * x",
-            "200 + 8000 * x",
-            "4000 * (1 - cos(pi * x))"
-          ]
+        "conditions": {
+          "description": "These are the conditions that will be used to evaluate the scorecard.\nEach condition will have a comparator, value, and points that will be used to calculate the final score.\nThe points will be added to the overall score if the condition is met.\nThe overall score will be normalized to a 100 point scale to ensure uniformity across different scorecards.",
+          "type": "array",
+          "items": {
+            "type": "object"
+          }
         }
       },
       "required": [
-        "provider"
+        "structuredOutputId",
+        "conditions"
       ]
     },
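The `conditions` items are typed only as plain objects, so the sketch below assumes a `{ comparator, value, points }` shape per the description; the comparator set is hypothetical. It implements exactly the scoring rule the description states: points are summed for matched conditions, and the total is normalized to a 100-point scale.

```typescript
type Comparator = "eq" | "gt" | "lt"; // hypothetical comparator names
interface Condition {
  comparator: Comparator;
  value: number | boolean;
  points: number;
}

// Evaluate one metric: the structured output (number or boolean only, per the
// schema) is checked against each condition; matched points accumulate.
function scoreMetric(output: number | boolean, conditions: Condition[]): number {
  let earned = 0;
  let possible = 0;
  for (const c of conditions) {
    possible += c.points;
    const met =
      c.comparator === "eq" ? output === c.value :
      c.comparator === "gt" ? typeof output === "number" && output > (c.value as number) :
      typeof output === "number" && output < (c.value as number);
    if (met) earned += c.points;
  }
  // Normalize to a 100-point scale so different scorecards stay comparable.
  return possible === 0 ? 0 : (earned / possible) * 100;
}
```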
-    "CustomEndpointingModelSmartEndpointingPlan": {
+    "CreateScorecardDTO": {
       "type": "object",
       "properties": {
-        "provider": {
+        "name": {
           "type": "string",
-          "description": "This is the provider for the smart endpointing plan. Use `custom-endpointing-model` for custom endpointing providers that are not natively supported.",
-          "enum": [
-            "vapi",
-            "livekit",
-            "custom-endpointing-model"
-          ],
-          "example": "custom-endpointing-model"
+          "description": "This is the name of the scorecard. It is only for user reference and will not be used for any evaluation.",
+          "maxLength": 80
         },
-        "server": {
-          "description": "This is where the endpointing request will be sent. If not provided, will be sent to `assistant.server`. If that does not exist either, will be sent to `org.server`.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n  \"message\": {\n    \"type\": \"call.endpointing.request\",\n    \"messages\": [\n      {\n        \"role\": \"user\",\n        \"message\": \"Hello, how are you?\",\n        \"time\": 1234567890,\n        \"secondsFromStart\": 0\n      }\n    ],\n    ...other metadata about the call...\n  }\n}\n\nResponse Expected:\n{\n  \"timeoutSeconds\": 0.5\n}\n\nThe timeout is the number of seconds to wait before considering the user's speech as finished. The endpointing timeout is automatically reset each time a new transcript is received (and another `call.endpointing.request` is sent).",
-          "allOf": [
-            {
-              "$ref": "#/components/schemas/Server"
-            }
-          ]
+        "description": {
+          "type": "string",
+          "description": "This is the description of the scorecard. It is only for user reference and will not be used for any evaluation.",
+          "maxLength": 500
+        },
+        "metrics": {
+          "description": "These are the metrics that will be used to evaluate the scorecard.\nEach metric will have a set of conditions and points that will be used to generate the score.",
+          "type": "array",
+          "items": {
+            "$ref": "#/components/schemas/ScorecardMetric"
+          }
+        },
+        "assistantIds": {
+          "description": "These are the assistant IDs that this scorecard is linked to.\nWhen linked to assistants, this scorecard will be available for evaluation during those assistants' calls.",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
         }
       },
       "required": [
-        "provider"
+        "metrics"
       ]
     },
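A sketch of a `CreateScorecardDTO` payload under the same assumed condition shape; the UUIDs are placeholders, the limits (name up to 80 characters, description up to 500) come from the schema, and only `metrics` is required.

```typescript
// Illustrative payload only; structuredOutputId and assistantIds are placeholders.
const scorecard = {
  name: "Collections QA",            // reference only, not used in evaluation
  description: "Scores agent compliance on collections calls.",
  metrics: [
    {
      structuredOutputId: "c0ffee00-...", // must reference a number/boolean output
      conditions: [{ comparator: "eq", value: true, points: 50 }],
    },
  ],
  assistantIds: ["a1b2c3d4-..."], // make the scorecard available on these assistants' calls
};
```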
-    "TranscriptionEndpointingPlan": {
+    "ArtifactPlan": {
       "type": "object",
       "properties": {
-        "onPunctuationSeconds": {
-          "type": "number",
-          "description": "The minimum number of seconds to wait after transcription ending with punctuation before sending a request to the model. Defaults to 0.1.\n\nThis setting exists because the transcriber punctuates the transcription when it's more confident that customer has completed a thought.\n\n@default 0.1",
-          "minimum": 0,
-          "maximum": 3,
-          "example": 0.1
+        "recordingEnabled": {
+          "type": "boolean",
+          "description": "This determines whether assistant's calls are recorded. Defaults to true.\n\nUsage:\n- If you don't want to record the calls, set this to false.\n- If you want to record the calls when `assistant.hipaaEnabled` (deprecated) or `assistant.compliancePlan.hipaaEnabled` is true, explicitly set this to true and make sure to provide S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nYou can find the recording at `call.artifact.recordingUrl` and `call.artifact.stereoRecordingUrl` after the call is ended.\n\n@default true",
+          "example": true
         },
-        "onNoPunctuationSeconds": {
-          "type": "number",
-          "description": "The minimum number of seconds to wait after transcription ending without punctuation before sending a request to the model. Defaults to 1.5.\n\nThis setting exists to catch the cases where the transcriber was not confident enough to punctuate the transcription, but the customer is done and has been silent for a long time.\n\n@default 1.5",
-          "minimum": 0,
-          "maximum": 3,
-          "example": 1.5
+        "recordingFormat": {
+          "type": "string",
+          "description": "This determines the format of the recording. Defaults to `wav;l16`.\n\n@default 'wav;l16'",
+          "enum": [
+            "wav;l16",
+            "mp3"
+          ]
         },
-        "onNumberSeconds": {
-          "type": "number",
-          "description": "The minimum number of seconds to wait after transcription ending with a number before sending a request to the model. Defaults to 0.4.\n\nThis setting exists because the transcriber will sometimes punctuate the transcription ending with a number, even though the customer hasn't uttered the full number. This happens commonly for long numbers when the customer reads the number in chunks.\n\n@default 0.5",
-          "minimum": 0,
-          "maximum": 3,
-          "example": 0.5
-        }
-      }
-    },
-    "StartSpeakingPlan": {
-      "type": "object",
-      "properties": {
-        "waitSeconds": {
-          "type": "number",
-          "description": "This is how long assistant waits before speaking. Defaults to 0.4.\n\nThis is the minimum it will wait but if there is latency is the pipeline, this minimum will be exceeded. This is intended as a stopgap in case the pipeline is moving too fast.\n\nExample:\n- If model generates tokens and voice generates bytes within 100ms, the pipeline still waits 300ms before outputting speech.\n\nUsage:\n- If the customer is taking long pauses, set this to a higher value.\n- If the assistant is accidentally jumping in too much, set this to a higher value.\n\n@default 0.4",
-          "minimum": 0,
-          "maximum": 5,
-          "example": 0.4
+        "recordingUseCustomStorageEnabled": {
+          "type": "boolean",
+          "description": "This determines whether to use custom storage (S3 or GCP) for call recordings when storage credentials are configured.\n\nWhen set to false, recordings will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured.\n\nUsage:\n- Set to false if you have custom storage configured but want to store recordings on Vapi's storage for this assistant.\n- Set to true (or leave unset) to use your custom storage for recordings when available.\n\n@default true",
+          "example": true
         },
-        "smartEndpointingEnabled": {
-          "example": false,
-          "deprecated": true,
-          "oneOf": [
-            {
-              "type": "boolean"
-            },
-            {
-              "type": "string",
-              "enum": [
-                "livekit"
-              ]
-            }
-          ]
+        "videoRecordingEnabled": {
+          "type": "boolean",
+          "description": "This determines whether the video is recorded during the call. Defaults to false. Only relevant for `webCall` type.\n\nYou can find the video recording at `call.artifact.videoRecordingUrl` after the call is ended.\n\n@default false",
+          "example": false
         },
-        "smartEndpointingPlan": {
-          "description": "This is the plan for smart endpointing. Pick between Vapi smart endpointing, LiveKit, or custom endpointing model (or nothing). We strongly recommend using livekit endpointing when working in English. LiveKit endpointing is not supported in other languages, yet.\n\nIf this is set, it will override and take precedence over `transcriptionEndpointingPlan`.\nThis plan will still be overridden by any matching `customEndpointingRules`.\n\nIf this is not set, the system will automatically use the transcriber's built-in endpointing capabilities if available.",
-          "oneOf": [
-            {
-              "$ref": "#/components/schemas/VapiSmartEndpointingPlan",
-              "title": "Vapi"
-            },
-            {
-              "$ref": "#/components/schemas/LivekitSmartEndpointingPlan",
-              "title": "Livekit"
-            },
+        "fullMessageHistoryEnabled": {
+          "type": "boolean",
+          "description": "This determines whether the artifact contains the full message history, even after handoff context engineering. Defaults to false.",
+          "example": false
+        },
+        "pcapEnabled": {
+          "type": "boolean",
+          "description": "This determines whether the SIP packet capture is enabled. Defaults to true. Only relevant for `phone` type calls where phone number's provider is `vapi` or `byo-phone-number`.\n\nYou can find the packet capture at `call.artifact.pcapUrl` after the call is ended.\n\n@default true",
+          "example": true
+        },
+        "pcapS3PathPrefix": {
+          "type": "string",
+          "description": "This is the path where the SIP packet capture will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the packet capture to a specific path, set this to the path. Example: `/my-assistant-captures`.\n- If you want to upload the packet capture to the root of the bucket, set this to `/`.\n\n@default '/'",
+          "example": "/pcaps"
+        },
+        "pcapUseCustomStorageEnabled": {
+          "type": "boolean",
+          "description": "This determines whether to use custom storage (S3 or GCP) for SIP packet captures when storage credentials are configured.\n\nWhen set to false, packet captures will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured.\n\nUsage:\n- Set to false if you have custom storage configured but want to store packet captures on Vapi's storage for this assistant.\n- Set to true (or leave unset) to use your custom storage for packet captures when available.\n\n@default true",
+          "example": true
+        },
+        "loggingEnabled": {
+          "type": "boolean",
+          "description": "This determines whether the call logs are enabled. Defaults to true.\n\n@default true",
+          "example": true
+        },
+        "loggingUseCustomStorageEnabled": {
+          "type": "boolean",
+          "description": "This determines whether to use custom storage (S3 or GCP) for call logs when storage credentials are configured.\n\nWhen set to false, logs will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured.\n\nUsage:\n- Set to false if you have custom storage configured but want to store logs on Vapi's storage for this assistant.\n- Set to true (or leave unset) to use your custom storage for logs when available.\n\n@default true",
+          "example": true
+        },
+        "transcriptPlan": {
+          "description": "This is the plan for `call.artifact.transcript`. To disable, set `transcriptPlan.enabled` to false.",
+          "allOf": [
             {
-              "$ref": "#/components/schemas/CustomEndpointingModelSmartEndpointingPlan",
-              "title": "Custom Endpointing Model"
+              "$ref": "#/components/schemas/TranscriptPlan"
             }
           ]
         },
-        "customEndpointingRules": {
+        "recordingPath": {
+          "type": "string",
+          "description": "This is the path where the recording will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the recording to a specific path, set this to the path. Example: `/my-assistant-recordings`.\n- If you want to upload the recording to the root of the bucket, set this to `/`.\n\n@default '/'"
+        },
+        "structuredOutputIds": {
+          "description": "This is an array of structured output IDs to be calculated during the call.\nThe outputs will be extracted and stored in `call.artifact.structuredOutputs` after the call is ended.",
           "type": "array",
-          "description": "These are the custom endpointing rules to set an endpointing timeout based on a regex on the customer's speech or the assistant's last message.\n\nUsage:\n- If you have yes/no questions like \"are you interested in a loan?\", you can set a shorter timeout.\n- If you have questions where the customer may pause to look up information like \"what's my account number?\", you can set a longer timeout.\n- If you want to wait longer while customer is enumerating a list of numbers, you can set a longer timeout.\n\nThese rules have the highest precedence and will override both `smartEndpointingPlan` and `transcriptionEndpointingPlan` when a rule is matched.\n\nThe rules are evaluated in order and the first one that matches will be used.\n\nOrder of precedence for endpointing:\n1. customEndpointingRules (if any match)\n2. smartEndpointingPlan (if set)\n3. transcriptionEndpointingPlan\n\n@default []",
           "items": {
-            "oneOf": [
-              {
-                "$ref": "#/components/schemas/AssistantCustomEndpointingRule",
-                "title": "Assistant"
-              },
-              {
-                "$ref": "#/components/schemas/CustomerCustomEndpointingRule",
-                "title": "Customer"
-              },
-              {
-                "$ref": "#/components/schemas/BothCustomEndpointingRule",
-                "title": "Both"
-              }
-            ]
+            "type": "string"
           }
         },
-        "transcriptionEndpointingPlan": {
-          "description": "This determines how a customer speech is considered done (endpointing) using the transcription of customer's speech.\n\nOnce an endpoint is triggered, the request is sent to `assistant.model`.\n\nNote: This plan is only used if `smartEndpointingPlan` is not set and transcriber does not have built-in endpointing capabilities. If both are provided, `smartEndpointingPlan` takes precedence.\nThis plan will also be overridden by any matching `customEndpointingRules`.",
-          "allOf": [
-            {
-              "$ref": "#/components/schemas/TranscriptionEndpointingPlan"
-            }
-          ]
+        "scorecardIds": {
+          "description": "This is an array of scorecard IDs that will be evaluated based on the structured outputs extracted during the call.\nThe scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended.",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "scorecards": {
+          "description": "This is the array of scorecards that will be evaluated based on the structured outputs extracted during the call.\nThe scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended.",
+          "type": "array",
+          "items": {
+            "$ref": "#/components/schemas/CreateScorecardDTO"
+          }
+        },
+        "loggingPath": {
+          "type": "string",
+          "description": "This is the path where the call logs will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the call logs to a specific path, set this to the path. Example: `/my-assistant-logs`.\n- If you want to upload the call logs to the root of the bucket, set this to `/`.\n\n@default '/'"
         }
       }
     },
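A sketch of an `ArtifactPlan` that keeps recordings and packet captures on custom S3/GCP storage while leaving logs on Vapi's storage. Property names and defaults come from the schema above; the bucket paths and display names are illustrative.

```typescript
// Illustrative ArtifactPlan configuration; paths and names are placeholders.
const artifactPlan = {
  recordingEnabled: true,                 // @default true
  recordingFormat: "wav;l16" as const,    // or "mp3"
  recordingPath: "/my-assistant-recordings",
  recordingUseCustomStorageEnabled: true, // use configured S3/GCP credentials
  pcapEnabled: true,                      // phone calls on vapi/byo-phone-number only
  pcapS3PathPrefix: "/pcaps",
  loggingUseCustomStorageEnabled: false,  // keep logs on Vapi's storage
  transcriptPlan: { enabled: true, assistantName: "Agent", userName: "Caller" },
};
```

Note that the custom-storage toggles only matter once S3 or GCP credentials are configured on the Provider Credentials page; without credentials, everything stays on Vapi's storage regardless.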